---
# pyannote.audio model + speaker-diarization pipeline configuration.
# NOTE(review): this document was flattened onto one line in the source; the
# block structure below was reconstructed from the key order and the standard
# pyannote.audio 3.x config layout — confirm against the original config.yaml.

# --- Segmentation model (HF-style config keys, alphabetical) ---
architectures:
  - SegmentationModel
chunk_duration: 10.0
max_speakers_per_chunk: 3
max_speakers_per_frame: 2
min_duration: null
model_type: pyannet
pyannote.audio:
  architecture:
    class: SegmentationModel
    module: pyannote.audio.models.segmentation.PyanNet
sample_rate: 16000
torch_dtype: float32
# Quoted so the version stays a string ("4.50" would otherwise be float 4.5).
transformers_version: "4.50.2"
warm_up:
  - 0.0
  - 0.0
weigh_by_cardinality: false

# --- Diarization pipeline definition ---
pipeline:
  name: pyannote.audio.pipelines.SpeakerDiarization
  params:
    clustering: AgglomerativeClustering
    embedding: pyannote/wespeaker-voxceleb-resnet34-LM
    embedding_batch_size: 32
    embedding_exclude_overlap: true
    segmentation: pyannote/segmentation-3.0
    segmentation_batch_size: 32

# --- Tuned hyper-parameters applied to the pipeline above ---
params:
  clustering:
    method: centroid
    min_cluster_size: 12
    threshold: 0.7045654963945799
  segmentation:
    min_duration_off: 0.0