{
  "adapter_hidden_layers": 0,
  "adapter_inner_dim": 512,
  "adapter_type": "subsampler",
  "architectures": [
    "Blsp2Model"
  ],
  "conv_kernel_sizes": "5,5,5",
  "kd_temperature": 2,
  "lora_config": {
    "auto_mapping": null,
    "base_model_name_or_path": null,
    "bias": "none",
    "fan_in_fan_out": false,
    "inference_mode": false,
    "init_lora_weights": true,
    "layers_pattern": null,
    "layers_to_transform": null,
    "lora_alpha": 16,
    "lora_dropout": 0.05,
    "modules_to_save": null,
    "peft_type": "LORA",
    "r": 16,
    "revision": null,
    "target_modules": [
      "c_attn",
      "c_proj",
      "w1",
      "w2"
    ],
    "task_type": null
  },
  "lora_scope": "audio",
  "num_emotions": 5,
  "num_post_cif_layers": 4,
  "num_pre_cif_layers": 4,
  "qwen_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": [
      "QWenLMHeadModel"
    ],
    "attn_dropout_prob": 0.0,
    "auto_map": {
      "AutoConfig": "configuration_qwen.QWenConfig",
      "AutoModelForCausalLM": "modeling_qwen.QWenLMHeadModel"
    },
    "bad_words_ids": null,
    "begin_suppress_tokens": null,
    "bf16": false,
    "bos_token_id": null,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "early_stopping": false,
    "emb_dropout_prob": 0.0,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "fp16": false,
    "fp32": false,
    "hidden_size": 4096,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "initializer_range": 0.02,
    "intermediate_size": 22016,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "kv_channels": 128,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_epsilon": 1e-06,
    "length_penalty": 1.0,
    "max_length": 20,
    "max_position_embeddings": 8192,
    "min_length": 0,
    "model_type": "qwen",
    "no_bias": true,
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 32,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 32,
    "num_return_sequences": 1,
    "onnx_safe": null,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "rotary_emb_base": 10000,
    "rotary_pct": 1.0,
    "scale_attn_weights": true,
    "sep_token_id": null,
    "seq_length": 8192,
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": false,
    "tokenizer_class": "QWenTokenizer",
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.32.0",
    "typical_p": 1.0,
    "use_bfloat16": false,
    "use_cache": true,
    "use_cache_kernel": false,
    "use_cache_quantization": false,
    "use_dynamic_ntk": true,
    "use_flash_attn": false,
    "use_logn_attn": true,
    "vocab_size": 151936
  },
  "torch_dtype": "bfloat16",
  "transformers_version": "4.32.0",
  "whisper_config": {
    "_name_or_path": "openai/whisper-large-v2",
    "activation_dropout": 0.0,
    "activation_function": "gelu",
    "add_cross_attention": false,
    "apply_spec_augment": false,
    "architectures": [
      "WhisperForConditionalGeneration"
    ],
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "begin_suppress_tokens": [
      220,
      50257
    ],
    "bos_token_id": 50257,
    "chunk_size_feed_forward": 0,
    "classifier_proj_size": 256,
    "cross_attention_hidden_size": null,
    "d_model": 1280,
    "decoder_attention_heads": 20,
    "decoder_ffn_dim": 5120,
    "decoder_layerdrop": 0.0,
    "decoder_layers": 32,
    "decoder_start_token_id": 50258,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "dropout": 0.0,
    "early_stopping": false,
    "encoder_attention_heads": 20,
    "encoder_ffn_dim": 5120,
    "encoder_layerdrop": 0.0,
    "encoder_layers": 32,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": 50257,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_decoder_ids": [
      [
        1,
        50259
      ],
      [
        2,
        50359
      ],
      [
        3,
        50363
      ]
    ],
    "forced_eos_token_id": null,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "init_std": 0.02,
    "is_decoder": false,
    "is_encoder_decoder": true,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "length_penalty": 1.0,
    "mask_feature_length": 10,
    "mask_feature_min_masks": 0,
    "mask_feature_prob": 0.0,
    "mask_time_length": 10,
    "mask_time_min_masks": 2,
    "mask_time_prob": 0.05,
    "max_length": 448,
    "max_source_positions": 1500,
    "max_target_positions": 448,
    "median_filter_width": 7,
    "min_length": 0,
    "model_type": "whisper",
    "no_repeat_ngram_size": 0,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 32,
    "num_mel_bins": 80,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": 50257,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "scale_embedding": false,
    "sep_token_id": null,
    "suppress_tokens": [
      1,
      2,
      7,
      8,
      9,
      10,
      14,
      25,
      26,
      27,
      28,
      29,
      31,
      58,
      59,
      60,
      61,
      62,
      63,
      90,
      91,
      92,
      93,
      359,
      503,
      522,
      542,
      873,
      893,
      902,
      918,
      922,
      931,
      1350,
      1853,
      1982,
      2460,
      2627,
      3246,
      3253,
      3268,
      3536,
      3846,
      3961,
      4183,
      4667,
      6585,
      6647,
      7273,
      9061,
      9383,
      10428,
      10929,
      11938,
      12033,
      12331,
      12562,
      13793,
      14157,
      14635,
      15265,
      15618,
      16553,
      16604,
      18362,
      18956,
      20075,
      21675,
      22520,
      26130,
      26161,
      26435,
      28279,
      29464,
      31650,
      32302,
      32470,
      36865,
      42863,
      47425,
      49870,
      50254,
      50258,
      50358,
      50359,
      50360,
      50361,
      50362
    ],
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": "float32",
    "torchscript": false,
    "transformers_version": "4.32.0",
    "typical_p": 1.0,
    "use_bfloat16": false,
    "use_cache": true,
    "use_weighted_layer_sum": false,
    "vocab_size": 51865
  }
}