From 7b19550dd11c5a0120a5df22747237fca30d47ae Mon Sep 17 00:00:00 2001
From: TelGome <2657726985@qq.com>
Date: Tue, 20 Jan 2026 17:51:43 +0800
Subject: [PATCH] Add pass to restore batch dim in traced view ops.

---
 .../batch_dim_restore_view_pass.py |   85 +
 .../graph_net.json                 |   16 +-
 .../input_tensor_constraints.py    |  737 ++++-----
 .../graph_net.json                 |   17 +-
 .../input_tensor_constraints.py    | 1439 +++++++++--------
 .../graph_net.json                 |    4 +-
 .../input_tensor_constraints.py    |  277 ++--
 7 files changed, 1347 insertions(+), 1228 deletions(-)
 create mode 100644 graph_net/torch/dim_gen_passes/batch_dim_restore_view_pass.py

diff --git a/graph_net/torch/dim_gen_passes/batch_dim_restore_view_pass.py b/graph_net/torch/dim_gen_passes/batch_dim_restore_view_pass.py
new file mode 100644
index 000000000..05c64aac4
--- /dev/null
+++ b/graph_net/torch/dim_gen_passes/batch_dim_restore_view_pass.py
@@ -0,0 +1,85 @@
+import torch.fx as fx
+from graph_net.torch.dim_gen_passes import DimensionGeneralizationPass
+import os
+
+
+class ConcretePass(DimensionGeneralizationPass):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def get_pass_name(self) -> str:
+        return os.path.basename(__file__)[:-3]
+
+    def need_rewrite(self, traced_module: fx.GraphModule) -> bool:
+        if 0 not in self.axes:
+            return False
+        return any(self._node_need_rewrite(node) for node in traced_module.graph.nodes)
+
+    def _node_need_rewrite(self, node) -> bool:
+        if not (node.op == "call_method"):
+            return False
+        if not (node.target == "view"):
+            return False
+        if not (len(node.args) == 4):
+            return False
+        if not (isinstance(node.args[1], int)):
+            return False
+        if not (node.args[2] == -1):
+            return False
+        return self._input_is_missing_batch_dim(node.args[0])
+
+    def _input_is_missing_batch_dim(self, input_node: fx.Node) -> bool:
+        meta = input_node.meta.get("tensor_meta")
+        return meta is not None and len(meta.shape) == 4
+
+    def rewrite(self, traced_module: fx.GraphModule) -> fx.GraphModule:
+        """
+        Fx Pass: Restore batch dimension in view ops.
+ e.g., view(16, -1, 1) to view(batch, 16, -1, 1) + """ + # Create a new graph to hold the rewritten nodes + new_graph = fx.Graph() + + # Create a map to link nodes from the old graph to nodes in the new graph + val_map = {} + batch_size_node = None + + def create_batch_size_from_node(node): + return new_graph.call_method("size", args=(val_map[node], 0)) + + for node in traced_module.graph.nodes: + if self._node_need_rewrite(node): + # Get the input tensor node + input_tensor_node = node.args[0] + + # Map the input tensor node to the new graph node + new_input_node = val_map[input_tensor_node] + + if batch_size_node is None: + batch_size_node = create_batch_size_from_node(input_tensor_node) + + # Get the target shape arguments for view (e.g., 16, -1, 1) + view_args = node.args[1:] + + # Prepend batch_size to view arguments + # (batch_size, 16, -1, 1) + new_view_args = (batch_size_node,) + view_args + + # Insert the new view node into the new graph + new_node = new_graph.call_method( + "view", args=(new_input_node, *new_view_args) + ) + # Map the old node to the new node + val_map[node] = new_node + else: + # Copy other nodes to the new graph + new_node = new_graph.node_copy(node, lambda x: val_map[x]) + val_map[node] = new_node + + # Use first placeholder as anchor for batch size + if batch_size_node is None and node.op == "placeholder": + batch_size_node = create_batch_size_from_node(node) + + traced_module.graph = new_graph + traced_module.recompile() + return traced_module diff --git a/samples/transformers-auto-model/907508196l_wavlm-libri-clean-100h-base-plus-finetuned-ks/graph_net.json b/samples/transformers-auto-model/907508196l_wavlm-libri-clean-100h-base-plus-finetuned-ks/graph_net.json index 31f58cc32..9ddddd53d 100644 --- a/samples/transformers-auto-model/907508196l_wavlm-libri-clean-100h-base-plus-finetuned-ks/graph_net.json +++ b/samples/transformers-auto-model/907508196l_wavlm-libri-clean-100h-base-plus-finetuned-ks/graph_net.json @@ -15,5 +15,19 @@ "region:us" ], "heuristic_tag": "audio", - "dimension_generalization_passes": [] + "dimension_generalization_passes": [ + "batch_call_method_view_pass", + "batch_dim_restore_view_pass", + "tuple_arg_call_method_view_pass", + "naive_call_method_reshape_pass", + "naive_call_method_expand_pass", + "non_batch_call_method_expand_pass", + "non_batch_call_function_arange_pass", + "non_batch_call_function_getitem_slice_pass", + "non_batch_call_function_full_pass", + "non_batch_call_function_full_plus_one_pass", + "non_batch_call_function_zeros_pass", + "non_batch_call_function_arange_plus_one_pass" + ], + "symbolic_dimension_reifier": "naive_nlp_sym_dim_reifier" } \ No newline at end of file diff --git a/samples/transformers-auto-model/907508196l_wavlm-libri-clean-100h-base-plus-finetuned-ks/input_tensor_constraints.py b/samples/transformers-auto-model/907508196l_wavlm-libri-clean-100h-base-plus-finetuned-ks/input_tensor_constraints.py index c94e8eb46..e087b4529 100644 --- a/samples/transformers-auto-model/907508196l_wavlm-libri-clean-100h-base-plus-finetuned-ks/input_tensor_constraints.py +++ b/samples/transformers-auto-model/907508196l_wavlm-libri-clean-100h-base-plus-finetuned-ks/input_tensor_constraints.py @@ -1,1002 +1,1003 @@ from sympy import Symbol, Expr, Rel, Eq S0 = Symbol("S0") +S1 = Symbol("S1") -dynamic_dim_constraint_symbols = [S0] +dynamic_dim_constraint_symbols = [S0, S1] -dynamic_dim_constraint_symbol2example_value = {S0: 80000} +dynamic_dim_constraint_symbol2example_value = {S0: 1, S1: 80000} 
dynamic_dim_constraint_relations = [] dynamic_dim_constraint_input_shapes = [ - ([1, S0], "L_input_values_"), - ([10], "L_self_modules_classifier_parameters_bias_"), - ([10, 256], "L_self_modules_classifier_parameters_weight_"), - ([256], "L_self_modules_projector_parameters_bias_"), - ([256, 768], "L_self_modules_projector_parameters_weight_"), - ([768], "L_self_modules_wavlm_modules_encoder_modules_layer_norm_parameters_bias_"), + ([S0, S1], "L_input_values_"), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layer_norm_parameters_weight_", - ), - ( - [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", - ), - ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [512, 1, 10], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_conv_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_k_proj_parameters_bias_", + [512], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_layer_norm_parameters_weight_", ), ( - [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_k_proj_parameters_weight_", + [512], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_layer_norm_parameters_bias_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_out_proj_parameters_bias_", + [512, 512, 3], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_1_modules_conv_parameters_weight_", ), ( - [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_out_proj_parameters_weight_", + [512, 512, 3], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_2_modules_conv_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_q_proj_parameters_bias_", + [512, 512, 3], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_3_modules_conv_parameters_weight_", ), ( - [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_q_proj_parameters_weight_", + [512, 512, 3], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_4_modules_conv_parameters_weight_", ), ( - [320, 12], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_rel_attn_embed_parameters_weight_", + [512, 512, 2], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_5_modules_conv_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_v_proj_parameters_bias_", + [512, 512, 2], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_6_modules_conv_parameters_weight_", ), ( - [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_v_proj_parameters_weight_", + [512], + "L_self_modules_wavlm_modules_feature_projection_modules_layer_norm_parameters_weight_", ), ( - [1, 12, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_parameters_gru_rel_pos_const_", + [512], + 
"L_self_modules_wavlm_modules_feature_projection_modules_layer_norm_parameters_bias_", ), ( - [3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [768, 512], + "L_self_modules_wavlm_modules_feature_projection_modules_projection_parameters_weight_", ), ( - [3072, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_feature_projection_modules_projection_parameters_bias_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_output_dense_parameters_bias_", + [1, 1, 128], + "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_modules_parametrizations_modules_weight_parameters_original0_", ), ( - [768, 3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_output_dense_parameters_weight_", + [768, 48, 128], + "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_modules_parametrizations_modules_weight_parameters_original1_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layer_norm_parameters_weight_", ), + ([768], "L_self_modules_wavlm_modules_encoder_modules_layer_norm_parameters_bias_"), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_layer_norm_parameters_bias_", + [320, 12], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_rel_attn_embed_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 12, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_parameters_gru_rel_pos_const_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_q_proj_parameters_bias_", ), ( - [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_k_proj_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_k_proj_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_v_proj_parameters_bias_", ), ( [768, 768], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_out_proj_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_out_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_q_proj_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_v_proj_parameters_bias_", + [768, 768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_k_proj_parameters_weight_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 12, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_parameters_gru_rel_pos_const_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_layer_norm_parameters_weight_", ), ( - [3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_layer_norm_parameters_bias_", ), ( [3072, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_feed_forward_modules_output_dense_parameters_bias_", + [3072], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [768, 3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_final_layer_norm_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_final_layer_norm_parameters_bias_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_layer_norm_parameters_weight_", + [8, 64], + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 12, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_parameters_gru_rel_pos_const_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_q_proj_parameters_bias_", ), ( - [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_k_proj_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_k_proj_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_v_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_out_proj_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_out_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_q_proj_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_v_proj_parameters_bias_", + [768, 768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_k_proj_parameters_weight_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 12, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_parameters_gru_rel_pos_const_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_layer_norm_parameters_weight_", ), ( - [3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_layer_norm_parameters_bias_", ), ( [3072, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [768], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_feed_forward_modules_output_dense_parameters_bias_", + [3072], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [768, 3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_final_layer_norm_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_final_layer_norm_parameters_bias_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 12, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_parameters_gru_rel_pos_const_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_q_proj_parameters_bias_", ), ( - [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_k_proj_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_k_proj_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_v_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_out_proj_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_out_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_q_proj_parameters_weight_", + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_q_proj_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_v_proj_parameters_bias_", + [768, 768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_k_proj_parameters_weight_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 12, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_parameters_gru_rel_pos_const_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_layer_norm_parameters_weight_", ), ( - [3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_layer_norm_parameters_bias_", ), ( [3072, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_output_dense_parameters_bias_", + [3072], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [768, 3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_final_layer_norm_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_final_layer_norm_parameters_bias_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 12, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_parameters_gru_rel_pos_const_", ), ( [768], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_q_proj_parameters_bias_", ), ( - [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_k_proj_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_k_proj_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_v_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_out_proj_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_out_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_q_proj_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_v_proj_parameters_bias_", + [768, 768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_k_proj_parameters_weight_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 12, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_parameters_gru_rel_pos_const_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_layer_norm_parameters_weight_", ), ( - [3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_layer_norm_parameters_bias_", ), ( [3072, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_output_dense_parameters_bias_", + [3072], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [768, 3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_final_layer_norm_parameters_bias_", + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_final_layer_norm_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_final_layer_norm_parameters_bias_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 12, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_parameters_gru_rel_pos_const_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_q_proj_parameters_bias_", ), ( - [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_k_proj_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_k_proj_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_v_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_out_proj_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_out_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_q_proj_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_v_proj_parameters_bias_", + [768, 768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_k_proj_parameters_weight_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 12, 1, 1], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_parameters_gru_rel_pos_const_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_layer_norm_parameters_weight_", ), ( - [3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_layer_norm_parameters_bias_", ), ( [3072, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_output_dense_parameters_bias_", + [3072], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [768, 3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_final_layer_norm_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_final_layer_norm_parameters_bias_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 12, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_parameters_gru_rel_pos_const_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_q_proj_parameters_bias_", ), ( - [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_k_proj_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_k_proj_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_out_proj_parameters_bias_", + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_v_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_out_proj_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_out_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_q_proj_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_v_proj_parameters_bias_", + [768, 768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_k_proj_parameters_weight_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 12, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_parameters_gru_rel_pos_const_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_layer_norm_parameters_weight_", ), ( - [3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_layer_norm_parameters_bias_", ), ( [3072, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_feed_forward_modules_output_dense_parameters_bias_", + [3072], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [768, 3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_final_layer_norm_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_final_layer_norm_parameters_bias_", ), ( - [768], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 12, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_parameters_gru_rel_pos_const_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_q_proj_parameters_bias_", ), ( - [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_k_proj_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_k_proj_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_v_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_out_proj_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_out_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_q_proj_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_v_proj_parameters_bias_", + [768, 768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_k_proj_parameters_weight_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 12, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_parameters_gru_rel_pos_const_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_layer_norm_parameters_weight_", ), ( - [3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_layer_norm_parameters_bias_", ), ( [3072, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_feed_forward_modules_output_dense_parameters_bias_", + [3072], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [768, 3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_final_layer_norm_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_final_layer_norm_parameters_bias_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 12, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_parameters_gru_rel_pos_const_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_q_proj_parameters_bias_", ), ( - [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_k_proj_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_k_proj_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_v_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_out_proj_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_out_proj_parameters_bias_", ), ( [768, 768], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_q_proj_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_v_proj_parameters_bias_", + [768, 768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_k_proj_parameters_weight_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 12, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_parameters_gru_rel_pos_const_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_layer_norm_parameters_weight_", ), ( - [3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_layer_norm_parameters_bias_", ), ( [3072, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_feed_forward_modules_output_dense_parameters_bias_", + [3072], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [768, 3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_final_layer_norm_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_final_layer_norm_parameters_bias_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 12, 1, 1], + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_parameters_gru_rel_pos_const_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_q_proj_parameters_bias_", ), ( - [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_k_proj_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_k_proj_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_v_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_out_proj_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_out_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_q_proj_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_v_proj_parameters_bias_", + [768, 768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_k_proj_parameters_weight_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 12, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_parameters_gru_rel_pos_const_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_layer_norm_parameters_weight_", ), ( - [3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_layer_norm_parameters_bias_", ), ( [3072, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_feed_forward_modules_output_dense_parameters_bias_", + [3072], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [768, 3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [768], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_final_layer_norm_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_final_layer_norm_parameters_bias_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 12, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_parameters_gru_rel_pos_const_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_q_proj_parameters_bias_", ), ( - [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_k_proj_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_k_proj_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_v_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_out_proj_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_out_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_q_proj_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_v_proj_parameters_bias_", + [768, 768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_k_proj_parameters_weight_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 12, 
1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_parameters_gru_rel_pos_const_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_layer_norm_parameters_weight_", ), ( - [3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_layer_norm_parameters_bias_", ), ( [3072, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_feed_forward_modules_output_dense_parameters_bias_", + [3072], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [768, 3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_final_layer_norm_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_final_layer_norm_parameters_bias_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 12, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_parameters_gru_rel_pos_const_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_q_proj_parameters_bias_", ), ( - [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_k_proj_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_k_proj_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_out_proj_parameters_bias_", + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_v_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_out_proj_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_out_proj_parameters_bias_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_q_proj_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_v_proj_parameters_bias_", + [768, 768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_k_proj_parameters_weight_", ), ( [768, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 12, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_parameters_gru_rel_pos_const_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_layer_norm_parameters_weight_", ), ( - [3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_layer_norm_parameters_bias_", ), ( [3072, 768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_feed_forward_modules_output_dense_parameters_bias_", + [3072], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [768, 3072], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_final_layer_norm_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_final_layer_norm_parameters_bias_", ), ( - [768], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( - [1, 1, 128], - "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_modules_parametrizations_modules_weight_parameters_original0_", + [8], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [768, 48, 128], - "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_modules_parametrizations_modules_weight_parameters_original1_", + [1, 12, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_parameters_gru_rel_pos_const_", ), ( [768], - "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_q_proj_parameters_bias_", ), ( - [512, 1, 10], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_conv_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_k_proj_parameters_bias_", ), ( - [512], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_layer_norm_parameters_bias_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_v_proj_parameters_bias_", ), ( - [512], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_layer_norm_parameters_weight_", + [768, 768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_out_proj_parameters_weight_", ), ( - [512, 512, 3], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_1_modules_conv_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_out_proj_parameters_bias_", ), ( - [512, 512, 3], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_2_modules_conv_parameters_weight_", + [768, 768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_q_proj_parameters_weight_", ), ( - [512, 512, 3], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_3_modules_conv_parameters_weight_", + [768, 768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_k_proj_parameters_weight_", ), ( - [512, 512, 3], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_4_modules_conv_parameters_weight_", + [768, 768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_v_proj_parameters_weight_", ), ( - [512, 512, 2], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_5_modules_conv_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_layer_norm_parameters_weight_", ), ( - [512, 512, 2], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_6_modules_conv_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_layer_norm_parameters_bias_", ), ( - [512], - "L_self_modules_wavlm_modules_feature_projection_modules_layer_norm_parameters_bias_", + [3072, 768], + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [512], - "L_self_modules_wavlm_modules_feature_projection_modules_layer_norm_parameters_weight_", + [3072], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + ), + ( + [768, 3072], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [768], - "L_self_modules_wavlm_modules_feature_projection_modules_projection_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( - [768, 512], - "L_self_modules_wavlm_modules_feature_projection_modules_projection_parameters_weight_", + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_final_layer_norm_parameters_weight_", ), + ( + [768], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_final_layer_norm_parameters_bias_", + ), + ([256, 768], "L_self_modules_projector_parameters_weight_"), + ([256], "L_self_modules_projector_parameters_bias_"), + ([10, 256], "L_self_modules_classifier_parameters_weight_"), + ([10], "L_self_modules_classifier_parameters_bias_"), ] diff --git a/samples/transformers-auto-model/Zahra99_wavlm-large-finetuned-iemocap/graph_net.json b/samples/transformers-auto-model/Zahra99_wavlm-large-finetuned-iemocap/graph_net.json index ed01485da..fab448f5b 100644 --- a/samples/transformers-auto-model/Zahra99_wavlm-large-finetuned-iemocap/graph_net.json +++ b/samples/transformers-auto-model/Zahra99_wavlm-large-finetuned-iemocap/graph_net.json @@ -15,5 +15,20 @@ "endpoints_compatible", "region:us" ], - "heuristic_tag": "audio" + "heuristic_tag": "audio", + "dimension_generalization_passes": [ + "batch_call_method_view_pass", + "batch_dim_restore_view_pass", + "tuple_arg_call_method_view_pass", + "naive_call_method_reshape_pass", + "naive_call_method_expand_pass", + "non_batch_call_method_expand_pass", + "non_batch_call_function_arange_pass", + "non_batch_call_function_getitem_slice_pass", + "non_batch_call_function_full_pass", + "non_batch_call_function_full_plus_one_pass", + "non_batch_call_function_zeros_pass", + "non_batch_call_function_arange_plus_one_pass" + ], + "symbolic_dimension_reifier": "naive_nlp_sym_dim_reifier" } \ No newline at end of file diff --git a/samples/transformers-auto-model/Zahra99_wavlm-large-finetuned-iemocap/input_tensor_constraints.py b/samples/transformers-auto-model/Zahra99_wavlm-large-finetuned-iemocap/input_tensor_constraints.py index 920ed2e9f..146b6f4e0 100644 --- a/samples/transformers-auto-model/Zahra99_wavlm-large-finetuned-iemocap/input_tensor_constraints.py +++ b/samples/transformers-auto-model/Zahra99_wavlm-large-finetuned-iemocap/input_tensor_constraints.py @@ -1,1965 +1,1966 @@ -from sympy import Symbol +from sympy import Symbol, Expr, Rel, Eq S0 = Symbol("S0") +S1 = Symbol("S1") -dynamic_dim_constraint_symbols = [S0] +dynamic_dim_constraint_symbols = [S0, S1] -dynamic_dim_constraint_symbol2example_value = {S0: 80000} +dynamic_dim_constraint_symbol2example_value = {S0: 1, S1: 80000} dynamic_dim_constraint_relations = [] dynamic_dim_constraint_input_shapes = [ - ([1, S0], "L_input_values_"), - ([4], "L_self_modules_classifier_parameters_bias_"), - ([4, 256], "L_self_modules_classifier_parameters_weight_"), - ([256], 
"L_self_modules_projector_parameters_bias_"), - ([256, 1024], "L_self_modules_projector_parameters_weight_"), + ([S0, S1], "L_input_values_"), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layer_norm_parameters_bias_", + [512, 1, 10], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_conv_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layer_norm_parameters_weight_", + [512], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_layer_norm_parameters_weight_", ), ( - [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + [512], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_layer_norm_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [512, 512, 3], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_1_modules_conv_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_k_proj_parameters_bias_", + [512], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_1_modules_layer_norm_parameters_weight_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_k_proj_parameters_weight_", + [512], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_1_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_out_proj_parameters_bias_", + [512, 512, 3], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_2_modules_conv_parameters_weight_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_out_proj_parameters_weight_", + [512], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_2_modules_layer_norm_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_q_proj_parameters_bias_", + [512], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_2_modules_layer_norm_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_q_proj_parameters_weight_", + [512, 512, 3], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_3_modules_conv_parameters_weight_", ), ( - [320, 16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_rel_attn_embed_parameters_weight_", + [512], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_3_modules_layer_norm_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_v_proj_parameters_bias_", + [512], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_3_modules_layer_norm_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_v_proj_parameters_weight_", + [512, 512, 3], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_4_modules_conv_parameters_weight_", ), ( - [1, 16, 1, 1], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_parameters_gru_rel_pos_const_", + [512], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_4_modules_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [512], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_4_modules_layer_norm_parameters_bias_", ), ( - [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + [512, 512, 2], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_5_modules_conv_parameters_weight_", + ), + ( + [512], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_5_modules_layer_norm_parameters_weight_", + ), + ( + [512], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_5_modules_layer_norm_parameters_bias_", + ), + ( + [512, 512, 2], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_6_modules_conv_parameters_weight_", + ), + ( + [512], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_6_modules_layer_norm_parameters_weight_", + ), + ( + [512], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_6_modules_layer_norm_parameters_bias_", + ), + ( + [512], + "L_self_modules_wavlm_modules_feature_projection_modules_layer_norm_parameters_weight_", + ), + ( + [512], + "L_self_modules_wavlm_modules_feature_projection_modules_layer_norm_parameters_bias_", + ), + ( + [1024, 512], + "L_self_modules_wavlm_modules_feature_projection_modules_projection_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_output_dense_parameters_bias_", + "L_self_modules_wavlm_modules_feature_projection_modules_projection_parameters_bias_", ), ( - [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_output_dense_parameters_weight_", + [1, 1, 128], + "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_modules_parametrizations_modules_weight_parameters_original0_", + ), + ( + [1024, 64, 128], + "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_modules_parametrizations_modules_weight_parameters_original1_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_layer_norm_parameters_weight_", ), ( [1024], "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_layer_norm_parameters_weight_", + [320, 16], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_rel_attn_embed_parameters_weight_", + ), + ( + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_feed_forward_modules_output_dense_parameters_bias_", 
+ [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_k_proj_parameters_bias_", + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_output_dense_parameters_bias_", ), 
( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_parameters_gru_rel_pos_const_", + [1024], + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_layer_norm_parameters_weight_", + [8, 64], + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_feed_forward_modules_intermediate_dense_parameters_weight_", 
), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_q_proj_parameters_weight_", + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_final_layer_norm_parameters_bias_", + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_out_proj_parameters_bias_", + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_layer_norm_parameters_bias_", ), ( - [1024], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_10_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_11_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_feed_forward_modules_output_dense_parameters_weight_", + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_12_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_13_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_k_proj_parameters_weight_", + [1024], + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_14_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_layer_norm_parameters_weight_", ), ( [1024], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_15_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_16_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_17_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_4_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_18_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_5_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_k_proj_parameters_bias_", + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_19_modules_feed_forward_modules_output_dense_parameters_bias_", 
), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_6_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_attention_parameters_gru_rel_pos_const_", + [1024], + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_20_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_7_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_21_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_8_modules_layer_norm_parameters_weight_", + [8, 64], + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 64], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_q_proj_parameters_bias_", ), ( - [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_k_proj_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_k_proj_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_v_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_out_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_out_proj_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_out_proj_parameters_bias_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_q_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_q_proj_parameters_weight_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_v_proj_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_k_proj_parameters_weight_", ), ( [1024, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_modules_v_proj_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 16, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_attention_parameters_gru_rel_pos_const_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_final_layer_norm_parameters_weight_", ), ( - [4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_final_layer_norm_parameters_bias_", ), ( [4096, 1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_feed_forward_modules_intermediate_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_feed_forward_modules_intermediate_dense_parameters_weight_", 
), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_feed_forward_modules_output_dense_parameters_bias_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [1024, 4096], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_feed_forward_modules_output_dense_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_22_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_final_layer_norm_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_layer_norm_parameters_weight_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_layer_norm_parameters_bias_", ), ( - [1024], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_9_modules_layer_norm_parameters_weight_", + [8, 64], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( - [1, 1, 128], - "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_modules_parametrizations_modules_weight_parameters_original0_", + [8], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [1024, 64, 128], - "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_modules_parametrizations_modules_weight_parameters_original1_", + [1, 16, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_parameters_gru_rel_pos_const_", ), ( [1024], - "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_parameters_bias_", - ), - ( - [512, 1, 10], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_conv_parameters_weight_", - ), - ( - [512], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_layer_norm_parameters_bias_", - ), - ( - [512], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_layer_norm_parameters_weight_", - ), - ( - [512, 512, 3], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_1_modules_conv_parameters_weight_", - ), - ( - [512], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_1_modules_layer_norm_parameters_bias_", - ), - ( - [512], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_1_modules_layer_norm_parameters_weight_", - ), - ( - [512, 512, 3], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_2_modules_conv_parameters_weight_", - ), - ( - [512], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_2_modules_layer_norm_parameters_bias_", - ), - ( - [512], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_2_modules_layer_norm_parameters_weight_", - ), - ( - [512, 512, 3], - 
"L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_3_modules_conv_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_q_proj_parameters_bias_", ), ( - [512], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_3_modules_layer_norm_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_k_proj_parameters_bias_", ), ( - [512], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_3_modules_layer_norm_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_v_proj_parameters_bias_", ), ( - [512, 512, 3], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_4_modules_conv_parameters_weight_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_out_proj_parameters_weight_", ), ( - [512], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_4_modules_layer_norm_parameters_bias_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_out_proj_parameters_bias_", ), ( - [512], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_4_modules_layer_norm_parameters_weight_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_q_proj_parameters_weight_", ), ( - [512, 512, 2], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_5_modules_conv_parameters_weight_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_k_proj_parameters_weight_", ), ( - [512], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_5_modules_layer_norm_parameters_bias_", + [1024, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_attention_modules_v_proj_parameters_weight_", ), ( - [512], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_5_modules_layer_norm_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_final_layer_norm_parameters_weight_", ), ( - [512, 512, 2], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_6_modules_conv_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_final_layer_norm_parameters_bias_", ), ( - [512], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_6_modules_layer_norm_parameters_bias_", + [4096, 1024], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [512], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_6_modules_layer_norm_parameters_weight_", + [4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( - [512], - "L_self_modules_wavlm_modules_feature_projection_modules_layer_norm_parameters_bias_", + [1024, 4096], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_feed_forward_modules_output_dense_parameters_weight_", ), ( - [512], - "L_self_modules_wavlm_modules_feature_projection_modules_layer_norm_parameters_weight_", + [1024], + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_23_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [1024], - "L_self_modules_wavlm_modules_feature_projection_modules_projection_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layer_norm_parameters_weight_", ), ( - [1024, 512], - "L_self_modules_wavlm_modules_feature_projection_modules_projection_parameters_weight_", + [1024], + "L_self_modules_wavlm_modules_encoder_modules_layer_norm_parameters_bias_", ), + ([256, 1024], "L_self_modules_projector_parameters_weight_"), + ([256], "L_self_modules_projector_parameters_bias_"), + ([4, 256], "L_self_modules_classifier_parameters_weight_"), + ([4], "L_self_modules_classifier_parameters_bias_"), ] diff --git a/samples/transformers-auto-model/hf-tiny-model-private_tiny-random-WavLMForSequenceClassification/graph_net.json b/samples/transformers-auto-model/hf-tiny-model-private_tiny-random-WavLMForSequenceClassification/graph_net.json index 1909424c4..641328fd9 100644 --- a/samples/transformers-auto-model/hf-tiny-model-private_tiny-random-WavLMForSequenceClassification/graph_net.json +++ b/samples/transformers-auto-model/hf-tiny-model-private_tiny-random-WavLMForSequenceClassification/graph_net.json @@ -16,6 +16,7 @@ "heuristic_tag": "audio", "dimension_generalization_passes": [ "batch_call_method_view_pass", + "batch_dim_restore_view_pass", "tuple_arg_call_method_view_pass", "naive_call_method_reshape_pass", "naive_call_method_expand_pass", @@ -26,5 +27,6 @@ "non_batch_call_function_full_plus_one_pass", "non_batch_call_function_zeros_pass", "non_batch_call_function_arange_plus_one_pass" - ] + ], + "symbolic_dimension_reifier": "naive_nlp_sym_dim_reifier" } \ No newline at end of file diff --git a/samples/transformers-auto-model/hf-tiny-model-private_tiny-random-WavLMForSequenceClassification/input_tensor_constraints.py b/samples/transformers-auto-model/hf-tiny-model-private_tiny-random-WavLMForSequenceClassification/input_tensor_constraints.py index d0155fe47..8d0e7b8ef 100644 --- a/samples/transformers-auto-model/hf-tiny-model-private_tiny-random-WavLMForSequenceClassification/input_tensor_constraints.py +++ b/samples/transformers-auto-model/hf-tiny-model-private_tiny-random-WavLMForSequenceClassification/input_tensor_constraints.py @@ -1,43 +1,96 @@ from sympy import Symbol, Expr, Rel, Eq S0 = Symbol("S0") +S1 = Symbol("S1") -dynamic_dim_constraint_symbols = [S0] +dynamic_dim_constraint_symbols = [S0, S1] -dynamic_dim_constraint_symbol2example_value = {S0: 80000} +dynamic_dim_constraint_symbol2example_value = {S0: 1, S1: 80000} dynamic_dim_constraint_relations = [] dynamic_dim_constraint_input_shapes = [ - ([1, S0], "L_input_values_"), - ([2], "L_self_modules_classifier_parameters_bias_"), - ([2, 256], "L_self_modules_classifier_parameters_weight_"), - ([256], "L_self_modules_projector_parameters_bias_"), - ([256, 16], "L_self_modules_projector_parameters_weight_"), - ([16], "L_self_modules_wavlm_modules_encoder_modules_layer_norm_parameters_bias_"), + ([S0, S1], "L_input_values_"), + ( + [32, 1, 8], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_conv_parameters_weight_", + ), + ( + [32], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_layer_norm_parameters_weight_", + ), + ( + [32], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_layer_norm_parameters_bias_", + ), + ( + [32, 32, 8], + 
"L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_1_modules_conv_parameters_weight_", + ), + ( + [32, 32, 8], + "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_2_modules_conv_parameters_weight_", + ), + ( + [32], + "L_self_modules_wavlm_modules_feature_projection_modules_layer_norm_parameters_weight_", + ), + ( + [32], + "L_self_modules_wavlm_modules_feature_projection_modules_layer_norm_parameters_bias_", + ), + ( + [16, 32], + "L_self_modules_wavlm_modules_feature_projection_modules_projection_parameters_weight_", + ), + ( + [16], + "L_self_modules_wavlm_modules_feature_projection_modules_projection_parameters_bias_", + ), + ( + [1, 1, 16], + "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_modules_parametrizations_modules_weight_parameters_original0_", + ), + ( + [16, 8, 16], + "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_modules_parametrizations_modules_weight_parameters_original1_", + ), + ( + [16], + "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_parameters_bias_", + ), ( [16], "L_self_modules_wavlm_modules_encoder_modules_layer_norm_parameters_weight_", ), + ([16], "L_self_modules_wavlm_modules_encoder_modules_layer_norm_parameters_bias_"), ( - [8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + [320, 2], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_rel_attn_embed_parameters_weight_", ), ( [8, 8], "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), + ( + [8], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", + ), + ( + [1, 2, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_parameters_gru_rel_pos_const_", + ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_q_proj_parameters_bias_", ), ( - [16, 16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_k_proj_parameters_weight_", + [16], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_k_proj_parameters_bias_", ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_v_proj_parameters_bias_", ), ( [16, 16], @@ -45,39 +98,35 @@ ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_out_proj_parameters_bias_", ), ( [16, 16], "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_q_proj_parameters_weight_", ), ( - [320, 2], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_rel_attn_embed_parameters_weight_", - ), - ( - [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_v_proj_parameters_bias_", + [16, 16], + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_k_proj_parameters_weight_", ), ( [16, 16], "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 2, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_attention_parameters_gru_rel_pos_const_", + [16], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_layer_norm_parameters_weight_", ), ( - [20], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [16], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_layer_norm_parameters_bias_", ), ( [20, 16], "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_output_dense_parameters_bias_", + [20], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [16, 20], @@ -85,7 +134,7 @@ ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [16], @@ -93,31 +142,31 @@ ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_final_layer_norm_parameters_bias_", ), ( - [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_0_modules_layer_norm_parameters_weight_", + [8, 8], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 2, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_parameters_gru_rel_pos_const_", ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_q_proj_parameters_bias_", ), ( - [16, 16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_k_proj_parameters_weight_", + [16], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_k_proj_parameters_bias_", ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_v_proj_parameters_bias_", ), ( [16, 16], @@ -125,35 +174,35 @@ ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_out_proj_parameters_bias_", ), ( [16, 16], 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_q_proj_parameters_weight_", ), ( - [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_v_proj_parameters_bias_", + [16, 16], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_k_proj_parameters_weight_", ), ( [16, 16], "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 2, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_attention_parameters_gru_rel_pos_const_", + [16], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_layer_norm_parameters_weight_", ), ( - [20], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [16], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_layer_norm_parameters_bias_", ), ( [20, 16], "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_output_dense_parameters_bias_", + [20], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [16, 20], @@ -161,7 +210,7 @@ ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [16], @@ -169,31 +218,31 @@ ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_final_layer_norm_parameters_bias_", ), ( - [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_1_modules_layer_norm_parameters_weight_", + [8, 8], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 2, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_parameters_gru_rel_pos_const_", ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_q_proj_parameters_bias_", ), ( - [16, 16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_k_proj_parameters_weight_", + [16], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_k_proj_parameters_bias_", ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_out_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_v_proj_parameters_bias_", ), ( [16, 16], @@ -201,35 +250,35 @@ ), ( [16], - 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_out_proj_parameters_bias_", ), ( [16, 16], "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_q_proj_parameters_weight_", ), ( - [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_v_proj_parameters_bias_", + [16, 16], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_k_proj_parameters_weight_", ), ( [16, 16], "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 2, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_attention_parameters_gru_rel_pos_const_", + [16], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_layer_norm_parameters_weight_", ), ( - [20], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [16], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_layer_norm_parameters_bias_", ), ( [20, 16], "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_output_dense_parameters_bias_", + [20], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [16, 20], @@ -237,7 +286,7 @@ ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [16], @@ -245,31 +294,31 @@ ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_final_layer_norm_parameters_bias_", ), ( - [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_2_modules_layer_norm_parameters_weight_", + [8, 8], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", ), ( [8], "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_gru_rel_pos_linear_parameters_bias_", ), ( - [8, 8], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_gru_rel_pos_linear_parameters_weight_", + [1, 2, 1, 1], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_parameters_gru_rel_pos_const_", ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_k_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_q_proj_parameters_bias_", ), ( - [16, 16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_k_proj_parameters_weight_", + [16], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_k_proj_parameters_bias_", ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_out_proj_parameters_bias_", + 
"L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_v_proj_parameters_bias_", ), ( [16, 16], @@ -277,35 +326,35 @@ ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_q_proj_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_out_proj_parameters_bias_", ), ( [16, 16], "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_q_proj_parameters_weight_", ), ( - [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_v_proj_parameters_bias_", + [16, 16], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_k_proj_parameters_weight_", ), ( [16, 16], "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_modules_v_proj_parameters_weight_", ), ( - [1, 2, 1, 1], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_attention_parameters_gru_rel_pos_const_", + [16], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_layer_norm_parameters_weight_", ), ( - [20], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_intermediate_dense_parameters_bias_", + [16], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_layer_norm_parameters_bias_", ), ( [20, 16], "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_intermediate_dense_parameters_weight_", ), ( - [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_output_dense_parameters_bias_", + [20], + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_intermediate_dense_parameters_bias_", ), ( [16, 20], @@ -313,7 +362,7 @@ ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_final_layer_norm_parameters_bias_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_feed_forward_modules_output_dense_parameters_bias_", ), ( [16], @@ -321,58 +370,10 @@ ), ( [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_layer_norm_parameters_bias_", - ), - ( - [16], - "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_layer_norm_parameters_weight_", - ), - ( - [1, 1, 16], - "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_modules_parametrizations_modules_weight_parameters_original0_", - ), - ( - [16, 8, 16], - "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_modules_parametrizations_modules_weight_parameters_original1_", - ), - ( - [16], - "L_self_modules_wavlm_modules_encoder_modules_pos_conv_embed_modules_conv_parameters_bias_", - ), - ( - [32, 1, 8], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_conv_parameters_weight_", - ), - ( - [32], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_layer_norm_parameters_bias_", - ), - ( - [32], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_0_modules_layer_norm_parameters_weight_", - ), - ( - [32, 32, 8], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_1_modules_conv_parameters_weight_", - ), - ( - [32, 32, 8], - "L_self_modules_wavlm_modules_feature_extractor_modules_conv_layers_modules_2_modules_conv_parameters_weight_", - ), - ( - [32], - 
"L_self_modules_wavlm_modules_feature_projection_modules_layer_norm_parameters_bias_", - ), - ( - [32], - "L_self_modules_wavlm_modules_feature_projection_modules_layer_norm_parameters_weight_", - ), - ( - [16], - "L_self_modules_wavlm_modules_feature_projection_modules_projection_parameters_bias_", - ), - ( - [16, 32], - "L_self_modules_wavlm_modules_feature_projection_modules_projection_parameters_weight_", + "L_self_modules_wavlm_modules_encoder_modules_layers_modules_3_modules_final_layer_norm_parameters_bias_", ), + ([256, 16], "L_self_modules_projector_parameters_weight_"), + ([256], "L_self_modules_projector_parameters_bias_"), + ([2, 256], "L_self_modules_classifier_parameters_weight_"), + ([2], "L_self_modules_classifier_parameters_bias_"), ]