criterion_config:
  kwargs:
    padding_idx: -1
    rate: 0.3
    size: 4264
    smoothing: 0.1
  name: lasr.model.e2e_ctc_att.e2e_loss:E2E_Loss
model_config:
  kwargs:
    ctc_dropout: 0.1
    decoder_attention_dim: 512
    decoder_attention_heads: 8
    decoder_dropout_rate: 0.1
    decoder_input_layer: embed
    decoder_linear_units: 2048
    decoder_num_block: 6
    decoder_self_attention_dropout_rate: 0
    decoder_src_attention_dropout_rate: 0
    encoder_attention_dim: 512
    encoder_attention_dropout_rate: 0
    encoder_attention_heads: 8
    encoder_dropout_rate: 0.1
    encoder_input_layer: conv2d
    encoder_linear_units: 2048
    encoder_num_blocks: 12
    encoder_pos_enc_layer_type: rel_pos
    encoder_selfattention_layer_type: rel_selfattn
    idim: 80
    odim: 4264
  name: lasr.model.e2e_ctc_att.e2e_conformer:E2E_Conformer_CTC
optim_config:
  kwargs:
    betas:
      - 0.9
      - 0.98
  name: torch.optim:Adam
  scheduler:
    kwargs:
      factor: 3
      model_size: 512
      offset: 0
      warm_step: 25000
    name: lasr.modules.optimizer.scheduler:WarmupScheduler
tokenizer_config:
  kwargs:
    dict_path: ./dict.txt
    sc: ''
  name: lasr.data.tokenizer:CharTokenizer
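# Each `name` above follows a `module.path:ClassName` convention, with the
# sibling `kwargs` mapping passed to the constructor. A minimal sketch of how
# such an entry is presumably resolved (the actual lasr loader is not shown
# here, so treat the `build` helper below as an assumption):
#
#   import importlib
#
#   def build(name: str, **kwargs):
#       # Split "module.path:ClassName" and import the module.
#       module_path, _, class_name = name.partition(":")
#       cls = getattr(importlib.import_module(module_path), class_name)
#       return cls(**kwargs)
#
#   # e.g. the optimizer entry above; model.parameters() supplies the
#   # parameter list that Adam requires in addition to the configured kwargs:
#   # optimizer = build("torch.optim:Adam",
#   #                   params=model.parameters(), betas=(0.9, 0.98))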