diff --git a/config.py b/config.py index 154a39a..2d4dd8b 100644 --- a/config.py +++ b/config.py @@ -134,26 +134,26 @@ def load_parameters(): RNN_TYPE = 'LSTM' # RNN unit type ('LSTM' and 'GRU' supported) INIT_FUNCTION = 'glorot_uniform' # Initialization function for matrices (see keras/initializations.py) - SOURCE_TEXT_EMBEDDING_SIZE = 128 # Source language word embedding size. + SOURCE_TEXT_EMBEDDING_SIZE = 64 # Source language word embedding size. SRC_PRETRAINED_VECTORS = None # Path to pretrained vectors (e.g.: DATA_ROOT_PATH + '/DATA/word2vec.%s.npy' % SRC_LAN) # Set to None if you don't want to use pretrained vectors. # When using pretrained word embeddings. this parameter must match with the word embeddings size SRC_PRETRAINED_VECTORS_TRAINABLE = True # Finetune or not the target word embedding vectors. - TARGET_TEXT_EMBEDDING_SIZE = 128 # Source language word embedding size. + TARGET_TEXT_EMBEDDING_SIZE = 64 # Target language word embedding size. TRG_PRETRAINED_VECTORS = None # Path to pretrained vectors. (e.g. DATA_ROOT_PATH + '/DATA/word2vec.%s.npy' % TRG_LAN) # Set to None if you don't want to use pretrained vectors. # When using pretrained word embeddings, the size of the pretrained word embeddings must match with the word embeddings size. TRG_PRETRAINED_VECTORS_TRAINABLE = True # Finetune or not the target word embedding vectors. # Encoder configuration - ENCODER_HIDDEN_SIZE = 128 # For models with RNN encoder + ENCODER_HIDDEN_SIZE = 64 # For models with RNN encoder BIDIRECTIONAL_ENCODER = True # Use bidirectional encoder N_LAYERS_ENCODER = 1 # Stack this number of encoding layers BIDIRECTIONAL_DEEP_ENCODER = True # Use bidirectional encoder in all encoding layers # Decoder configuration - DECODER_HIDDEN_SIZE = 128 # For models with RNN decoder + DECODER_HIDDEN_SIZE = 64 # For models with RNN decoder N_LAYERS_DECODER = 1 # Stack this number of decoding layers. 
ADDITIONAL_OUTPUT_MERGE_MODE = 'sum' # Merge mode for the skip-connections ATTENTION_SIZE = DECODER_HIDDEN_SIZE diff --git a/sample_ensemble.py b/sample_ensemble.py index 9b9acfb..d3bacc0 100644 --- a/sample_ensemble.py +++ b/sample_ensemble.py @@ -87,7 +87,7 @@ def parse_args(): params_prediction['length_norm_factor'] = params.get('LENGTH_NORM_FACTOR', 0.0) params_prediction['coverage_norm_factor'] = params.get('COVERAGE_NORM_FACTOR', 0.0) params_prediction['pos_unk'] = params.get('POS_UNK', False) - params_prediction['heuristic'] = params.get('HEURISTIC', 0) + heuristic = params.get('HEURISTIC', 0) mapping = None if dataset.mapping == dict() else dataset.mapping for s in args.splits: @@ -105,7 +105,6 @@ def parse_args(): alphas = predictions[1] sources = [x.strip() for x in open(args.text, 'r').read().split('\n')] sources = sources[:-1] if len(sources[-1]) == 0 else sources - heuristic = params_prediction['heuristic'] else: samples = predictions alphas = None