Commit e5ada24: Smaller initial model

lvapeab committed on May 31, 2017 (1 parent: b672fa3)

Showing 2 changed files with 5 additions and 6 deletions.
config.py (4 additions, 4 deletions)

@@ -134,26 +134,26 @@ def load_parameters():
 RNN_TYPE = 'LSTM'  # RNN unit type ('LSTM' and 'GRU' supported)
 INIT_FUNCTION = 'glorot_uniform'  # Initialization function for matrices (see keras/initializations.py)

-SOURCE_TEXT_EMBEDDING_SIZE = 128  # Source language word embedding size.
+SOURCE_TEXT_EMBEDDING_SIZE = 64  # Source language word embedding size.
 SRC_PRETRAINED_VECTORS = None  # Path to pretrained vectors (e.g.: DATA_ROOT_PATH + '/DATA/word2vec.%s.npy' % SRC_LAN)
                               # Set to None if you don't want to use pretrained vectors.
                               # When using pretrained word embeddings, this parameter must match the word embedding size.
 SRC_PRETRAINED_VECTORS_TRAINABLE = True  # Finetune or not the source word embedding vectors.

-TARGET_TEXT_EMBEDDING_SIZE = 128  # Target language word embedding size.
+TARGET_TEXT_EMBEDDING_SIZE = 64  # Target language word embedding size.
 TRG_PRETRAINED_VECTORS = None  # Path to pretrained vectors (e.g.: DATA_ROOT_PATH + '/DATA/word2vec.%s.npy' % TRG_LAN)
                               # Set to None if you don't want to use pretrained vectors.
                               # When using pretrained word embeddings, the size of the pretrained word embeddings must match the word embedding size.
 TRG_PRETRAINED_VECTORS_TRAINABLE = True  # Finetune or not the target word embedding vectors.

 # Encoder configuration
-ENCODER_HIDDEN_SIZE = 128  # For models with RNN encoder
+ENCODER_HIDDEN_SIZE = 64  # For models with RNN encoder
 BIDIRECTIONAL_ENCODER = True  # Use bidirectional encoder
 N_LAYERS_ENCODER = 1  # Stack this number of encoding layers
 BIDIRECTIONAL_DEEP_ENCODER = True  # Use bidirectional encoder in all encoding layers

 # Decoder configuration
-DECODER_HIDDEN_SIZE = 128  # For models with RNN decoder
+DECODER_HIDDEN_SIZE = 64  # For models with RNN decoder
 N_LAYERS_DECODER = 1  # Stack this number of decoding layers.
 ADDITIONAL_OUTPUT_MERGE_MODE = 'sum'  # Merge mode for the skip-connections
 ATTENTION_SIZE = DECODER_HIDDEN_SIZE
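Halving SOURCE_TEXT_EMBEDDING_SIZE, TARGET_TEXT_EMBEDDING_SIZE, ENCODER_HIDDEN_SIZE and DECODER_HIDDEN_SIZE from 128 to 64 roughly quarters the parameter count of each affected weight matrix, since those matrices scale with the product of their input and output dimensions. As a minimal sketch of how such values typically feed a Keras encoder (illustrative only, not this repository's model-building code; the vocabulary size and wiring are assumptions):

    # Illustrative sketch only; vocabulary size and wiring are assumptions.
    from keras.layers import Input, Embedding, LSTM, Bidirectional

    SOURCE_TEXT_EMBEDDING_SIZE = 64   # halved from 128 in this commit
    ENCODER_HIDDEN_SIZE = 64          # halved from 128 in this commit

    src_words = Input(shape=(None,), dtype='int32')  # padded source word ids
    src_embedded = Embedding(input_dim=30000,        # assumed vocabulary size
                             output_dim=SOURCE_TEXT_EMBEDDING_SIZE)(src_words)
    # With BIDIRECTIONAL_ENCODER = True, the forward and backward states are
    # concatenated, so each encoder annotation still has 2 * 64 = 128 dimensions.
    annotations = Bidirectional(LSTM(ENCODER_HIDDEN_SIZE,
                                     return_sequences=True))(src_embedded)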
sample_ensemble.py (1 addition, 2 deletions)

@@ -87,7 +87,7 @@ def parse_args():
 params_prediction['length_norm_factor'] = params.get('LENGTH_NORM_FACTOR', 0.0)
 params_prediction['coverage_norm_factor'] = params.get('COVERAGE_NORM_FACTOR', 0.0)
 params_prediction['pos_unk'] = params.get('POS_UNK', False)
-params_prediction['heuristic'] = params.get('HEURISTIC', 0)
+heuristic = params.get('HEURISTIC', 0)
 mapping = None if dataset.mapping == dict() else dataset.mapping

 for s in args.splits:
@@ -105,7 +105,6 @@ def parse_args():
     alphas = predictions[1]
     sources = [x.strip() for x in open(args.text, 'r').read().split('\n')]
     sources = sources[:-1] if len(sources[-1]) == 0 else sources
-    heuristic = params_prediction['heuristic']
 else:
     samples = predictions
     alphas = None
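Functionally, the sample_ensemble.py change only moves `heuristic` out of `params_prediction` into a local variable read once up front, which makes the later re-read inside the pos_unk branch redundant. For context, `pos_unk` and `heuristic` govern how <unk> tokens in the output are replaced using the attention alignments, in the style of Jean et al. (2015). A hedged sketch of that replacement scheme, with the function name and details assumed rather than taken from this repository:

    # Hedged sketch; not this repository's exact implementation.
    def replace_unks(target_words, source_words, alignments, heuristic=0, mapping=None):
        mapping = mapping or {}
        output = []
        for position, word in enumerate(target_words):
            if word != '<unk>':
                output.append(word)
                continue
            src_word = source_words[alignments[position]]  # most-attended source word
            if heuristic == 0:
                output.append(src_word)                         # copy the source word
            elif heuristic == 1:
                output.append(mapping.get(src_word, src_word))  # dictionary translation
            else:
                # heuristic 2 (assumed): translate lowercase words via the
                # dictionary, copy everything else (likely proper nouns).
                output.append(mapping.get(src_word, src_word)
                              if src_word.islower() else src_word)
        return output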
