desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
'Calculate entropy of distribution.'
def entropy(self, logits, sampling_dim, act_dim, act_type):
if self.env_spec.is_discrete(act_type): entropy = tf.reduce_sum(((- tf.nn.softmax(logits)) * tf.nn.log_softmax(logits)), (-1)) elif self.env_spec.is_box(act_type): means = logits[:, :(sampling_dim / 2)] std = logits[:, (sampling_dim / 2):] entropy = tf.reduce_sum((0.5 * (1 + tf.l...
'Calculate KL of distribution with itself. Used layer only for the gradients.'
def self_kl(self, logits, sampling_dim, act_dim, act_type):
if self.env_spec.is_discrete(act_type): probs = tf.nn.softmax(logits) log_probs = tf.nn.log_softmax(logits) self_kl = tf.reduce_sum((tf.stop_gradient(probs) * (tf.stop_gradient(log_probs) - log_probs)), (-1)) elif self.env_spec.is_box(act_type): means = logits[:, :(sampling_dim /...
'Calculate log-prob of action sampled from distribution.'
def log_prob_action(self, action, logits, sampling_dim, act_dim, act_type):
if self.env_spec.is_discrete(act_type): act_log_prob = tf.reduce_sum((tf.one_hot(action, act_dim) * tf.nn.log_softmax(logits)), (-1)) elif self.env_spec.is_box(act_type): means = logits[:, :(sampling_dim / 2)] std = logits[:, (sampling_dim / 2):] act_log_prob = (((-0.5) * tf.log(...
'Sample all actions given output of core network.'
def sample_actions(self, output, actions=None, greedy=False):
sampled_actions = [] logits = [] log_probs = [] entropy = [] self_kl = [] start_idx = 0 for (i, (act_dim, act_type)) in enumerate(self.env_spec.act_dims_and_types): sampling_dim = self.env_spec.sampling_dim(act_dim, act_type) if (self.fixed_std and self.env_spec.is_box(act_ty...
'Calculate KL between one policy output and another.'
def get_kl(self, my_logits, other_logits):
kl = [] for (i, (act_dim, act_type)) in enumerate(self.env_spec.act_dims_and_types): sampling_dim = self.env_spec.sampling_dim(act_dim, act_type) single_my_logits = my_logits[i] single_other_logits = other_logits[i] if self.env_spec.is_discrete(act_type): my_probs = t...
'Single RNN step. Equivalently, single-time-step sampled actions.'
def single_step(self, prev, cur, greedy=False):
(prev_internal_state, prev_actions, _, _, _, _) = prev (obs, actions) = cur (output, next_state) = self.core(obs, prev_internal_state, prev_actions) (actions, logits, log_probs, entropy, self_kl) = self.sample_actions(output, actions=actions, greedy=greedy) return (next_state, tuple(actions), tuple(...
def sample_step(self, obs, prev_internal_state, prev_actions, greedy=False):
    """Sample a single step from the recurrent policy.

    Packs the previous state/actions into the tuple format expected by
    single_step, runs one step, and keeps only the next internal state
    and the sampled actions (logits, log-probs, entropies and self-KLs
    are discarded).
    """
    prev_tuple = (prev_internal_state, prev_actions, None, None, None, None)
    step_result = self.single_step(prev_tuple, (obs, None), greedy=greedy)
    next_state = step_result[0]
    sampled_actions = step_result[1]
    return (next_state, sampled_actions)
'Calculate log-probs and other calculations on batch of episodes.'
def multi_step(self, all_obs, initial_state, all_actions):
batch_size = tf.shape(initial_state)[0] time_length = tf.shape(all_obs[0])[0] initial_actions = [act[0] for act in all_actions] all_actions = [tf.concat([act[1:], act[0:1]], 0) for act in all_actions] (internal_states, _, logits, log_probs, entropies, self_kls) = tf.scan(self.single_step, (all_obs, ...
'Get initializer for RNN.'
def get_initializer(self, batch_size, initial_state, initial_actions):
logits_init = [] log_probs_init = [] for (act_dim, act_type) in self.env_spec.act_dims_and_types: sampling_dim = self.env_spec.sampling_dim(act_dim, act_type) logits_init.append(tf.zeros([batch_size, sampling_dim])) log_probs_init.append(tf.zeros([batch_size])) entropy_init = [tf...
'Calculate KL between one policy and another on batch of episodes.'
def calculate_kl(self, my_logits, other_logits):
batch_size = tf.shape(my_logits[0])[1] time_length = tf.shape(my_logits[0])[0] reshaped_my_logits = [tf.reshape(my_logit, [(batch_size * time_length), (-1)]) for my_logit in my_logits] reshaped_other_logits = [tf.reshape(other_logit, [(batch_size * time_length), (-1)]) for other_logit in other_logits] ...
'Single step.'
def single_step(self, obs, actions, prev_actions, greedy=False):
batch_size = tf.shape(obs[0])[0] prev_internal_state = tf.zeros([batch_size, self.internal_dim]) (output, next_state) = self.core(obs, prev_internal_state, prev_actions) (actions, logits, log_probs, entropy, self_kl) = self.sample_actions(output, actions=actions, greedy=greedy) return (next_state, t...
def sample_step(self, obs, prev_internal_state, prev_actions, greedy=False):
    """Sample a single step from the policy.

    Delegates to single_step and returns only the next internal state
    and the sampled actions. NOTE(review): prev_internal_state is
    accepted for interface compatibility but is not forwarded —
    single_step appears to construct its own state.
    """
    (next_state, sampled_actions,
     _logits, _log_probs, _entropies, _self_kls) = self.single_step(
        obs, None, prev_actions, greedy=greedy)
    return (next_state, sampled_actions)
'Calculate log-probs and other calculations on batch of episodes.'
def multi_step(self, all_obs, initial_state, all_actions):
batch_size = tf.shape(initial_state)[0] time_length = tf.shape(all_obs[0])[0] reshaped_obs = [] for (obs, (obs_dim, obs_type)) in zip(all_obs, self.env_spec.obs_dims_and_types): if self.env_spec.is_discrete(obs_type): reshaped_obs.append(tf.reshape(obs, [(time_length * batch_size)]))...
'Create the Tensorflow placeholders.'
def setup_placeholders(self):
self.avg_episode_reward = tf.placeholder(tf.float32, [], 'avg_episode_reward') self.internal_state = tf.placeholder(tf.float32, [None, self.policy.rnn_state_dim], 'internal_state') self.single_observation = [] for (i, (obs_dim, obs_type)) in enumerate(self.env_spec.obs_dims_and_types): if self.e...
'Setup Tensorflow Graph.'
def setup(self):
self.setup_placeholders() tf.summary.scalar('avg_episode_reward', self.avg_episode_reward) with tf.variable_scope('model', reuse=None): with tf.variable_scope('policy_net'): (self.policy_internal_states, self.logits, self.log_probs, self.entropies, self.self_kls) = self.policy.multi_step...
'Sample batch of steps from policy.'
def sample_step(self, sess, single_observation, internal_state, single_action, greedy=False):
if greedy: outputs = [self.greedy_next_internal_state, self.greedy_sampled_actions] else: outputs = [self.next_internal_state, self.sampled_actions] feed_dict = {self.internal_state: internal_state} for (action_place, action) in zip(self.single_action, single_action): feed_dict[a...
'Train network using standard gradient descent.'
def train_step(self, sess, observations, internal_state, actions, rewards, terminated, pads, avg_episode_reward=0):
outputs = [self.raw_loss, self.gradient_ops, self.summary] feed_dict = {self.internal_state: internal_state, self.rewards: rewards, self.terminated: terminated, self.pads: pads, self.avg_episode_reward: avg_episode_reward} for (action_place, action) in zip(self.actions, actions): feed_dict[action_pl...
'Train policy using trust region step.'
def trust_region_step(self, sess, observations, internal_state, actions, rewards, terminated, pads, avg_episode_reward=0):
feed_dict = {self.internal_state: internal_state, self.rewards: rewards, self.terminated: terminated, self.pads: pads, self.avg_episode_reward: avg_episode_reward} for (action_place, action) in zip(self.actions, actions): feed_dict[action_place] = action for (obs_place, obs) in zip(self.observations...
'Train value network using value-specific optimizer.'
def fit_values(self, sess, observations, internal_state, actions, rewards, terminated, pads):
feed_dict = {self.internal_state: internal_state, self.rewards: rewards, self.terminated: terminated, self.pads: pads} for (action_place, action) in zip(self.actions, actions): feed_dict[action_place] = action for (obs_place, obs) in zip(self.observations, observations): feed_dict[obs_place]...
'Get controller.'
def get_controller(self):
cls = controller.Controller return cls(self.env, self.env_spec, self.internal_dim, use_online_batch=self.use_online_batch, batch_by_steps=self.batch_by_steps, unify_episodes=self.unify_episodes, replay_batch_size=self.replay_batch_size, max_step=self.max_step, cutoff_agent=self.cutoff_agent, save_trajectories_f...
'Run training.'
def run(self):
is_chief = ((FLAGS.task_id == 0) or (not FLAGS.supervisor)) sv = None def init_fn(sess, saver): ckpt = None if (FLAGS.save_dir and (sv is None)): load_dir = FLAGS.save_dir ckpt = tf.train.get_checkpoint_state(load_dir) if (ckpt and ckpt.model_checkpoint_path):...
'Get inputs to network as single tensor.'
def get_inputs(self, time_step, obs, prev_actions, internal_policy_states):
inputs = [tf.ones_like(time_step)] input_dim = 1 if (not self.input_policy_state): for (i, (obs_dim, obs_type)) in enumerate(self.env_spec.obs_dims_and_types): if self.env_spec.is_discrete(obs_type): inputs.append(tf.one_hot(obs[i], obs_dim)) input_dim += ...
'Reshape inputs from [time_length, batch_size, ...] to [time_length * batch_size, ...]. This allows for computing the value estimate in one go.'
def reshape_batched_inputs(self, all_obs, all_actions, internal_policy_states, policy_logits):
batch_size = tf.shape(all_obs[0])[1] time_length = tf.shape(all_obs[0])[0] reshaped_obs = [] for (obs, (obs_dim, obs_type)) in zip(all_obs, self.env_spec.obs_dims_and_types): if self.env_spec.is_discrete(obs_type): reshaped_obs.append(tf.reshape(obs, [(time_length * batch_size)])) ...
'Get value estimates given input.'
def get_values(self, all_obs, all_actions, internal_policy_states, policy_logits):
batch_size = tf.shape(all_obs[0])[1] time_length = tf.shape(all_obs[0])[0] (time_step, reshaped_obs, reshaped_prev_act, reshaped_internal_policy_states, reshaped_policy_logits) = self.reshape_batched_inputs(all_obs, all_actions, internal_policy_states, policy_logits) (input_dim, inputs) = self.get_input...
'Sample episodes from environment using model.'
def _sample_episodes(self, sess, greedy=False):
obs_after_reset = self.env.reset_if(self.start_episode) for (i, obs) in enumerate(obs_after_reset): if (obs is not None): self.step_count[i] = 0 self.internal_state[i] = self.initial_internal_state() for j in xrange(len(self.env_spec.obs_dims)): self.l...
'Sample steps from the environment until we have enough for a batch.'
def sample_episodes(self, sess):
if self.unify_episodes: self.all_new_ep = self.start_episode[0] episodes = [] total_steps = 0 while (total_steps < (self.max_step * len(self.env))): (initial_state, observations, actions, rewards, pads) = self._sample_episodes(sess) observations = zip(*observations) actio...
'Train model using batch.'
def _train(self, sess, observations, initial_state, actions, rewards, terminated, pads):
if self.use_trust_region: (loss, _, summary) = self.model.trust_region_step(sess, observations, initial_state, actions, rewards, terminated, pads, avg_episode_reward=np.mean(self.episode_rewards)) else: (loss, _, summary) = self.model.train_step(sess, observations, initial_state, actions, reward...
'Sample some episodes and train on some episodes.'
def train(self, sess):
cur_step = sess.run(self.model.inc_global_step) self.cur_step = cur_step if (self.cur_step == 0): for _ in xrange(100): sess.run(self.model.copy_op) sess.run(self.model.copy_op) (initial_state, observations, actions, rewards, terminated, pads) = self.sample_episodes(sess) sel...
def eval(self, sess):
    """Evaluate the policy using greedy sampling.

    Samples one batch of episodes greedily and returns the mean
    per-episode total reward, masking out padded time steps.
    """
    (_initial_state, _observations, _actions,
     rewards, pads) = self._sample_episodes(sess, greedy=True)
    reward_arr = np.array(rewards)
    valid_mask = 1 - np.array(pads)
    per_episode_totals = np.sum(reward_arr * valid_mask, axis=0)
    return np.mean(per_episode_totals)
'Convert time-major batch of episodes to batch-major list of episodes.'
def convert_from_batched_episodes(self, initial_state, observations, actions, rewards, terminated, pads):
rewards = np.array(rewards) pads = np.array(pads) observations = [np.array(obs) for obs in observations] actions = [np.array(act) for act in actions] total_rewards = np.sum((rewards * (1 - pads)), axis=0) total_length = np.sum((1 - pads), axis=0).astype('int32') episodes = [] num_episode...
'Convert batch-major list of episodes to time-major batch of episodes.'
def convert_to_batched_episodes(self, episodes, max_length=None):
lengths = [len(ep[(-2)]) for ep in episodes] max_length = (max_length or max(lengths)) new_episodes = [] for (ep, length) in zip(episodes, lengths): (initial, observations, actions, rewards, terminated) = ep observations = [np.resize(obs, ([(max_length + 1)] + list(obs.shape)[1:])) for o...
'Add batch of episodes to replay buffer.'
def add_to_replay_buffer(self, initial_state, observations, actions, rewards, terminated, pads):
if (self.replay_buffer is None): return rewards = np.array(rewards) pads = np.array(pads) total_rewards = np.sum((rewards * (1 - pads)), axis=0) episodes = self.convert_from_batched_episodes(initial_state, observations, actions, rewards, terminated, pads) priorities = (total_rewards if (...
'Sample a batch of episodes from the replay buffer.'
def get_from_replay_buffer(self, batch_size):
if ((self.replay_buffer is None) or (len(self.replay_buffer) < (1 * batch_size))): return (None, None) desired_count = (batch_size * self.max_step) while True: if (batch_size > len(self.replay_buffer)): batch_size = len(self.replay_buffer) (episodes, probs) = self.replay_...
def seed_replay_buffer(self, episodes):
    """Seed the replay buffer with some episodes.

    Each episode is prepended (in place) with a fresh initial internal
    state before being handed to the buffer. No-op when no replay
    buffer is configured.

    Args:
      episodes: list of episode lists; mutated in place.
    """
    if self.replay_buffer is None:
        return
    # enumerate() instead of Python-2-only xrange(len(...)): identical
    # behavior on Python 2 and also runs on Python 3.
    for idx, episode in enumerate(episodes):
        episodes[idx] = [self.initial_internal_state()] + episode
    self.replay_buffer.seed_buffer(episodes)
def update_config(self, in_string):
    """Update the config dictionary from a comma-separated list.

    `in_string` has the form "key1=val1,key2=val2". Each value is cast
    to the type of the existing entry in self.dict_; the instance
    attributes are then refreshed from the dictionary.

    Returns:
      self, for chaining.
    """
    for pair in in_string.split(','):
        key, raw_value = pair.split('=')
        existing_type = type(self.dict_[key])
        # NOTE(review): casting via the existing type — beware that
        # e.g. bool('False') is True; verify no bool-typed keys rely on this.
        self.dict_[key] = existing_type(raw_value)
    self.__dict__.update(self.dict_)
    return self
'Evaluate bits/dim.'
def eval_epoch(self, hps):
n_eval_dict = {'imnet': 50000, 'lsun': 300, 'celeba': 19962, 'svhn': 26032} if (FLAGS.eval_set_size == 0): num_examples_eval = n_eval_dict[FLAGS.dataset] else: num_examples_eval = FLAGS.eval_set_size n_epoch = (num_examples_eval / hps.batch_size) eval_costs = [] bar_len = 70 ...
'Initializes the vectors from a text vocabulary and binary data.'
def __init__(self, vocab_filename, rows_filename, cols_filename=None):
with open(vocab_filename, 'r') as lines: self.vocab = [line.split()[0] for line in lines] self.word_to_idx = {word: idx for (idx, word) in enumerate(self.vocab)} n = len(self.vocab) with open(rows_filename, 'r') as rows_fh: rows_fh.seek(0, os.SEEK_END) size = rows_fh.tell() ...
def similarity(self, word1, word2):
    """Computes the similarity of two tokens.

    Args:
      word1: first token.
      word2: second token.

    Returns:
      The dot product of the two word vectors as a float, or None if
      either word is out of vocabulary.
    """
    idx1 = self.word_to_idx.get(word1)
    idx2 = self.word_to_idx.get(word2)
    # Must compare against None explicitly: index 0 is falsy, so the
    # previous `not idx` check wrongly rejected the first vocab word.
    if idx1 is None or idx2 is None:
        return None
    return float(self.vecs[idx1] * self.vecs[idx2].transpose())
def neighbors(self, query):
    """Returns the nearest neighbors to the query (a word or vector).

    A string query is first mapped to its embedding (returning None if
    it is out of vocabulary). The result is a list of (word, score)
    pairs sorted by descending score.
    """
    if isinstance(query, basestring):
        word_idx = self.word_to_idx.get(query)
        if word_idx is None:
            return None
        query = self.vecs[word_idx]
    scores = self.vecs * query.transpose()
    ranked = zip(self.vocab, scores.flat)
    return sorted(ranked, key=lambda pair: pair[1], reverse=True)
def lookup(self, word):
    """Returns the embedding for a token, or None if no embedding exists."""
    idx = self.word_to_idx.get(word)
    if idx is None:
        return None
    return self.vecs[idx]
'Creates a new Swivel model.'
def __init__(self, input_base_path, hparams):
(self.row_ix_to_word, self.row_word_to_ix) = self._read_vocab(os.path.join(input_base_path, 'row_vocab.txt')) (self.col_ix_to_word, self.col_word_to_ix) = self._read_vocab(os.path.join(input_base_path, 'col_vocab.txt')) row_sums = self._read_marginals_file(os.path.join(input_base_path, 'row_sums.txt')) ...
'Reads the vocabulary file.'
def _read_vocab(self, filename):
with open(filename) as lines: ix_to_word = [line.strip() for line in lines] word_to_ix = {word: ix for (ix, word) in enumerate(ix_to_word)} return (ix_to_word, word_to_ix)
'Reads text file with one number per line to an array.'
def _read_marginals_file(self, filename):
with open(filename) as lines: return [float(line.strip()) for line in lines]
'Creates ops that read submatrix shards from disk.'
def _count_matrix_input(self, filenames, submatrix_rows, submatrix_cols):
random.shuffle(filenames) filename_queue = tf.train.string_input_producer(filenames) reader = tf.WholeFileReader() (_, serialized_example) = reader.read(filename_queue) features = tf.parse_single_example(serialized_example, features={'global_row': tf.FixedLenFeature([submatrix_rows], dtype=tf.int64)...
'Returns an op that runs an eval on a word similarity dataset. The eval dataset is assumed to be tab-separated, one scored word pair per line. The resulting value is Spearman\'s rho of the human judgements with the cosine similarity of the word embeddings. Args: filename: the filename containing the word similarity da...
def wordsim_eval_op(self, filename):
with open(filename, 'r') as fh: tuples = (line.strip().split(' DCTB ') for line in fh.read().splitlines()) (word1s, word2s, sims) = zip(*tuples) actuals = map(float, sims) v1s_t = tf.nn.embedding_lookup(self.row_embedding, [self.row_word_to_ix.get(w, 0) for w in word1s]) v2s_t = tf.n...
'Returns an op that runs an eval on an analogy dataset. The eval dataset is assumed to be tab-separated, with four tokens per line. The first three tokens are query terms, the last is the expected answer. For each line (e.g., "man king woman queen"), the vectors corresponding to the query terms are added ("king - man +...
def analogy_eval_op(self, filename, max_vocab_size=20000):
analogy_ixs = [] with open(filename, 'r') as lines: for line in lines: parts = line.strip().split(' DCTB ') if (len(parts) == 4): analogy_ixs.append([self.row_word_to_ix.get(w, 0) for w in parts]) (ix1s, ix2s, ix3s, _) = zip(*analogy_ixs) (v1s_t, v2s_t, v3...
'Writes tensor to output_path as tsv.'
def _write_tensor(self, vocab_path, output_path, session, embedding):
embeddings = session.run(embedding) with open(output_path, 'w') as out_f: with open(vocab_path) as vocab_f: for (index, word) in enumerate(vocab_f): word = word.strip() embedding = embeddings[index] print(' DCTB '.join(([word.strip()] + [str(x)...
'Writes row and column embeddings disk.'
def write_embeddings(self, config, session):
self._write_tensor(os.path.join(config.input_base_path, 'row_vocab.txt'), os.path.join(config.output_base_path, 'row_embedding.tsv'), session, self.row_embedding) self._write_tensor(os.path.join(config.input_base_path, 'col_vocab.txt'), os.path.join(config.output_base_path, 'col_embedding.tsv'), session, self.c...
'An alternative implemenation for the encoding coordinates. Args: net: a tensor of shape=[batch_size, height, width, num_features] Returns: a list of tensors with encoded image coordinates in them.'
def encode_coordinates_alt(self, net):
(batch_size, h, w, _) = net.shape.as_list() h_loc = [tf.tile(tf.reshape(tf.contrib.layers.one_hot_encoding(tf.constant([i]), num_classes=h), [h, 1]), [1, w]) for i in xrange(h)] h_loc = tf.concat([tf.expand_dims(t, 2) for t in h_loc], 2) w_loc = [tf.tile(tf.contrib.layers.one_hot_encoding(tf.constant([i...
'Stores argument in member variable for further use. Args: net: A tensor with shape [batch_size, num_features, feature_size] which contains some extracted image features. labels_one_hot: An optional (can be None) ground truth labels for the input features. Is a tensor with shape [batch_size, seq_length, num_char_classe...
def __init__(self, net, labels_one_hot, model_params, method_params):
self._params = model_params self._mparams = method_params self._net = net self._labels_one_hot = labels_one_hot self._batch_size = net.get_shape().dims[0].value self._char_logits = {} regularizer = slim.l2_regularizer(self._mparams.weight_decay) self._softmax_w = slim.model_variable('sof...
'Returns a sample to be used to predict a character during training. This function is used as a loop_function for an RNN decoder. Args: prev: output tensor from previous step of the RNN. A tensor with shape: [batch_size, num_char_classes]. i: index of a character in the output sequence. Returns: A tensor with shape [ba...
@abc.abstractmethod
def get_train_input(self, prev, i):
    """Returns a sample to be used to predict a character during training.

    This function is used as a loop_function for an RNN decoder.

    Args:
      prev: output tensor from the previous step of the RNN; a tensor
        with shape [batch_size, num_char_classes].
      i: index of a character in the output sequence.

    Returns:
      A tensor to feed the decoder at step i (exact shape is
      implementation dependent — see concrete subclasses).
    """
    pass
@abc.abstractmethod
def get_eval_input(self, prev, i):
    """Returns a sample to be used to predict a character during inference.

    This function is used as a loop_function for an RNN decoder.

    Args:
      prev: output tensor from the previous step of the RNN; a tensor
        with shape [batch_size, num_char_classes].
      i: index of a character in the output sequence.

    Raises:
      NotImplementedError: always; concrete subclasses must override.
    """
    # NotImplementedError is the conventional exception for an
    # unimplemented abstract method; AssertionError signals a failed
    # internal invariant, which is not what happens here.
    raise NotImplementedError('Not implemented')
'Unrolls an RNN cell for all inputs. This is a placeholder to call some RNN decoder. It has a similar to tf.seq2seq.rnn_decode interface. Args: decoder_inputs: A list of 2D Tensors* [batch_size x input_size]. In fact, most of existing decoders in presence of a loop_function use only the first element to determine batch...
@abc.abstractmethod
def unroll_cell(self, decoder_inputs, initial_state, loop_function, cell):
    """Unrolls an RNN cell for all inputs.

    This is a placeholder to call some RNN decoder with an interface
    similar to tf.seq2seq.rnn_decoder.

    Args:
      decoder_inputs: A list of 2D Tensors [batch_size x input_size].
        Most existing decoders, in presence of a loop_function, use only
        the first element (to determine batch size/dtype).
      initial_state: initial state for the RNN cell.
      loop_function: callable producing the next decoder input from the
        previous output and the step index.
      cell: the RNN cell to unroll.
    """
    pass
def is_training(self):
    """Returns True if the layer is created for the training stage.

    Ground-truth labels are only supplied at training time, so their
    presence acts as the training/inference switch.
    """
    has_labels = self._labels_one_hot is not None
    return has_labels
def char_logit(self, inputs, char_index):
    """Creates logits for a character if required.

    Logits for a given char_index are memoized in self._char_logits so
    the op is only built once per output position.

    Args:
      inputs: A tensor with shape [batch_size, ?] (depth is
        implementation dependent).
      char_index: An integer index of a character in the output sequence.

    Returns:
      A tensor with shape [batch_size, num_char_classes].
    """
    try:
        return self._char_logits[char_index]
    except KeyError:
        logit = tf.nn.xw_plus_b(inputs, self._softmax_w, self._softmax_b)
        self._char_logits[char_index] = logit
        return logit
def char_one_hot(self, logit):
    """Creates a one-hot encoding for a logit of a character.

    Args:
      logit: A tensor with shape [batch_size, num_char_classes].

    Returns:
      A tensor with shape [batch_size, num_char_classes].
    """
    best_class = tf.argmax(logit, dimension=1)
    return slim.one_hot_encoding(best_class, self._params.num_char_classes)
def get_input(self, prev, i):
    """A wrapper for get_train_input and get_eval_input.

    Args:
      prev: output tensor from the previous step of the RNN; shape
        [batch_size, num_char_classes].
      i: index of a character in the output sequence.

    Returns:
      A tensor with shape [batch_size, ?] — depth depends on
      implementation details.
    """
    handler = self.get_train_input if self.is_training() else self.get_eval_input
    return handler(prev, i)
'Creates character sequence logits for a net specified in the constructor. A "main" method for the sequence layer which glues together all pieces. Returns: A tensor with shape [batch_size, seq_length, num_char_classes].'
def create_logits(self):
with tf.variable_scope('LSTM'): first_label = self.get_input(prev=None, i=0) decoder_inputs = ([first_label] + ([None] * (self._params.seq_length - 1))) lstm_cell = tf.contrib.rnn.LSTMCell(self._mparams.num_lstm_units, use_peepholes=False, cell_clip=self._mparams.lstm_state_clip_value, state...
'Returns a subset of image features for a character. Args: char_index: an index of a character. Returns: A tensor with shape [batch_size, ?]. The output depth depends on the depth of input net.'
def get_image_feature(self, char_index):
(batch_size, features_num, _) = [d.value for d in self._net.get_shape()] slice_len = int((features_num / self._params.seq_length)) net_slice = self._net[:, char_index:(char_index + slice_len), :] feature = tf.reshape(net_slice, [batch_size, (-1)]) logging.debug('Image feature: %s', feature) ...
def get_eval_input(self, prev, i):
    """See SequenceLayerBase.get_eval_input for details.

    This layer conditions only on image features: the previous
    prediction is deliberately ignored.
    """
    del prev  # unused by this layer
    feature = self.get_image_feature(i)
    return feature
def get_train_input(self, prev, i):
    """See SequenceLayerBase.get_train_input for details.

    Training uses exactly the same inputs as evaluation for this layer.
    """
    eval_input = self.get_eval_input(prev, i)
    return eval_input
def unroll_cell(self, decoder_inputs, initial_state, loop_function, cell):
    """See SequenceLayerBase.unroll_cell for details.

    Delegates to the legacy seq2seq rnn_decoder. NOTE(review): the
    loop_function parameter is not forwarded — self.get_input is always
    used instead (this matches the original behavior).
    """
    return tf.contrib.legacy_seq2seq.rnn_decoder(
        decoder_inputs=decoder_inputs,
        initial_state=initial_state,
        cell=cell,
        loop_function=self.get_input)
def get_eval_input(self, prev, i):
    """See SequenceLayerBase.get_eval_input for details.

    At step 0 the previous-character encoding is the zero label; on
    later steps it is the one-hot of the model's own previous
    prediction. The result concatenates the step-i image feature with
    that encoding.
    """
    if i == 0:
        prev_encoding = self._zero_label
    else:
        prev_logit = self.char_logit(prev, char_index=i - 1)
        prev_encoding = self.char_one_hot(prev_logit)
    image_feature = self.get_image_feature(char_index=i)
    return tf.concat([image_feature, prev_encoding], 1)
def get_train_input(self, prev, i):
    """See SequenceLayerBase.get_train_input for details.

    Teacher forcing: the previous-character encoding comes from the
    ground-truth labels rather than the model's own prediction.
    """
    if i == 0:
        ground_truth_prev = self._zero_label
    else:
        ground_truth_prev = self._labels_one_hot[:, i - 1, :]
    image_feature = self.get_image_feature(i)
    return tf.concat([image_feature, ground_truth_prev], 1)
def get_eval_input(self, prev, i):
    """See SequenceLayerBase.get_eval_input for details.

    This layer always feeds the zero label, regardless of the step
    index or the previous output.
    """
    del prev, i  # unconditionally unused
    zero = self._zero_label
    return zero
def get_train_input(self, prev, i):
    """See SequenceLayerBase.get_train_input for details.

    Training and evaluation inputs are identical for this layer.
    """
    same_as_eval = self.get_eval_input(prev, i)
    return same_as_eval
def get_train_input(self, prev, i):
    """See SequenceLayerBase.get_train_input for details.

    Feeds the zero label at the first step and the ground-truth one-hot
    encoding of the previous character afterwards (teacher forcing).
    """
    return self._zero_label if i == 0 else self._labels_one_hot[:, i - 1, :]
def get_eval_input(self, prev, i):
    """See SequenceLayerBase.get_eval_input for details.

    Autoregressive feedback: after the first step the input is the
    one-hot encoding of the model's previous predicted character.
    """
    if i == 0:
        return self._zero_label
    previous_logit = self.char_logit(prev, char_index=i - 1)
    return self.char_one_hot(previous_logit)
def __init__(self, charset, default_character='?'):
    """Creates a lookup table.

    Args:
      charset: a dictionary with id-to-character mapping.
      default_character: character returned for ids missing from the
        charset.
    """
    charset_array = _dict_to_array(charset, default_character)
    mapping_strings = tf.constant(charset_array)
    self.table = tf.contrib.lookup.index_to_string_table_from_tensor(
        mapping=mapping_strings, default_value=default_character)
def get_text(self, ids):
    """Returns a string corresponding to a sequence of character ids.

    Args:
      ids: a tensor with shape [batch_size, max_sequence_length].
    """
    chars = self.table.lookup(tf.to_int64(ids))
    return tf.reduce_join(chars, reduction_indices=1)
'Initialized model parameters. Args: num_char_classes: size of character set. seq_length: number of characters in a sequence. num_views: Number of views (conv towers) to use. null_code: A character code corresponding to a character which indicates end of a sequence. mparams: a dictionary with hyper parameters for metho...
def __init__(self, num_char_classes, seq_length, num_views, null_code, mparams=None):
    """Initializes model parameters.

    Args:
      num_char_classes: size of character set.
      seq_length: number of characters in a sequence.
      num_views: number of views (conv towers) to use.
      null_code: a character code corresponding to a character which
        indicates end of a sequence.
      mparams: optional dictionary with hyper parameters for methods;
        entries override the defaults from default_mparams().
    """
    super(Model, self).__init__()
    self._params = ModelParams(num_char_classes=num_char_classes, seq_length=seq_length, num_views=num_views, null_code=null_code)
    # Start from the default method hyper-parameters, then overlay any
    # caller-supplied overrides.
    self._mparams = self.default_mparams()
    if mparams:
        self._mparams.update(mparams)
'Computes convolutional features using the InceptionV3 model. Args: images: A tensor of shape [batch_size, height, width, channels]. is_training: whether is training or not. reuse: whether or not the network and its variables should be reused. To be able to reuse \'scope\' must be given. Returns: A tensor of shape [bat...
def conv_tower_fn(self, images, is_training=True, reuse=None):
mparams = self._mparams['conv_tower_fn'] logging.debug('Using final_endpoint=%s', mparams.final_endpoint) with tf.variable_scope('conv_tower_fn/INCE'): if reuse: tf.get_variable_scope().reuse_variables() with slim.arg_scope(inception.inception_v3_arg_scope()): (net...
'Splits an input tensor into a list of tensors (features). Args: net: A feature map of shape [batch_size, num_features, feature_size]. Raises: AssertionError: if num_features is less than seq_length. Returns: A list with seq_length tensors of shape [batch_size, feature_size]'
def _create_lstm_inputs(self, net):
num_features = net.get_shape().dims[1].value if (num_features < self._params.seq_length): raise AssertionError(('Incorrect dimension #1 of input tensor %d should be bigger than %d (shape=%s)' % (num_features, self._params.seq_length, net.get_shape()))) elif (num_f...
'Max pool across all nets in spatial dimensions. Args: nets_list: A list of 4D tensors with identical size. Returns: A tensor with the same size as any input tensors.'
def max_pool_views(self, nets_list):
(batch_size, height, width, num_features) = [d.value for d in nets_list[0].get_shape().dims] xy_flat_shape = (batch_size, 1, (height * width), num_features) nets_for_merge = [] with tf.variable_scope('max_pool_views', values=nets_list): for net in nets_list: nets_for_merge.append(tf....
def pool_views_fn(self, nets):
    """Combines output of multiple convolutional towers into a single tensor.

    It stacks towers one on top another (in the height dim) in a 4x1
    grid; the order is an arbitrary design choice and shouldn't matter
    much.

    Args:
      nets: list of tensors of shape [batch_size, height, width,
        num_features].

    Returns:
      A tensor of shape [batch_size, seq_length, features_size].
    """
    with tf.variable_scope('pool_views_fn/STCK'):
        stacked = tf.concat(nets, 1)
        dims = stacked.get_shape().dims
        batch_size = dims[0].value
        feature_size = dims[3].value
        return tf.reshape(stacked, [batch_size, -1, feature_size])
'Returns confidence scores (softmax values) for predicted characters. Args: chars_logit: chars logits, a tensor with shape [batch_size x seq_length x num_char_classes] Returns: A tuple (ids, log_prob, scores), where: ids - predicted characters, a int32 tensor with shape [batch_size x seq_length]; log_prob - a log proba...
def char_predictions(self, chars_logit):
log_prob = utils.logits_to_log_prob(chars_logit) ids = tf.to_int32(tf.argmax(log_prob, dimension=2), name='predicted_chars') mask = tf.cast(slim.one_hot_encoding(ids, self._params.num_char_classes), tf.bool) all_scores = tf.nn.softmax(chars_logit) selected_scores = tf.boolean_mask(all_scores, mask, ...
'Adds one-hot encoding of coordinates to different views in the networks. For each "pixel" of a feature map it adds a onehot encoded x and y coordinates. Args: net: a tensor of shape=[batch_size, height, width, num_features] Returns: a tensor with the same height and width, but altered feature_size.'
def encode_coordinates_fn(self, net):
mparams = self._mparams['encode_coordinates_fn'] if mparams.enabled: (batch_size, h, w, _) = net.shape.as_list() (x, y) = tf.meshgrid(tf.range(w), tf.range(h)) w_loc = slim.one_hot_encoding(x, num_classes=w) h_loc = slim.one_hot_encoding(y, num_classes=h) loc = tf.concat(...
'Creates a base part of the Model (no gradients, losses or summaries). Args: images: A tensor of shape [batch_size, height, width, channels]. labels_one_hot: Optional (can be None) one-hot encoding for ground truth labels. If provided the function will create a model for training. scope: Optional variable_scope. reuse:...
def create_base(self, images, labels_one_hot, scope='AttentionOcr_v1', reuse=None):
logging.debug('images: %s', images) is_training = (labels_one_hot is not None) with tf.variable_scope(scope, reuse=reuse): views = tf.split(value=images, num_or_size_splits=self._params.num_views, axis=2) logging.debug('Views=%d single view: %s', len(views), views[0]) net...
def create_loss(self, data, endpoints):
    """Creates all losses required to train the model.

    Args:
      data: InputEndpoints namedtuple.
      endpoints: Model namedtuple.

    Returns:
      Total loss.
    """
    # sequence_loss_fn registers the loss as a side effect;
    # get_total_loss then folds it in with any regularization losses.
    self.sequence_loss_fn(endpoints.chars_logit, data.labels)
    total = slim.losses.get_total_loss()
    tf.summary.scalar('TotalLoss', total)
    return total
def label_smoothing_regularization(self, chars_labels, weight=0.1):
    """Applies a label smoothing regularization.

    Uses the same method as in https://arxiv.org/abs/1512.00567.

    Args:
      chars_labels: ground truth ids of characters,
        shape=[batch_size, seq_length];
      weight: label-smoothing regularization weight.

    Returns:
      A tensor with the same shape as the one-hot-encoded input labels.
    """
    one_hot = tf.one_hot(
        chars_labels, depth=self._params.num_char_classes, axis=-1)
    positive = 1.0 - weight
    uniform = weight / self._params.num_char_classes
    return one_hot * positive + uniform
'Loss function for char sequence. Depending on values of hyper parameters it applies label smoothing and can also ignore all null chars after the first one. Args: chars_logits: logits for predicted characters, shape=[batch_size, seq_length, num_char_classes]; chars_labels: ground truth ids of characters, shape=[batch_s...
def sequence_loss_fn(self, chars_logits, chars_labels):
mparams = self._mparams['sequence_loss_fn'] with tf.variable_scope('sequence_loss_fn/SLF'): if (mparams.label_smoothing > 0): smoothed_one_hot_labels = self.label_smoothing_regularization(chars_labels, mparams.label_smoothing) labels_list = tf.unstack(smoothed_one_hot_labels, axi...
'Creates all summaries for the model. Args: data: InputEndpoints namedtuple. endpoints: OutputEndpoints namedtuple. charset: A dictionary with mapping between character codes and unicode characters. Use the one provided by a dataset.charset. is_training: If True will create summary prefixes for training job, otherwise ...
def create_summaries(self, data, endpoints, charset, is_training):
def sname(label): prefix = ('train' if is_training else 'eval') return ('%s/%s' % (prefix, label)) max_outputs = 4 tf.summary.image(sname('image'), data.images, max_outputs=max_outputs) if is_training: tf.summary.image(sname('image/orig'), data.images_orig, max_outputs=max_output...
'Creates an init operation to restore weights from various checkpoints. Args: master_checkpoint: path to a checkpoint which contains all weights for the whole model. inception_checkpoint: path to a checkpoint which contains weights for the inception part only. Returns: a function to run initialization ops.'
def create_init_fn_to_restore(self, master_checkpoint, inception_checkpoint):
all_assign_ops = [] all_feed_dict = {} def assign_from_checkpoint(variables, checkpoint): logging.info('Request to re-store %d weights from %s', len(variables), checkpoint) if (not variables): logging.error("Can't find any variables to restore.") ...
'Wrapper for test session context manager with required initialization. Yields: A session object that should be used as a context manager.'
@contextlib.contextmanager
def initialized_session(self):
    """Wrapper for test session context manager with required initialization.

    Yields:
      A session object that should be used as a context manager.
    """
    with self.test_session() as session:
        # Run both initializers before handing the session to the caller.
        for init_op in (tf.global_variables_initializer(),
                        tf.local_variables_initializer()):
            session.run(init_op)
        yield session
'Returns a RandomUniform Tensor between -param_init and param_init.'
def RandomUniformInit(self, shape):
param_seed = self.utility.FLAGS.param_seed self.init_seed_counter += 1 return tf.random_uniform(shape, ((-1.0) * np.float32(self.utility.FLAGS.param_init).astype(self.utility.np_data_type[self.utility.FLAGS.data_type])), np.float32(self.utility.FLAGS.param_init).astype(self.utility.np_data_type[self.utility...
'Build the graph corresponding to the progressive BRNN model.'
def BuildGraph(self, input_codes):
layer_depth = self._config['layer_depth'] layer_count = self._config['layer_count'] code_shape = input_codes.get_shape() code_depth = code_shape[(-1)].value if (self._config['coded_layer_count'] > 0): prefix_depth = (self._config['coded_layer_count'] * layer_depth) if (code_depth < p...
'Build the Tensorflow graph corresponding to the entropy coder model. Args: input_codes: Tensor of size: batch_size x height x width x bit_depth corresponding to the codes to compress. The input codes are {-1, +1} codes.'
def BuildGraph(self, input_codes):
    """Build the Tensorflow graph corresponding to the entropy coder model.

    Args:
      input_codes: Tensor of size: batch_size x height x width x bit_depth
        corresponding to the codes to compress. The input codes are
        {-1, +1} codes.

    Raises:
      NotImplementedError: always; concrete models must override this.
    """
    raise NotImplementedError()
'Returns a default model configuration to be used for unit tests.'
def GetConfigStringForUnitTest(self):
    """Returns a default model configuration to be used for unit tests.

    The base implementation provides no configuration and returns None.
    """
    return None
'Context manager that handles graph, namescope, and nested blocks.'
@contextlib.contextmanager
def _BlockScope(self):
    """Context manager that handles graph, namescope, and nested blocks."""
    # Record this block on the nesting stack for the duration of the scope.
    self._stack.append(self)
    try:
        with self._graph.as_default(), self._OptionalNameScope(self._scope_str):
            yield self
    finally:
        self._stack.pop()
'Implementation of __call__().'
def _Apply(self, *args, **kwargs):
raise NotImplementedError()
'Creates a new variable. This function creates a variable, then returns a local copy created by Identity operation. To get the Variable class object, use LookupRef() method. Note that each time Variable class object is used as an input to an operation, Tensorflow will create a new Send/Recv pair. This hurts performance...
def NewVar(self, value, **kwargs):
    """Creates a tf.Variable, registers it with this block, and returns it.

    Args:
      value: initial value for the variable.
      **kwargs: forwarded to the tf.Variable constructor.

    Returns:
      The newly created tf.Variable.
    """
    variable = tf.Variable(value, **kwargs)
    # Track ownership so VariableList() / weight losses can find it later.
    self._variables.append(variable)
    return variable
'Returns bool if the block is initialized. By default, BlockBase assumes that a block is initialized when __call__() is executed for the first time. If this is an incorrect assumption for some subclasses, override this property in those subclasses. Returns: True if initialized, False otherwise.'
@property
def initialized(self):
    """Returns True once __call__() has been executed at least once.

    Subclasses with a different notion of initialization may override
    this property.
    """
    return self._called
'Asserts initialized property.'
def AssertInitialized(self):
    """Raises RuntimeError if this block has not been initialized yet."""
    if self.initialized:
        return
    raise RuntimeError('{} has not been initialized.'.format(self))
'Returns the list of all tensorflow variables used inside this block.'
def VariableList(self):
    """Returns the list of all tensorflow variables used inside this block."""
    # Variables of nested sub-blocks come first, followed by the variables
    # owned directly by this block.
    variables = []
    for subblock in self._subblocks:
        variables.extend(subblock.VariableList())
    variables.extend(self._VariableList())
    return variables
'Returns the list of all tensorflow variables owned by this block.'
def _VariableList(self):
self.AssertInitialized() return self._variables
'Returns L2 loss list of (almost) all variables used inside this block. When this method needs to be overridden, there are two choices. 1. Override CreateWeightLoss() to change the weight loss of all variables that belong to this block, both directly and indirectly. 2. Override _CreateWeightLoss() to change the weight ...
def CreateWeightLoss(self):
    """Returns L2 loss list of (almost) all variables used inside this block.

    Sub-block losses come first, followed by the losses of the variables
    owned directly by this block.
    """
    losses = []
    for subblock in self._subblocks:
        losses.extend(subblock.CreateWeightLoss())
    losses.extend(self._CreateWeightLoss())
    return losses
'Returns weight loss list of variables that belong to this block.'
def _CreateWeightLoss(self):
    """Returns weight loss list of variables that belong to this block."""
    self.AssertInitialized()
    # Losses are created inside the block's scope so they are grouped
    # under its name scope in the graph.
    with self._BlockScope():
        return list(map(tf.nn.l2_loss, self._variables))
'Creates update operations for this block and its sub-blocks.'
def CreateUpdateOps(self):
    """Creates update operations for this block and its sub-blocks."""
    # Sub-block updates first, then this block's own updates.
    ops = []
    for subblock in self._subblocks:
        ops.extend(subblock.CreateUpdateOps())
    ops.extend(self._CreateUpdateOps())
    return ops
'Creates update operations for this block.'
def _CreateUpdateOps(self):
self.AssertInitialized() return []
'Mark all the variables of this block as non-trainable. All the variables owned directly or indirectly (through subblocks) are marked as non trainable. This function along with CheckpointInitOp can be used to load a pretrained model that consists in only one part of the whole graph.'
def MarkAsNonTrainable(self):
    """Marks all variables of this block (and its subblocks) as non-trainable.

    Removes every variable returned by VariableList() from the
    TRAINABLE_VARIABLES collection. Together with CheckpointInitOp this
    can be used to load a pretrained model covering only part of the graph.
    """
    # Only valid once the block has been called and its variables exist.
    assert self._called
    trainable = tf.get_collection_ref(tf.GraphKeys.TRAINABLE_VARIABLES)
    for variable in self.VariableList():
        if variable in trainable:
            trainable.remove(variable)