Code | Summary
---|---|
Please provide a description of the function:def get_hours_for_week(self, week_start=None):
week_start = week_start if week_start else self.week_start
week_end = week_start + relativedelta(days=7)
return ProjectHours.objects.filter(
week_start__gte=week_start, week_start__lt=week_end)
|
[
"\n Gets all ProjectHours entries in the 7-day period beginning on\n week_start.\n "
] |
Please provide a description of the function:def get_users_from_project_hours(self, project_hours):
name = ('user__first_name', 'user__last_name')
users = project_hours.values_list('user__id', *name).distinct()\
.order_by(*name)
return users
|
[
"\n Gets a list of the distinct users included in the project hours\n entries, ordered by name.\n "
] |
Please provide a description of the function:def check_all(self, all_entries, *args, **kwargs):
all_overlaps = 0
while True:
try:
user_entries = all_entries.next()
except StopIteration:
return all_overlaps
else:
user_total_overlaps = self.check_entry(
user_entries, *args, **kwargs)
all_overlaps += user_total_overlaps
|
[
"\n Go through lists of entries, find overlaps among each, return the total\n "
] |
Please provide a description of the function:def check_entry(self, entries, *args, **kwargs):
verbosity = kwargs.get('verbosity', 1)
user_total_overlaps = 0
user = ''
for index_a, entry_a in enumerate(entries):
# Show the name the first time through
if index_a == 0:
if args and verbosity >= 1 or verbosity >= 2:
self.show_name(entry_a.user)
user = entry_a.user
for index_b in range(index_a, len(entries)):
entry_b = entries[index_b]
if entry_a.check_overlap(entry_b):
user_total_overlaps += 1
self.show_overlap(entry_a, entry_b, verbosity=verbosity)
if user_total_overlaps and user and verbosity >= 1:
overlap_data = {
'first': user.first_name,
'last': user.last_name,
'total': user_total_overlaps,
}
self.stdout.write('Total overlapping entries for user ' +
'%(first)s %(last)s: %(total)d' % overlap_data)
return user_total_overlaps
|
[
"\n With a list of entries, check each entry against every other\n "
] |
Please provide a description of the function:def find_start(self, **kwargs):
week = kwargs.get('week', False)
month = kwargs.get('month', False)
year = kwargs.get('year', False)
days = kwargs.get('days', 0)
# If no flags are True, set to the beginning of last billing window
# to assure we catch all recent violations
start = timezone.now() - relativedelta(months=1, day=1)
# Set the start date based on arguments provided through options
if week:
start = utils.get_week_start()
if month:
start = timezone.now() - relativedelta(day=1)
if year:
start = timezone.now() - relativedelta(day=1, month=1)
if days:
start = timezone.now() - relativedelta(days=days)
start -= relativedelta(hour=0, minute=0, second=0, microsecond=0)
return start
|
[
"\n Determine the starting point of the query using CLI keyword arguments\n "
] |
Please provide a description of the function:def find_users(self, *args):
if args:
names = reduce(lambda query, arg: query |
(Q(first_name__icontains=arg) | Q(last_name__icontains=arg)),
args, Q()) # noqa
users = User.objects.filter(names)
# If no args given, check every user
else:
users = User.objects.all()
# Display errors if no user was found
if not users.count() and args:
if len(args) == 1:
raise CommandError('No user was found with the name %s' % args[0])
else:
arg_list = ', '.join(args)
raise CommandError('No users found with the names: %s' % arg_list)
return users
|
[
"\n Returns the users to search given names as args.\n Return all users if there are no args provided.\n "
] |
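For illustration, the `reduce` above folds every name argument into a single OR-ed `Q` filter. A minimal standalone sketch (Django must be installed; the example names are made up):

from functools import reduce
from django.db.models import Q

args = ('alice', 'bob')
names = reduce(
    lambda query, arg: query | (Q(first_name__icontains=arg) | Q(last_name__icontains=arg)),
    args, Q())
# Equivalent to:
#   Q(first_name__icontains='alice') | Q(last_name__icontains='alice') |
#   Q(first_name__icontains='bob')   | Q(last_name__icontains='bob')
# and would be passed to User.objects.filter(names)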
Please provide a description of the function:def find_entries(self, users, start, *args, **kwargs):
forever = kwargs.get('all', False)
for user in users:
if forever:
entries = Entry.objects.filter(user=user).order_by('start_time')
else:
entries = Entry.objects.filter(
user=user, start_time__gte=start).order_by(
'start_time')
yield entries
|
[
"\n Find all entries for all users, from a given starting point.\n If no starting point is provided, all entries are returned.\n "
] |
Please provide a description of the function:def cbv_decorator(function_decorator):
def class_decorator(View):
View.dispatch = method_decorator(function_decorator)(View.dispatch)
return View
return class_decorator
|
[
"Allows a function-based decorator to be used on a CBV."
] |
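A minimal usage sketch of `cbv_decorator` with Django's `login_required`; the view class and template name are made up for illustration, and `cbv_decorator` is assumed to be in scope:

from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView


@cbv_decorator(login_required)
class DashboardView(TemplateView):
    # login_required now wraps DashboardView.dispatch()
    template_name = 'dashboard.html'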
Please provide a description of the function:def date_totals(entries, by):
date_dict = {}
for date, date_entries in groupby(entries, lambda x: x['date']):
if isinstance(date, datetime.datetime):
date = date.date()
d_entries = list(date_entries)
if by == 'user':
name = ' '.join((d_entries[0]['user__first_name'],
d_entries[0]['user__last_name']))
elif by == 'project':
name = d_entries[0]['project__name']
else:
name = d_entries[0][by]
pk = d_entries[0][by]
hours = get_hours_summary(d_entries)
date_dict[date] = hours
return name, pk, date_dict
|
[
"Yield a user's name and a dictionary of their hours"
] |
Please provide a description of the function:def get_project_totals(entries, date_headers, hour_type=None, overtime=False,
total_column=False, by='user'):
totals = [0 for date in date_headers]
rows = []
for thing, thing_entries in groupby(entries, lambda x: x[by]):
name, thing_id, date_dict = date_totals(thing_entries, by)
dates = []
for index, day in enumerate(date_headers):
if isinstance(day, datetime.datetime):
day = day.date()
if hour_type:
total = date_dict.get(day, {}).get(hour_type, 0)
dates.append(total)
else:
billable = date_dict.get(day, {}).get('billable', 0)
nonbillable = date_dict.get(day, {}).get('non_billable', 0)
total = billable + nonbillable
dates.append({
'day': day,
'billable': billable,
'nonbillable': nonbillable,
'total': total
})
totals[index] += total
if total_column:
dates.append(sum(dates))
if overtime:
dates.append(find_overtime(dates))
dates = [date or '' for date in dates]
rows.append((name, thing_id, dates))
if total_column:
totals.append(sum(totals))
totals = [t or '' for t in totals]
yield (rows, totals)
|
[
"\n Yield hour totals grouped by user and date. Optionally including overtime.\n "
] |
Please provide a description of the function:def get_payroll_totals(month_work_entries, month_leave_entries):
def _get_user_info(entries):
fname = entries[0].get('user__first_name', '') if entries else ''
lname = entries[0].get('user__last_name', '') if entries else ''
name = '{0} {1}'.format(fname, lname).strip()
user_id = entries[0].get('user', None) if entries else None
return {'name': name, 'user_id': user_id}
def _get_index(status, label):
if label in labels[status]:
return labels[status].index(label)
# Otherwise: update labels, rows, and totals to reflect the addition.
labels[status].append(label)
for row in rows:
row[status].insert(-1, {'hours': Decimal(), 'percent': Decimal()})
totals[status].insert(-1, {'hours': Decimal(), 'percent': Decimal()})
return len(labels[status]) - 1
def _construct_row(name, user_id=None):
row = {'name': name, 'user_id': user_id}
for status in labels.keys():
# Include an extra entry for summary.
row[status] = [{'hours': Decimal(), 'percent': Decimal()}
for i in range(len(labels[status]) + 1)]
row['work_total'] = Decimal()
row['grand_total'] = Decimal()
return row
def _add_percentages(row, statuses, total):
if total:
for status in statuses:
for i in range(len(row[status])):
p = row[status][i]['hours'] / total * 100
row[status][i]['percent'] = p
def _get_sum(row, statuses):
return sum([row[status][-1]['hours'] for status in statuses])
work_statuses = ('billable', 'nonbillable')
leave_statuses = ('leave', )
labels = dict([(status, []) for status in work_statuses + leave_statuses])
rows = []
totals = _construct_row('Totals')
for user, work_entries in groupby(month_work_entries, lambda e: e['user']):
work_entries = list(work_entries)
row = _construct_row(**_get_user_info(work_entries))
rows.append(row)
for entry in work_entries:
status = 'billable' if entry['billable'] else 'nonbillable'
label = entry['project__type__label']
index = _get_index(status, label)
hours = entry['hours']
row[status][index]['hours'] += hours
row[status][-1]['hours'] += hours
totals[status][index]['hours'] += hours
totals[status][-1]['hours'] += hours
leave_entries = month_leave_entries.filter(user=user)
status = 'leave'
for entry in leave_entries:
label = entry.get('project__name')
index = _get_index(status, label)
hours = entry.get('hours')
row[status][index]['hours'] += hours
row[status][-1]['hours'] += hours
totals[status][index]['hours'] += hours
totals[status][-1]['hours'] += hours
row['work_total'] = _get_sum(row, work_statuses)
_add_percentages(row, work_statuses, row['work_total'])
row['leave_total'] = _get_sum(row, leave_statuses)
_add_percentages(row, leave_statuses, row['leave_total'])
row['grand_total'] = row['work_total'] + row['leave_total']
totals['work_total'] = _get_sum(totals, work_statuses)
_add_percentages(totals, work_statuses, totals['work_total'])
totals['leave_total'] = _get_sum(totals, leave_statuses)
_add_percentages(totals, leave_statuses, totals['leave_total'])
totals['grand_total'] = totals['work_total'] + totals['leave_total']
if rows:
rows.append(totals)
return labels, rows
|
[
"Summarizes monthly work and leave totals, grouped by user.\n\n Returns (labels, rows).\n labels -> {'billable': [proj_labels], 'nonbillable': [proj_labels]}\n rows -> [{\n name: name of user,\n billable, nonbillable, leave: [\n {'hours': hours for label, 'percent': % of work or leave total}\n ],\n work_total: sum of billable and nonbillable hours,\n leave_total: sum of leave hours\n grand_total: sum of work_total and leave_total\n }]\n\n The last entry in each of the billable/nonbillable/leave lists contains a\n summary of the status. The last row contains sum totals for all other rows.\n ",
"Helper for getting the associated user's first and last name.",
"\n Returns the index in row[status] (where row is the row corresponding\n to the current user) where hours for the project label should be\n recorded.\n\n If the label does not exist, then it is added to the labels list.\n Each row and the totals row is updated accordingly.\n\n Requires that labels, rows, and totals are in scope.\n ",
"Constructs an empty row for the given name.",
"For each entry in each status, percent = hours / total",
"Sum the number of hours worked in given statuses."
] |
Please provide a description of the function:def validate(self, validation_instances, metrics, iteration=None):
'''
Evaluate this model on `validation_instances` during training and
output a report.
:param validation_instances: The data to use to validate the model.
:type validation_instances: list(instance.Instance)
:param metrics: Functions like those found in the `metrics` module
for quantifying the performance of the learner.
:type metrics: list(function)
:param iteration: A label (anything with a sensible `str()` conversion)
identifying the current iteration in output.
'''
if not validation_instances or not metrics:
return {}
split_id = 'val%s' % iteration if iteration is not None else 'val'
train_results = evaluate.evaluate(self, validation_instances,
metrics=metrics, split_id=split_id)
output.output_results(train_results, split_id)
return train_results
|
[] |
Please provide a description of the function:def score(self, eval_instances, verbosity=0):
'''
Return scores (negative log likelihoods) assigned to each testing
instance in `eval_instances`.
:param eval_instances: The data to use to evaluate the model.
Instances should have at least the `input` and `output` fields
populated. `output` is needed to define which score is to
be returned.
:param verbosity: The level of diagnostic output, relative to the
global --verbosity option. Used to adjust output when models
are composed of multiple sub-models.
:type eval_instances: list(instance.Instance)
:returns: list(float)
'''
if hasattr(self, '_using_default_combined') and self._using_default_combined:
raise NotImplementedError
self._using_default_separate = True
return self.predict_and_score(eval_instances, verbosity=verbosity)[1]
|
[] |
Please provide a description of the function:def predict_and_score(self, eval_instances, random=False, verbosity=0):
'''
Return most likely outputs and scores for the particular set of
outputs given in `eval_instances`, as a tuple. Return value should
be equivalent to the default implementation of
return (self.predict(eval_instances), self.score(eval_instances))
but subclasses can override this to combine the two calls and reduce
duplicated work. Either the two separate methods or this one (or all
of them) should be overridden.
:param eval_instances: The data to use to evaluate the model.
Instances should have at least the `input` and `output` fields
populated. `output` is needed to define which score is to
be returned.
:param random: If `True`, sample from the probability distribution
defined by the classifier rather than output the most likely
prediction.
:param verbosity: The level of diagnostic output, relative to the
global --verbosity option. Used to adjust output when models
are composed of multiple sub-models.
:type eval_instances: list(instance.Instance)
:returns: tuple(list(output_type), list(float))
'''
if hasattr(self, '_using_default_separate') and self._using_default_separate:
raise NotImplementedError
self._using_default_combined = True
return (self.predict(eval_instances, random=random, verbosity=verbosity),
self.score(eval_instances, verbosity=verbosity))
|
[] |
Please provide a description of the function:def load(self, infile):
'''
Deserialize a model from a stored file.
By default, unpickle an entire object. If `dump` is overridden to
use a different storage format, `load` should be as well.
:param file outfile: A file-like object from which to retrieve the
serialized model.
'''
model = pickle.load(infile)
self.__dict__.update(model.__dict__)
|
[] |
Please provide a description of the function:def iter_batches(iterable, batch_size):
'''
Given a sequence or iterable, yield batches from that iterable until it
runs out. Note that this function returns a generator, and also each
batch will be a generator.
:param iterable: The sequence or iterable to split into batches
:param int batch_size: The number of elements of `iterable` to iterate over
in each batch
>>> batches = iter_batches('abcdefghijkl', batch_size=5)
>>> list(next(batches))
['a', 'b', 'c', 'd', 'e']
>>> list(next(batches))
['f', 'g', 'h', 'i', 'j']
>>> list(next(batches))
['k', 'l']
>>> list(next(batches))
Traceback (most recent call last):
...
StopIteration
Warning: It is important to iterate completely over each batch before
requesting the next, or batch sizes will be truncated to 1. For example,
making a list of all batches before asking for the contents of each
will not work:
>>> batches = list(iter_batches('abcdefghijkl', batch_size=5))
>>> len(batches)
12
>>> list(batches[0])
['a']
However, making a list of each individual batch as it is received will
produce expected behavior (as shown in the first example).
'''
# http://stackoverflow.com/a/8290514/4481448
sourceiter = iter(iterable)
while True:
batchiter = islice(sourceiter, batch_size)
yield chain([batchiter.next()], batchiter)
|
[] |
Please provide a description of the function:def gen_batches(iterable, batch_size):
'''
Returns a generator object that yields batches from `iterable`.
See `iter_batches` for more details and caveats.
Note that `iter_batches` returns an iterator, which never supports `len()`,
`gen_batches` returns an iterable which supports `len()` if and only if
`iterable` does. This *may* be an iterator, but could be a `SizedGenerator`
object. To obtain an iterator (for example, to use the `next()` function),
call `iter()` on this iterable.
>>> batches = gen_batches('abcdefghijkl', batch_size=5)
>>> len(batches)
3
>>> for batch in batches:
... print(list(batch))
['a', 'b', 'c', 'd', 'e']
['f', 'g', 'h', 'i', 'j']
['k', 'l']
'''
def batches_thunk():
return iter_batches(iterable, batch_size)
try:
length = len(iterable)
except TypeError:
return batches_thunk()
num_batches = (length - 1) // batch_size + 1
return SizedGenerator(batches_thunk, length=num_batches)
|
[] |
Please provide a description of the function:def sized_imap(func, iterable, strict=False):
'''
Return an iterable whose elements are the result of applying the callable `func`
to each element of `iterable`. If `iterable` has a `len()`, then the iterable returned
by this function will have the same `len()`. Otherwise calling `len()` on the
returned iterable will raise `TypeError`.
:param func: The function to apply to each element of `iterable`.
:param iterable: An iterable whose objects will be mapped.
:param bool strict: If `True` and `iterable` does not support `len()`, raise an exception
immediately instead of returning an iterable that does not support `len()`.
'''
try:
length = len(iterable)
except TypeError:
if strict:
raise
else:
return imap(func, iterable)
return SizedGenerator(lambda: imap(func, iterable), length=length)
|
[] |
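Behaviour sketch for `sized_imap`, assuming the function above (and its `SizedGenerator` helper) are in scope in the same Python 2 environment as the surrounding code:

squares = sized_imap(lambda x: x * x, [1, 2, 3])
print(len(squares))    # 3 -- the input list supports len(), so the result does too
print(list(squares))   # [1, 4, 9]

gen = (x * x for x in [1, 2, 3])
no_len = sized_imap(lambda x: x + 1, gen)   # a generator has no len()
# len(no_len) would raise TypeError; strict=True would raise immediately instead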
Please provide a description of the function:def stripped(self, include_annotated=True):
'''
Return a version of this instance with all information removed that could be used
to "cheat" at test time: the true output and its annotated version, and the
reference to the full source.
If `include_annotated` is true, `annotated_input` will also be included (but not
`annotated_output` in either case).
'''
return Instance(self.input,
annotated_input=self.annotated_input if include_annotated else None,
alt_inputs=self.alt_inputs, alt_outputs=self.alt_outputs)
|
[] |
Please provide a description of the function:def inverted(self):
'''
Return a version of this instance with inputs replaced by outputs and vice versa.
'''
return Instance(input=self.output, output=self.input,
annotated_input=self.annotated_output,
annotated_output=self.annotated_input,
alt_inputs=self.alt_outputs,
alt_outputs=self.alt_inputs,
source=self.source)
|
[] |
Please provide a description of the function:def get_data_or_download(dir_name, file_name, url='', size='unknown'):
dname = os.path.join(stanza.DATA_DIR, dir_name)
fname = os.path.join(dname, file_name)
if not os.path.isdir(dname):
assert url, 'Could not locate data {}, and url was not specified. Cannot retrieve data.'.format(dname)
os.makedirs(dname)
if not os.path.isfile(fname):
assert url, 'Could not locate data {}, and url was not specified. Cannot retrieve data.'.format(fname)
logging.warn('downloading from {}. This file could potentially be *very* large! Actual size ({})'.format(url, size))
with open(fname, 'wb') as f:
f.write(get_from_url(url))
return fname
|
[
"Returns the data. if the data hasn't been downloaded, then first download the data.\n\n :param dir_name: directory to look in\n :param file_name: file name to retrieve\n :param url: if the file is not found, then download it from this url\n :param size: the expected size\n :return: path to the requested file\n "
] |
Please provide a description of the function:def add(self, word, count=1):
if word not in self:
super(Vocab, self).__setitem__(word, len(self))
self._counts[word] += count
return self[word]
|
[
"Add a word to the vocabulary and return its index.\n\n :param word: word to add to the dictionary.\n\n :param count: how many times to add the word.\n\n :return: index of the added word.\n\n WARNING: this function assumes that if the Vocab currently has N words, then\n there is a perfect bijection between these N words and the integers 0 through N-1.\n "
] |
Please provide a description of the function:def subset(self, words):
v = self.__class__(unk=self._unk)
unique = lambda seq: len(set(seq)) == len(seq)
assert unique(words)
for w in words:
if w in self:
v.add(w, count=self.count(w))
return v
|
[
"Get a new Vocab containing only the specified subset of words.\n\n If w is in words, but not in the original vocab, it will NOT be in the subset vocab.\n Indices will be in the order of `words`. Counts from the original vocab are preserved.\n\n :return (Vocab): a new Vocab object\n "
] |
Please provide a description of the function:def _index2word(self):
# TODO(kelvinguu): it would be nice to just use `dict.viewkeys`, but unfortunately those are not indexable
compute_index2word = lambda: self.keys() # this works because self is an OrderedDict
# create if it doesn't exist
try:
self._index2word_cache
except AttributeError:
self._index2word_cache = compute_index2word()
# update if it is out of date
if len(self._index2word_cache) != len(self):
self._index2word_cache = compute_index2word()
return self._index2word_cache
|
[
"Mapping from indices to words.\n\n WARNING: this may go out-of-date, because it is a copy, not a view into the Vocab.\n\n :return: a list of strings\n "
] |
Please provide a description of the function:def prune_rares(self, cutoff=2):
keep = lambda w: self.count(w) >= cutoff or w == self._unk
return self.subset([w for w in self if keep(w)])
|
[
"\n returns a **new** `Vocab` object that is similar to this one but with rare words removed.\n Note that the indices in the new `Vocab` will be remapped (because rare words will have been removed).\n\n :param cutoff: words occuring less than this number of times are removed from the vocabulary.\n\n :return: A new, pruned, vocabulary.\n\n NOTE: UNK is never pruned.\n "
] |
Please provide a description of the function:def sort_by_decreasing_count(self):
words = [w for w, ct in self._counts.most_common()]
v = self.subset(words)
return v
|
[
"Return a **new** `Vocab` object that is ordered by decreasing count.\n\n The word at index 1 will be most common, the word at index 2 will be\n next most common, and so on.\n\n :return: A new vocabulary sorted by decreasing count.\n\n NOTE: UNK will remain at index 0, regardless of its frequency.\n "
] |
Please provide a description of the function:def from_dict(cls, word2index, unk, counts=None):
try:
if word2index[unk] != 0:
raise ValueError('unk must be assigned index 0')
except KeyError:
raise ValueError('word2index must have an entry for unk.')
# check that word2index is a bijection
vals = set(word2index.values()) # unique indices
n = len(vals)
bijection = (len(word2index) == n) and (vals == set(range(n)))
if not bijection:
raise ValueError('word2index is not a bijection between N words and the integers 0 through N-1.')
# reverse the dictionary
index2word = {idx: word for word, idx in word2index.iteritems()}
vocab = cls(unk=unk)
for i in xrange(n):
vocab.add(index2word[i])
if counts:
matching_entries = set(word2index.keys()) == set(counts.keys())
if not matching_entries:
raise ValueError('entries of word2index do not match counts (did you include UNK?)')
vocab._counts = counts
return vocab
|
[
"Create Vocab from an existing string to integer dictionary.\n\n All counts are set to 0.\n\n :param word2index: a dictionary representing a bijection from N words to the integers 0 through N-1.\n UNK must be assigned the 0 index.\n\n :param unk: the string representing unk in word2index.\n\n :param counts: (optional) a Counter object mapping words to counts\n\n :return: a created vocab object.\n "
] |
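Hypothetical usage of `from_dict` (the import path is an assumption; indices must form a bijection with 0..N-1 and unk must map to index 0):

from stanza.text.vocab import Vocab   # assumed module path

word2index = {'<unk>': 0, 'the': 1, 'cat': 2}
vocab = Vocab.from_dict(word2index, unk='<unk>')
# vocab['<unk>'] == 0, vocab['the'] == 1, vocab['cat'] == 2
# Passing counts=Counter({'<unk>': 0, 'the': 5, 'cat': 2}) would preserve counts.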
Please provide a description of the function:def to_file(self, f):
for word in self._index2word:
count = self._counts[word]
f.write(u'{}\t{}\n'.format(word, count).encode('utf-8'))
|
[
"Write vocab to a file.\n\n :param (file) f: a file object, e.g. as returned by calling `open`\n\n File format:\n word0<TAB>count0\n word1<TAB>count1\n ...\n\n word with index 0 is on the 0th line and so on...\n "
] |
Please provide a description of the function:def from_file(cls, f):
word2index = {}
counts = Counter()
for i, line in enumerate(f):
word, count_str = line.split('\t')
word = word.decode('utf-8')
word2index[word] = i
counts[word] = float(count_str)
if i == 0:
unk = word
return cls.from_dict(word2index, unk, counts)
|
[
"Load vocab from a file.\n\n :param (file) f: a file object, e.g. as returned by calling `open`\n :return: a vocab object. The 0th line of the file is assigned to index 0, and so on...\n "
] |
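A round-trip sketch of the word<TAB>count file format, continuing the `vocab` from the `from_dict` example above and following the Python 2 bytes idioms of the surrounding code (the file name is arbitrary):

with open('vocab.txt', 'wb') as f:
    vocab.to_file(f)            # writes "word<TAB>count" lines, UNK on line 0
with open('vocab.txt', 'rb') as f:
    restored = Vocab.from_file(f)
# restored assigns index 0 to the word on line 0 (UNK), index 1 to line 1, ...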
Please provide a description of the function:def backfill_unk_emb(self, E, filled_words):
unk_emb = E[self[self._unk]]
for i, word in enumerate(self):
if word not in filled_words:
E[i] = unk_emb
|
[
" Backfills an embedding matrix with the embedding for the unknown token.\n\n :param E: original embedding matrix of dimensions `(vocab_size, emb_dim)`.\n :param filled_words: these words will not be backfilled with unk.\n\n NOTE: this function is for internal use.\n "
] |
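A pure-NumPy illustration of what the backfill does, standalone and runnable; the Vocab instance is replaced here by an explicit word list with UNK at index 0:

import numpy as np

words = ['<unk>', 'the', 'cat', 'zyzzyva']            # index 0 is UNK
E = np.random.uniform(-0.1, 0.1, size=(len(words), 4))
filled_words = {'the', 'cat'}                         # words with real embeddings

unk_emb = E[0]                                        # embedding of '<unk>'
for i, word in enumerate(words):
    if word not in filled_words:
        E[i] = unk_emb           # '<unk>' and 'zyzzyva' now share the UNK vector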
Please provide a description of the function:def get_embeddings(self, rand=None, dtype='float32'):
rand = rand if rand else lambda shape: np.random.uniform(-0.1, 0.1, size=shape)
embeddings = get_data_or_download('senna', 'embeddings.txt', self.embeddings_url)
words = get_data_or_download('senna', 'words.lst', self.words_url)
E = rand((len(self), self.n_dim)).astype(dtype)
seen = []
for word_emb in zip(self.gen_word_list(words), self.gen_embeddings(embeddings)):
w, e = word_emb
if w in self:
seen += [w]
E[self[w]] = e
self.backfill_unk_emb(E, set(seen))
return E
|
[
"\n Retrieves the embeddings for the vocabulary.\n\n :param rand: Random initialization function for out-of-vocabulary words. Defaults to `np.random.uniform(-0.1, 0.1, size=shape)`.\n :param dtype: Type of the matrix.\n :return: embeddings corresponding to the vocab instance.\n\n NOTE: this function will download potentially very large binary dumps the first time it is called.\n "
] |
Please provide a description of the function:def get_embeddings(self, rand=None, dtype='float32', corpus='common_crawl_48', n_dim=300):
assert corpus in self.settings, '{} not in supported corpus {}'.format(corpus, self.settings.keys())
self.n_dim, self.corpus, self.setting = n_dim, corpus, self.settings[corpus]
assert n_dim in self.setting.n_dims, '{} not in supported dimensions {}'.format(n_dim, self.setting.n_dims)
rand = rand if rand else lambda shape: np.random.uniform(-0.1, 0.1, size=shape)
zip_file = get_data_or_download('glove', '{}.zip'.format(self.corpus), self.setting.url, size=self.setting.size)
E = rand((len(self), self.n_dim)).astype(dtype)
n_dim = str(self.n_dim)
with zipfile.ZipFile(zip_file) as zf:
# should be only 1 txt file
names = [info.filename for info in zf.infolist() if
info.filename.endswith('.txt') and n_dim in info.filename]
if not names:
s = 'no .txt files found in zip file that matches {}-dim!'.format(n_dim)
s += '\n available files: {}'.format(names)
raise IOError(s)
name = names[0]
seen = []
with zf.open(name) as f:
for line in f:
toks = str(line).rstrip().split(' ')
word = toks[0]
if word in self:
seen += [word]
E[self[word]] = np.array([float(w) for w in toks[1:]], dtype=dtype)
self.backfill_unk_emb(E, set(seen))
return E
|
[
"\n Retrieves the embeddings for the vocabulary.\n\n :param rand: Random initialization function for out-of-vocabulary words. Defaults to `np.random.uniform(-0.1, 0.1, size=shape)`.\n :param dtype: Type of the matrix.\n :param corpus: Corpus to use. Please see `GloveVocab.settings` for available corpus.\n :param n_dim: dimension of vectors to use. Please see `GloveVocab.settings` for available corpus.\n :return: embeddings corresponding to the vocab instance.\n\n NOTE: this function will download potentially very large binary dumps the first time it is called.\n "
] |
Please provide a description of the function:def to_unicode(s):
if not isinstance(s, six.string_types):
raise ValueError("{} must be str or unicode.".format(s))
if not isinstance(s, six.text_type):
s = six.text_type(s, 'utf-8')
return s
|
[
"Return the object as unicode (only matters for Python 2.x).\n\n If s is already Unicode, return s as is.\n Otherwise, assume that s is UTF-8 encoded, and convert to Unicode.\n\n :param (basestring) s: a str, unicode or other basestring object\n :return (unicode): the object as unicode\n "
] |
Please provide a description of the function:def best_gpu(max_usage=USAGE_THRESHOLD, verbose=False):
'''
Return the name of a device to use, either 'cpu' or 'gpu0', 'gpu1',...
The least-used GPU with usage under the constant threshold will be chosen;
ties are broken randomly.
'''
try:
proc = subprocess.Popen("nvidia-smi", stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = proc.communicate()
if error:
raise Exception(error)
except Exception, e:
sys.stderr.write("Couldn't run nvidia-smi to find best GPU, using CPU: %s\n" % str(e))
sys.stderr.write("(This is normal if you have no GPU or haven't configured CUDA.)\n")
return "cpu"
usages = parse_output(output)
pct_usage = [max(u.mem, cpu_backoff(u)) for u in usages]
max_usage = min(max_usage, min(pct_usage))
open_gpus = [index for index, usage in enumerate(usages)
if max(usage.mem, cpu_backoff(usage)) <= max_usage]
if verbose:
print('Best GPUs:')
for index in open_gpus:
print('%d: %s fan, %s mem, %s cpu' %
(index, format_percent(usages[index].fan),
format_percent(usages[index].mem),
format_percent(usages[index].cpu)))
if open_gpus:
result = "gpu" + str(random.choice(open_gpus))
else:
result = "cpu"
if verbose:
print('Chosen: ' + result)
return result
|
[] |
Please provide a description of the function:def parse_bytes(field):
'''
>>> parse_bytes('24B')
24.0
>>> parse_bytes('4MiB')
4194304.0
'''
if field[-1] in 'bB':
field = field[:-1]
try:
for i, prefix in enumerate('KMGTPEZ'):
if field.endswith(prefix + 'i'):
factor = 2 ** (10 * (i + 1))
return float(field[:-2]) * factor
return float(field)
except ValueError:
return None
|
[] |
Please provide a description of the function:def bind_theano(device=None, max_usage=USAGE_THRESHOLD, verbose=True):
'''
Initialize Theano to use a certain device. If `device` is None (the
default), use the device returned by calling `best_gpu`
with the same parameters.
This needs to be called *before* importing Theano. Currently (Dec 2015)
Theano has no way of switching devices after it is bound (which happens
on import).
'''
if device is None:
device = best_gpu(max_usage, verbose=verbose)
if device and device != 'cpu':
import unittest
try:
import theano.sandbox.cuda
theano.sandbox.cuda.use(device)
except (ImportError, unittest.case.SkipTest):
import theano.gpuarray
theano.gpuarray.use(device.replace('gpu', 'cuda'))
|
[] |
Please provide a description of the function:def evaluate(learner, eval_data, metrics, metric_names=None, split_id=None,
write_data=False):
'''
Evaluate `learner` on the instances in `eval_data` according to each
metric in `metric`, and return a dictionary summarizing the values of
the metrics.
Dump the predictions, scores, and metric summaries in JSON format
to "{predictions|scores|results}.`split_id`.json" in the run directory.
:param learner: The model to be evaluated.
:type learner: learner.Learner
:param eval_data: The data to use to evaluate the model.
:type eval_data: list(instance.Instance)
:param metrics: An iterable of functions that defines the standard by
which predictions are evaluated.
:type metrics: Iterable(function(eval_data: list(instance.Instance),
predictions: list(output_type),
scores: list(float)) -> list(float))
:param bool write_data: If `True`, write out the instances in `eval_data`
as JSON, one per line, to the file `data.<split_id>.jsons`.
'''
if metric_names is None:
metric_names = [
(metric.__name__ if hasattr(metric, '__name__')
else ('m%d' % i))
for i, metric in enumerate(metrics)
]
split_prefix = split_id + '.' if split_id else ''
if write_data:
config.dump([inst.__dict__ for inst in eval_data],
'data.%sjsons' % split_prefix,
default=json_default, lines=True)
results = {split_prefix + 'num_params': learner.num_params}
predictions, scores = learner.predict_and_score(eval_data)
config.dump(predictions, 'predictions.%sjsons' % split_prefix, lines=True)
config.dump(scores, 'scores.%sjsons' % split_prefix, lines=True)
for metric, metric_name in zip(metrics, metric_names):
prefix = split_prefix + (metric_name + '.' if metric_name else '')
inst_outputs = metric(eval_data, predictions, scores, learner)
if metric_name in ['data', 'predictions', 'scores']:
warnings.warn('not outputting metric scores for metric "%s" because it would shadow '
'another results file')
else:
config.dump(inst_outputs, '%s.%sjsons' % (metric_name, split_prefix), lines=True)
mean = np.mean(inst_outputs)
gmean = np.exp(np.log(inst_outputs).mean())
sum = np.sum(inst_outputs)
std = np.std(inst_outputs)
results.update({
prefix + 'mean': mean,
prefix + 'gmean': gmean,
prefix + 'sum': sum,
prefix + 'std': std,
# prefix + 'ci_lower': ci_lower,
# prefix + 'ci_upper': ci_upper,
})
config.dump_pretty(results, 'results.%sjson' % split_prefix)
return results
|
[] |
Please provide a description of the function:def json2pb(pb, js, useFieldNumber=False):
''' convert JSON string to google.protobuf.descriptor instance '''
for field in pb.DESCRIPTOR.fields:
if useFieldNumber:
key = field.number
else:
key = field.name
if key not in js:
continue
if field.type == FD.TYPE_MESSAGE:
pass
elif field.type in _js2ftype:
ftype = _js2ftype[field.type]
else:
raise ParseError("Field %s.%s of type '%d' is not supported" % (pb.__class__.__name__, field.name, field.type, ))
value = js[key]
if field.label == FD.LABEL_REPEATED:
pb_value = getattr(pb, field.name, None)
for v in value:
if field.type == FD.TYPE_MESSAGE:
json2pb(pb_value.add(), v, useFieldNumber=useFieldNumber)
else:
pb_value.append(ftype(v))
else:
if field.type == FD.TYPE_MESSAGE:
json2pb(getattr(pb, field.name, None), value, useFieldNumber=useFieldNumber)
else:
setattr(pb, field.name, ftype(value))
return pb
|
[] |
Please provide a description of the function:def pb2json(pb, useFieldNumber=False):
''' convert google.protobuf.descriptor instance to JSON string '''
js = {}
# fields = pb.DESCRIPTOR.fields #all fields
fields = pb.ListFields() #only filled (including extensions)
for field,value in fields:
if useFieldNumber:
key = field.number
else:
key = field.name
if field.type == FD.TYPE_MESSAGE:
ftype = partial(pb2json, useFieldNumber=useFieldNumber)
elif field.type in _ftype2js:
ftype = _ftype2js[field.type]
else:
raise ParseError("Field %s.%s of type '%d' is not supported" % (pb.__class__.__name__, field.name, field.type, ))
if field.label == FD.LABEL_REPEATED:
js_value = []
for v in value:
js_value.append(ftype(v))
else:
js_value = ftype(value)
js[key] = js_value
return js
|
[] |
Please provide a description of the function:def modified_ngram_precision(references, pred, n):
'''
Borrowed from the ntlk BLEU implementation:
http://www.nltk.org/_modules/nltk/translate/bleu_score.html
>>> modified_ngram_precision([['the', 'fat', 'cat', 'the', 'rat']],
... ['the', 'the', 'the', 'the', 'the'], 1)
(2, 5)
>>> modified_ngram_precision([['the', 'fat', 'cat', 'the', 'rat']],
... ['the', 'fat', 'the', 'rat'], 2)
(2, 3)
'''
counts = Counter(iter_ngrams(pred, n))
max_counts = {}
for reference in references:
reference_counts = Counter(iter_ngrams(reference, n))
for ngram in counts:
max_counts[ngram] = max(max_counts.get(ngram, 0),
reference_counts[ngram])
clipped_counts = {ngram: min(count, max_counts[ngram])
for ngram, count in counts.items()}
numerator = sum(clipped_counts.values())
denominator = sum(counts.values())
return numerator, denominator
|
[] |
Please provide a description of the function:def closest_length(refs, pred):
'''
>>> closest_length(['1234', '12345', '1'], '123')
4
>>> closest_length(['123', '12345', '1'], '12')
1
'''
smallest_diff = float('inf')
closest_length = float('inf')
for ref in refs:
diff = abs(len(ref) - len(pred))
if diff < smallest_diff or (diff == smallest_diff and len(ref) < closest_length):
smallest_diff = diff
closest_length = len(ref)
return closest_length
|
[] |
Please provide a description of the function:def _request(self, text, properties, retries=0):
text = to_unicode(text) # ensures unicode
try:
r = requests.post(self.server, params={'properties': str(properties)}, data=text.encode('utf-8'))
r.raise_for_status()
return r
except requests.ConnectionError as e:
if retries > 5:
logging.critical('Max retries exceeded!')
raise e
else:
logging.critical(repr(e))
logging.critical("It seems like we've temporarily ran out of ports. Taking a 30s break...")
time.sleep(30)
logging.critical("Retrying...")
return self._request(text, properties, retries=retries+1)
except requests.HTTPError:
if r.text == "CoreNLP request timed out. Your document may be too long.":
raise TimeoutException(r.text)
else:
raise AnnotationException(r.text)
|
[
"Send a request to the CoreNLP server.\n\n :param (str | unicode) text: raw text for the CoreNLPServer to parse\n :param (dict) properties: properties that the server expects\n :return: request result\n "
] |
Please provide a description of the function:def annotate_json(self, text, annotators=None):
# WARN(chaganty): I'd like to deprecate this function -- we
# should just use annotate().json
#properties = {
# 'annotators': ','.join(annotators or self.default_annotators),
# 'outputFormat': 'json',
#}
#return self._request(text, properties).json(strict=False)
doc = self.annotate(text, annotators)
return doc.json
|
[
"Return a JSON dict from the CoreNLP server, containing annotations of the text.\n\n :param (str) text: Text to annotate.\n :param (list[str]) annotators: a list of annotator names\n\n :return (dict): a dict of annotations\n "
] |
Please provide a description of the function:def annotate_proto(self, text, annotators=None):
properties = {
'annotators': ','.join(annotators or self.default_annotators),
'outputFormat': 'serialized',
'serializer': 'edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer'
}
r = self._request(text, properties)
buffer = r.content # bytes
size, pos = _DecodeVarint(buffer, 0)
buffer = buffer[pos:(pos + size)]
doc = CoreNLP_pb2.Document()
doc.ParseFromString(buffer)
return doc
|
[
"Return a Document protocol buffer from the CoreNLP server, containing annotations of the text.\n\n :param (str) text: text to be annotated\n :param (list[str]) annotators: a list of annotator names\n\n :return (CoreNLP_pb2.Document): a Document protocol buffer\n "
] |
Please provide a description of the function:def annotate(self, text, annotators=None):
doc_pb = self.annotate_proto(text, annotators)
return AnnotatedDocument.from_pb(doc_pb)
|
[
"Return an AnnotatedDocument from the CoreNLP server.\n\n :param (str) text: text to be annotated\n :param (list[str]) annotators: a list of annotator names\n\n See a list of valid annotator names here:\n http://stanfordnlp.github.io/CoreNLP/annotators.html\n\n :return (AnnotatedDocument): an annotated document\n "
] |
Please provide a description of the function:def from_pb(cls, pb):
obj = cls._from_pb(pb)
obj._pb = pb
return obj
|
[
"Instantiate the object from a protocol buffer.\n\n Args:\n pb (protobuf)\n\n Save a reference to the protocol buffer on the object.\n "
] |
Please provide a description of the function:def from_tokens(cls, text, toks):
sentence_pb = CoreNLP_pb2.Sentence()
sentence_pb.characterOffsetBegin = 0
sentence_pb.characterOffsetEnd = len(text)
sentence_pb.sentenceIndex = 0
sentence_pb.tokenOffsetBegin = 0
sentence_pb.tokenOffsetEnd = len(toks)
# Track progress in sentence and tokens.
char_idx = 0
tok_idx = 0
buf = ""
token_pb = None
while char_idx < len(text):
tok = toks[tok_idx]
# Scan to the beginning of the token.
if text[char_idx] != tok[0]:
buf += text[char_idx]
char_idx += 1
# Aha! we have found the token. Assert that they match.
else:
assert text[char_idx:char_idx+len(tok)] == tok, "text did not match a token"
# Create a new Token from this text.
if token_pb: token_pb.after = buf
token_pb = sentence_pb.token.add()
token_pb.before = buf
token_pb.beginChar = char_idx
token_pb.endChar = char_idx + len(tok)
# TODO(chaganty): potentially handle -LRB-?
token_pb.value = tok
token_pb.word = tok
token_pb.originalText = text[char_idx:char_idx+len(tok)]
buf = ""
char_idx += len(tok)
tok_idx += 1
if token_pb: token_pb.after = buf
assert tok_idx == len(toks), "text does not match all tokens"
return AnnotatedSentence.from_pb(sentence_pb)
|
[
"\n A helper method that allows you to construct an AnnotatedSentence with just token information:\n :param (str) text -- full text of the sentence.\n :param (list[str]) toks -- tokens\n "
] |
Please provide a description of the function:def depparse(self, mode="enhancedPlusPlus"):
assert mode in [
"basic",
"alternative",
"collapsedCCProcessed",
"collapsed",
"enhanced",
"enhancedPlusPlus", ], "Invalid mode"
dep_pb = getattr(self.pb, mode + "Dependencies")
if dep_pb is None:
raise AttributeError("No dependencies for mode: " + mode)
else:
tree = AnnotatedDependencyParseTree(dep_pb)
tree.sentence = self
return tree
|
[
"\n Retrieves the appropriate dependency parse.\n Must be one of:\n - basic\n - alternative\n - collapsedCCProcessed\n - collapsed\n - enhanced\n - enhancedPlusPlus\n "
] |
Please provide a description of the function:def to_json(self):
edges = []
for root in self.roots:
edges.append({
'governer': 0,
'dep': "root",
'dependent': root+1,
'governergloss': "root",
'dependentgloss': self.sentence[root].word,
})
for gov, dependents in self.graph.items():
for dependent, dep in dependents:
edges.append({
'governer': gov+1,
'dep': dep,
'dependent': dependent+1,
'governergloss': self.sentence[gov].word,
'dependentgloss': self.sentence[dependent].word,
})
return edges
|
[
"\n Represented as a list of edges:\n dependent: index of child\n dep: dependency label\n governer: index of parent\n dependentgloss: gloss of parent\n governergloss: gloss of parent\n "
] |
Please provide a description of the function:def character_span(self):
begin, end = self.token_span
return (self.sentence[begin].character_span[0], self.sentence[end-1].character_span[-1])
|
[
"\n Returns the character span of the token\n "
] |
Please provide a description of the function:def log_proto(self, proto, step_num):
self.summ_writer.add_summary(proto, step_num)
return proto
|
[
"Log a Summary protobuf to the event file.\n\n :param proto: a Summary protobuf\n :param step_num: the iteration number at which this value was logged\n "
] |
Please provide a description of the function:def log(self, key, val, step_num):
try:
ph, summ = self.summaries[key]
except KeyError:
# if we haven't defined a variable for this key, define one
with self.g.as_default():
ph = tf.placeholder(tf.float32, (), name=key) # scalar
summ = tf.scalar_summary(key, ph)
self.summaries[key] = (ph, summ)
summary_str = self.sess.run(summ, {ph: val})
self.summ_writer.add_summary(summary_str, step_num)
return val
|
[
"Directly log a scalar value to the event file.\n\n :param string key: a name for the value\n :param val: a float\n :param step_num: the iteration number at which this value was logged\n "
] |
Please provide a description of the function:def unescape_sql(inp):
if inp.startswith('"') and inp.endswith('"'):
inp = inp[1:-1]
return inp.replace('""','"').replace('\\\\','\\')
|
[
"\n :param inp: an input string to be unescaped\n :return: return the unescaped version of the string.\n "
] |
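Expected behaviour of `unescape_sql`, assuming the function above is in scope:

print(unescape_sql('"He said ""hi"""'))   # He said "hi"
print(unescape_sql('plain value'))        # plain value  (no surrounding quotes, unchanged)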
Please provide a description of the function:def parse_psql_array(inp):
inp = unescape_sql(inp)
# Strip '{' and '}'
if inp.startswith("{") and inp.endswith("}"):
inp = inp[1:-1]
lst = []
elem = ""
in_quotes, escaped = False, False
for ch in inp:
if escaped:
elem += ch
escaped = False
elif ch == '"':
in_quotes = not in_quotes
escaped = False
elif ch == '\\':
escaped = True
else:
if in_quotes:
elem += ch
elif ch == ',':
lst.append(elem)
elem = ""
else:
elem += ch
escaped = False
if len(elem) > 0:
lst.append(elem)
return lst
|
[
"\n :param inp: a string encoding an array\n :return: the array of elements as represented by the input\n "
] |
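And the array parser on a couple of Postgres-style array literals (same in-scope assumption):

print(parse_psql_array('{a,b,"c,d"}'))   # ['a', 'b', 'c,d']
print(parse_psql_array('{1,2,3}'))       # ['1', '2', '3']  (elements stay strings)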
Please provide a description of the function:def save(self, fname):
with open(fname, 'wb') as f:
json.dump(self, f)
|
[
" Saves the dictionary in json format\n :param fname: file to save to\n "
] |
Please provide a description of the function:def load(cls, fname):
with open(fname) as f:
return Config(**json.load(f))
|
[
" Loads the dictionary from json file\n :param fname: file to load from\n :return: loaded dictionary\n "
] |
Please provide a description of the function:def read_events(stream):
'''
Read and return as a generator a sequence of Event protos from
file-like object `stream`.
'''
header_size = struct.calcsize('<QI')
len_size = struct.calcsize('<Q')
footer_size = struct.calcsize('<I')
while True:
header = stream.read(header_size)
if len(header) == 0:
break
elif len(header) < header_size:
raise SummaryReaderException('unexpected EOF (expected a %d-byte header, '
'got %d bytes)' % (header_size, len(header)))
data_len, len_crc = struct.unpack('<QI', header)
len_crc_actual = masked_crc(header[:len_size])
if len_crc_actual != len_crc:
raise SummaryReaderException('incorrect length CRC (%d != %d)' %
(len_crc_actual, len_crc))
data = stream.read(data_len)
if len(data) < data_len:
raise SummaryReaderException('unexpected EOF (expected %d bytes, got %d)' %
(data_len, len(data)))
yield Event.FromString(data)
footer = stream.read(footer_size)
if len(footer) < footer_size:
raise SummaryReaderException('unexpected EOF (expected a %d-byte footer, '
'got %d bytes)' % (footer_size, len(footer)))
data_crc, = struct.unpack('<I', footer)
data_crc_actual = masked_crc(data)
if data_crc_actual != data_crc:
raise SummaryReaderException('incorrect data CRC (%d != %d)' %
(data_crc_actual, data_crc))
|
[] |
Please provide a description of the function:def write_events(stream, events):
'''
Write a sequence of Event protos to file-like object `stream`.
'''
for event in events:
data = event.SerializeToString()
len_field = struct.pack('<Q', len(data))
len_crc = struct.pack('<I', masked_crc(len_field))
data_crc = struct.pack('<I', masked_crc(data))
stream.write(len_field)
stream.write(len_crc)
stream.write(data)
stream.write(data_crc)
|
[] |
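A round-trip sketch for `write_events`/`read_events` through an in-memory stream; it assumes both functions are in scope and that the `Event` proto is TensorFlow's standard one (the import path is an assumption about what this module uses):

import io
from tensorflow.core.util.event_pb2 import Event   # assumed to match the Event used above

buf = io.BytesIO()
write_events(buf, [Event(wall_time=0.0, step=1), Event(wall_time=1.0, step=2)])
buf.seek(0)
print([event.step for event in read_events(buf)])   # [1, 2]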
Please provide a description of the function:def log_image(self, step, tag, val):
'''
Write an image event.
:param int step: Time step (x-axis in TensorBoard graphs)
:param str tag: Label for this value
:param numpy.ndarray val: Image in RGB format with values from
0 to 255; a 3-D array with index order (row, column, channel).
`val.shape[-1] == 3`
'''
# TODO: support floating-point tensors, 4-D tensors, grayscale
if len(val.shape) != 3:
raise ValueError('`log_image` value should be a 3-D tensor, instead got shape %s' %
(val.shape,))
if val.shape[2] != 3:
raise ValueError('Last dimension of `log_image` value should be 3 (RGB), '
'instead got shape %s' %
(val.shape,))
fakefile = StringIO()
png.Writer(size=(val.shape[1], val.shape[0])).write(
fakefile, val.reshape(val.shape[0], val.shape[1] * val.shape[2]))
encoded = fakefile.getvalue()
# https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/summary.proto
RGB = 3
image = Summary.Image(height=val.shape[0], width=val.shape[1],
colorspace=RGB, encoded_image_string=encoded)
summary = Summary(value=[Summary.Value(tag=tag, image=image)])
self._add_event(step, summary)
|
[] |
Please provide a description of the function:def log_scalar(self, step, tag, val):
'''
Write a scalar event.
:param int step: Time step (x-axis in TensorBoard graphs)
:param str tag: Label for this value
:param float val: Scalar to graph at this time step (y-axis)
'''
summary = Summary(value=[Summary.Value(tag=tag, simple_value=float(np.float32(val)))])
self._add_event(step, summary)
|
[] |
Please provide a description of the function:def log_histogram(self, step, tag, val):
'''
Write a histogram event.
:param int step: Time step (x-axis in TensorBoard graphs)
:param str tag: Label for this value
:param numpy.ndarray val: Arbitrary-dimensional array containing
values to be aggregated in the resulting histogram.
'''
hist = Histogram()
hist.add(val)
summary = Summary(value=[Summary.Value(tag=tag, histo=hist.encode_to_proto())])
self._add_event(step, summary)
|
[] |
Please provide a description of the function:def flush(self):
'''
Force all queued events to be written to the events file.
The queue will automatically be flushed at regular time intervals,
when it grows too large, and at program exit (with the usual caveats
of `atexit`: this won't happen if the program is killed with a
signal or `os._exit()`).
'''
if self.queue:
with open(self.filename, 'ab') as outfile:
write_events(outfile, self.queue)
del self.queue[:]
|
[] |
Please provide a description of the function:def options(allow_partial=False, read=False):
'''
Get the object containing the values of the parsed command line options.
:param bool allow_partial: If `True`, ignore unrecognized arguments and allow
the options to be re-parsed next time `options` is called. This
also suppresses overwrite checking (the check is performed the first
time `options` is called with `allow_partial=False`).
:param bool read: If `True`, do not create or overwrite a `config.json`
file, and do not check whether such file already exists. Use for scripts
that read from the run directory rather than/in addition to writing to it.
:return argparse.Namespace: An object storing the values of the options specified
to the parser returned by `get_options_parser()`.
'''
global _options
if allow_partial:
opts, extras = _options_parser.parse_known_args()
if opts.run_dir:
mkdirp(opts.run_dir)
return opts
if _options is None:
# Add back in the help option (only show help and quit once arguments are finalized)
_options_parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
help='show this help message and exit')
_options = _options_parser.parse_args()
if _options.run_dir:
mkdirp(_options.run_dir, overwrite=_options.overwrite or read)
if not read:
options_dump = vars(_options)
# People should be able to rerun an experiment with -C config.json safely.
# Don't include the overwrite option, since using a config from an experiment
# done with -O should still require passing -O for it to be overwritten again.
del options_dump['overwrite']
# And don't write the name of the other config file in this new one! It's
# probably harmless (config file interpretation can't be chained with the
# config option), but still confusing.
del options_dump['config']
dump_pretty(options_dump, 'config.json')
return _options
|
[] |
Please provide a description of the function:def mkdirp(dirname, overwrite=True):
'''
Create a directory at the path given by `dirname`, if it doesn't
already exist. If `overwrite` is False, raise an error when trying
to create a directory that already has a config.json file in it.
Otherwise do nothing if the directory already exists. (Note that an
existing directory without a config.json will not raise an error
regardless.)
http://stackoverflow.com/a/14364249/4481448
'''
try:
os.makedirs(dirname)
except OSError:
if not os.path.isdir(dirname):
raise
config_path = os.path.join(dirname, 'config.json')
if not overwrite and os.path.lexists(config_path):
raise OverwriteError('%s exists and already contains a config.json. To allow '
'overwriting, pass the -O/--overwrite option.' % dirname)
|
[] |
Please provide a description of the function:def inner_products(self, vec):
products = self.array.dot(vec)
return self._word_to_score(np.arange(len(products)), products)
|
[
"Get the inner product of a vector with every embedding.\n\n :param (np.array) vector: the query vector\n\n :return (list[tuple[str, float]]): a map of embeddings to inner products\n "
] |
Please provide a description of the function:def _word_to_score(self, ids, scores):
# should be 1-D vectors
assert len(ids.shape) == 1
assert ids.shape == scores.shape
w2s = {}
for i in range(len(ids)):
w2s[self.vocab.index2word(ids[i])] = scores[i]
return w2s
|
[
"Return a map from each word to its score.\n\n :param (np.array) ids: a vector of word ids\n :param (np.array) scores: a vector of scores\n\n :return (dict[unicode, float]): a map from each word (unicode) to its score (float)\n "
] |
Please provide a description of the function:def k_nearest(self, vec, k):
nbr_score_pairs = self.inner_products(vec)
return sorted(nbr_score_pairs.items(), key=lambda x: x[1], reverse=True)[:k]
|
[
"Get the k nearest neighbors of a vector (in terms of highest inner products).\n\n :param (np.array) vec: query vector\n :param (int) k: number of top neighbors to return\n\n :return (list[tuple[str, float]]): a list of (word, score) pairs, in descending order\n "
] |
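The core of `inner_products`/`k_nearest` in plain NumPy, as a standalone sketch with toy vectors rather than the Embeddings class itself:

import numpy as np

array = np.array([[1.0, 0.0],    # 'cat'
                  [0.8, 0.2],    # 'kitten'
                  [0.0, 1.0]])   # 'car'
index2word = ['cat', 'kitten', 'car']
query = np.array([1.0, 0.1])

scores = array.dot(query)              # inner product with every embedding row
top = np.argsort(-scores)[:2]          # indices of the 2 highest scores
print([(index2word[i], float(scores[i])) for i in top])
# [('cat', 1.0), ('kitten', 0.82)]  (up to float rounding)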
Please provide a description of the function:def _init_lsh_forest(self):
import sklearn.neighbors
lshf = sklearn.neighbors.LSHForest()
lshf.fit(self.array)
return lshf
|
[
"Construct an LSH forest for nearest neighbor search."
] |
Please provide a description of the function:def k_nearest_approx(self, vec, k):
if not hasattr(self, 'lshf'):
self.lshf = self._init_lsh_forest()
# TODO(kelvin): make this inner product score, to be consistent with k_nearest
distances, neighbors = self.lshf.kneighbors([vec], n_neighbors=k, return_distance=True)
scores = np.subtract(1, distances)
nbr_score_pairs = self._word_to_score(np.squeeze(neighbors), np.squeeze(scores))
return sorted(nbr_score_pairs.items(), key=lambda x: x[1], reverse=True)
|
[
"Get the k nearest neighbors of a vector (in terms of cosine similarity).\n\n :param (np.array) vec: query vector\n :param (int) k: number of top neighbors to return\n\n :return (list[tuple[str, float]]): a list of (word, cosine similarity) pairs, in descending order\n "
] |
Please provide a description of the function:def to_dict(self):
d = {}
for word, idx in self.vocab.iteritems():
d[word] = self.array[idx].tolist()
return d
|
[
"Convert to dictionary.\n\n :return (dict): A dict mapping from strings to vectors.\n "
] |
Please provide a description of the function:def to_files(self, array_file, vocab_file):
logging.info('Writing array...')
np.save(array_file, self.array)
logging.info('Writing vocab...')
self.vocab.to_file(vocab_file)
|
[
"Write the embedding matrix and the vocab to files.\n\n :param (file) array_file: file to write array to\n :param (file) vocab_file: file to write vocab to\n "
] |
Please provide a description of the function:def from_files(cls, array_file, vocab_file):
logging.info('Loading array...')
array = np.load(array_file)
logging.info('Loading vocab...')
vocab = Vocab.from_file(vocab_file)
return cls(array, vocab)
|
[
"Load the embedding matrix and the vocab from files.\n\n :param (file) array_file: file to read array from\n :param (file) vocab_file: file to read vocab from\n\n :return (Embeddings): an Embeddings object\n "
] |
Please provide a description of the function:def to_file_path(self, path_prefix):
with self._path_prefix_to_files(path_prefix, 'w') as (array_file, vocab_file):
self.to_files(array_file, vocab_file)
|
[
"Write the embedding matrix and the vocab to <path_prefix>.npy and <path_prefix>.vocab.\n\n :param (str) path_prefix: path prefix of the saved files\n "
] |
Please provide a description of the function:def from_file_path(cls, path_prefix):
with cls._path_prefix_to_files(path_prefix, 'r') as (array_file, vocab_file):
return cls.from_files(array_file, vocab_file)
|
[
"Load the embedding matrix and the vocab from <path_prefix>.npy and <path_prefix>.vocab.\n\n :param (str) path_prefix: path prefix of the saved files\n "
] |
Please provide a description of the function:def get_uuids():
result = shell('cl ls -w {} -u'.format(worksheet))
uuids = result.split('\n')
uuids = uuids[1:-1] # trim non uuids
return uuids
|
[
"List all bundle UUIDs in the worksheet."
] |
Please provide a description of the function:def open_file(uuid, path):
# create temporary file just so we can get an unused file path
f = tempfile.NamedTemporaryFile()
f.close() # close and delete right away
fname = f.name
# download file to temporary path
cmd ='cl down -o {} -w {} {}/{}'.format(fname, worksheet, uuid, path)
try:
shell(cmd)
except RuntimeError:
try:
os.remove(fname) # if file exists, remove it
except OSError:
pass
raise IOError('Failed to open file {}/{}'.format(uuid, path))
f = open(fname)
yield f
f.close()
os.remove(fname)
|
[
"Get the raw file content within a particular bundle at a particular path.\n\n Path have no leading slash.\n "
] |
Please provide a description of the function:def launch_job(job_name, cmd=None,
code_dir=None, excludes='*.ipynb .git .ipynb_checkpoints', dependencies=tuple(),
queue='john', image='codalab/python', memory='18g',
debug=False, tail=False):
print 'Remember to set up SSH tunnel and LOG IN through the command line before calling this.'
def execute(cmd):
return shell(cmd, verbose=True, debug=debug)
if code_dir:
execute('cl up -n code -w {} {} -x {}'.format(worksheet, code_dir, excludes))
options = '-v -n {} -w {} --request-queue {} --request-docker-image {} --request-memory {}'.format(
job_name, worksheet, queue, image, memory)
dep_str = ' '.join(['{0}:{0}'.format(dep) for dep in dependencies])
cmd = "cl run {} {} '{}'".format(options, dep_str, cmd)
if tail:
cmd += ' -t'
execute(cmd)
|
[
"Launch a job on CodaLab (optionally upload code that the job depends on).\n\n Args:\n job_name: name of the job\n cmd: command to execute\n code_dir: path to code folder. If None, no code is uploaded.\n excludes: file types to exclude from the upload\n dependencies: list of other bundles that we depend on\n debug: if True, prints SSH commands, but does not execute them\n tail: show the streaming output returned by CodaLab once it launches the job\n "
] |
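A hedged call sketch for launch_job; the job name, script path, code directory, and dependency bundle below are placeholders, not values taken from this source.

launch_job('train-run-01',
           cmd='python code/train.py --epochs 10',
           code_dir='~/projects/my-model',      # uploaded as a bundle named "code"
           dependencies=('word-vectors',),      # mounted as word-vectors:word-vectors
           queue='john', image='codalab/python', memory='18g',
           tail=True)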
Please provide a description of the function:def load_img(self, img_path):
with open_file(self.uuid, img_path) as f:
return mpimg.imread(f)
|
[
"\n Return an image object that can be immediately plotted with matplotlib\n "
] |
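A hedged usage sketch; `bundle` stands in for an instance of the (unnamed here) class that owns load_img, and the image path inside the bundle is a placeholder.

import matplotlib.pyplot as plt

img = bundle.load_img('plots/loss_curve.png')   # hypothetical in-bundle path
plt.imshow(img)
plt.axis('off')
plt.show()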
Please provide a description of the function:def slope(self):
x = range(self.window_size)
y = self.vals
slope, bias = np.polyfit(x, y, 1)
return slope
|
[
"\n :return: the esitmated slope for points in the current window\n "
] |
Please provide a description of the function:def output_results(results, split_id='results', output_stream=None):
'''
Log `results` readably to `output_stream`, with a header
containing `split_id`.
:param results: a dictionary of summary statistics from an evaluation
:type results: dict(str -> object)
:param str split_id: an identifier for the source of `results` (e.g. 'dev')
:param file output_stream: the file-like object to which to log the results
(default: stdout)
'''
if output_stream is None:
output_stream = sys.stdout
output_stream.write('----- %s -----\n' % split_id)
for name in sorted(results.keys()):
output_stream.write('%s: %s\n' % (name, repr(results[name])))
output_stream.flush()
|
[] |
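A small usage sketch of output_results; the metric names and values are made up for illustration.

output_results({'accuracy': 0.82, 'bleu': 0.41}, split_id='dev')
# ----- dev -----
# accuracy: 0.82
# bleu: 0.41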
Please provide a description of the function:def labels_to_onehots(labels, num_classes):
batch_size = labels.get_shape().as_list()[0]
with tf.name_scope("one_hot"):
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
sparse_ptrs = tf.concat(1, [indices, labels], name="ptrs")
onehots = tf.sparse_to_dense(sparse_ptrs, [batch_size, num_classes],
1.0, 0.0)
return onehots
|
[
"Convert a vector of integer class labels to a matrix of one-hot target vectors.\n\n :param labels: a vector of integer labels, 0 to num_classes. Has shape (batch_size,).\n :param num_classes: the total number of classes\n :return: has shape (batch_size, num_classes)\n "
] |
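A hedged usage sketch, assuming the same pre-1.0 TensorFlow API the function itself uses (tf.concat with the axis argument first, tf.sparse_to_dense, explicit sessions); it will not run on TF 1.x/2.x without changes.

import tensorflow as tf

labels = tf.constant([2, 0, 1])                    # shape (3,); batch size must be static
onehots = labels_to_onehots(labels, num_classes=4)
with tf.Session() as sess:
    print(sess.run(onehots))
    # roughly:
    # [[ 0.  0.  1.  0.]
    #  [ 1.  0.  0.  0.]
    #  [ 0.  1.  0.  0.]]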
Please provide a description of the function:def start_task(self, name, size):
'''
Add a task to the stack. If, for example, `name` is `'Iteration'` and
`size` is 10, progress on that task will be shown as
..., Iteration <p> of 10, ...
:param str name: A descriptive name for the type of subtask that is
being completed.
:param int size: The total number of subtasks to complete.
'''
if len(self.task_stack) == 0:
self.start_time = datetime.datetime.now()
self.task_stack.append(Task(name, size, 0))
|
[] |
Please provide a description of the function:def progress(self, p):
'''
Update the current progress on the task at the top of the stack.
:param int p: The current subtask number, between 0 and `size`
(passed to `start_task`), inclusive.
'''
self.task_stack[-1] = self.task_stack[-1]._replace(progress=p)
self.progress_report()
|
[] |
Please provide a description of the function:def end_task(self):
'''
Remove the current task from the stack.
'''
self.progress(self.task_stack[-1].size)
self.task_stack.pop()
|
[] |
Please provide a description of the function:def progress_report(self, force=False):
'''
Print the current progress.
:param bool force: If `True`, print the report regardless of the
elapsed time since the last progress report.
'''
now = datetime.datetime.now()
if (len(self.task_stack) > 1 or self.task_stack[0].progress > 0) and \
now - self.last_report < self.resolution and not force:
return
stack_printout = ', '.join('%s %s of %s' % (t.name, t.progress, t.size)
for t in self.task_stack)
frac_done = self.fraction_done()
if frac_done == 0.0:
now_str = now.strftime('%c')
eta_str = 'unknown on %s' % now_str
else:
elapsed = (now - self.start_time)
estimated_length = elapsed.total_seconds() / frac_done
eta = self.start_time + datetime.timedelta(seconds=estimated_length)
eta_str = eta.strftime('%c')
print '%s (~%d%% done, ETA %s)' % (stack_printout,
round(frac_done * 100.0),
eta_str)
self.last_report = datetime.datetime.now()
|
[] |
Please provide a description of the function:def fraction_done(self, start=0.0, finish=1.0, stack=None):
'''
:return float: The estimated fraction of the overall task hierarchy
that has been finished. A number in the range [0.0, 1.0].
'''
if stack is None:
stack = self.task_stack
if len(stack) == 0:
return start
elif stack[0].size == 0:
# Avoid divide by zero
return finish
else:
top_fraction = stack[0].progress * 1.0 / stack[0].size
next_top_fraction = (stack[0].progress + 1.0) / stack[0].size
inner_start = start + top_fraction * (finish - start)
inner_finish = start + next_top_fraction * (finish - start)
return self.fraction_done(inner_start, inner_finish, stack[1:])
|
[] |
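A hedged sketch of how the task-stack methods above (start_task, progress, end_task, progress_report) might be driven together; `monitor` is a hypothetical instance of the class they belong to, whose constructor is not shown in this section.

monitor.start_task('Epoch', 3)
for epoch in range(3):
    monitor.progress(epoch)
    monitor.start_task('Batch', 100)
    for batch in range(100):
        monitor.progress(batch)
        # ... training step ...
    monitor.end_task()
monitor.end_task()
# prints throttled lines like:
#   Epoch 1 of 3, Batch 40 of 100 (~47% done, ETA ...)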
Please provide a description of the function:def load_conll(cls, fname):
def process_cache(cache, fields):
cache = [l.split() for l in cache if l]
if not cache:
return None
fields['label'].append(cache[0][0])
instance = {k: [] for k in fields if k != 'label'}
for l in cache[1:]:
for i, k in enumerate(fields):
if k != 'label':
instance[k].append(None if l[i] == '-' else l[i])
for k, v in instance.items():
fields[k].append(v)
cache = []
with open(fname) as f:
header = f.next().strip().split('\t')
header[0] = header[0].lstrip('# ')
fields = OrderedDict([(head, []) for head in header])
fields['label'] = []
for line in f:
line = line.strip()
if line:
cache.append(line)
else:
# met empty line, process cache
process_cache(cache, fields)
cache = []
if cache:
process_cache(cache, fields)
return cls(fields)
|
[
"\n The CONLL file must have a tab delimited header, for example::\n\n # description tags\n Alice\n Hello t1\n my t2\n name t3\n is t4\n alice t5\n\n Bob\n I'm t1\n bob t2\n\n Here, the fields are `description` and `tags`. The first instance has the label `Alice` and the\n description `['Hello', 'my', 'name', 'is', 'alice']` and the tags `['t1', 't2', 't3', 't4', 't5']`.\n The second instance has the label `Bob` and the description `[\"I'm\", 'bob']` and the tags `['t1', 't2']`.\n\n :param fname: The CONLL formatted file from which to load the dataset\n\n :return: loaded Dataset instance\n "
] |
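A hedged usage sketch; it assumes the enclosing class is the Dataset whose other methods appear in this section, that iterating over it yields per-instance mappings (as write_conll below relies on), and that train.conll follows the header format described above.

dataset = Dataset.load_conll('train.conll')        # hypothetical file name
print(dataset.fields.keys())                       # e.g. ['description', 'tags', 'label']
first = next(iter(dataset))                        # assumes the dataset is iterable
print('%s: %s' % (first['label'], ' '.join(first['description'])))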
Please provide a description of the function:def write_conll(self, fname):
if 'label' not in self.fields:
raise InvalidFieldsException("dataset is not in CONLL format: missing label field")
def instance_to_conll(inst):
tab = [v for k, v in inst.items() if k != 'label']
return '{}\n{}'.format(inst['label'], '\n'.join(['\t'.join(['-' if e is None else str(e) for e in row]) for row in zip(*tab)]))
with open(fname, 'wb') as f:
f.write('# {}'.format('\t'.join([k for k in self.fields if k != 'label'])))
for i, d in enumerate(self):
f.write('\n{}'.format(instance_to_conll(d)))
if i != len(self) - 1:
f.write('\n')
|
[
"\n Serializes the dataset in CONLL format to fname\n "
] |
Please provide a description of the function:def convert(self, converters, in_place=False):
dataset = self if in_place else self.__class__(OrderedDict([(name, data[:]) for name, data in self.fields.items()]))
for name, convert in converters.items():
if name not in self.fields.keys():
raise InvalidFieldsException('Converter specified for non-existent field {}'.format(name))
for i, d in enumerate(dataset.fields[name]):
dataset.fields[name][i] = convert(d)
return dataset
|
[
"\n Applies transformations to the dataset.\n\n :param converters: A dictionary specifying the function to apply to each field. If a field is missing from the dictionary, then it will not be transformed.\n\n :param in_place: Whether to perform the transformation in place or create a new dataset instance\n\n :return: the transformed dataset instance\n "
] |
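A hedged usage sketch of convert: it lower-cases every token in a hypothetical description field, leaves the other fields untouched, and returns a new dataset because in_place is False.

lowered = dataset.convert(
    {'description': lambda words: [w.lower() for w in words]},
    in_place=False)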
Please provide a description of the function:def shuffle(self):
order = range(len(self))
random.shuffle(order)
for name, data in self.fields.items():
reindexed = []
for _, i in enumerate(order):
reindexed.append(data[i])
self.fields[name] = reindexed
return self
|
[
"\n Re-indexes the dataset in random order\n\n :return: the shuffled dataset instance\n "
] |
Please provide a description of the function:def copy(self, keep_fields=None):
keep_fields = keep_fields or self.fields.keys()
return self.__class__(OrderedDict([(name, data[:]) for name, data in self.fields.items() if name in keep_fields]))
|
[
"\n :param keep_fields: if specified, then only the given fields will be kept\n :return: A deep copy of the dataset (each instance is copied).\n "
] |
Please provide a description of the function:def pad(cls, sequences, padding, pad_len=None):
max_len = max([len(s) for s in sequences])
pad_len = pad_len or max_len
assert pad_len >= max_len, 'pad_len {} must be greater or equal to the longest sequence {}'.format(pad_len, max_len)
for i, s in enumerate(sequences):
sequences[i] = [padding] * (pad_len - len(s)) + s
return np.array(sequences)
|
[
"\n Pads a list of sequences such that they form a matrix.\n\n :param sequences: a list of sequences of varying lengths.\n :param padding: the value of padded cells.\n :param pad_len: the length of the maximum padded sequence.\n "
] |
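A worked sketch of the padding classmethod above; Dataset is assumed to be the class it is defined on. Padding is prepended, so shorter sequences are left-padded to pad_len.

padded = Dataset.pad([[1, 2, 3], [4], [5, 6]], padding=0, pad_len=4)
# array([[0, 1, 2, 3],
#        [0, 0, 0, 4],
#        [0, 0, 5, 6]])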
Please provide a description of the function:def log_likelihood_bits(eval_data, predictions, scores, learner='ignored'):
'''
Return the log likelihood of each correct output in base 2 (bits),
computed from the scores in `scores` (which should be in base e, nats).
>>> bits = log_likelihood_bits(None, None, [np.log(0.5), np.log(0.125), np.log(0.25)])
>>> [round(b) for b in bits]
[-1.0, -3.0, -2.0]
'''
return (np.array(scores) / np.log(2.0)).tolist()
|
[] |
Please provide a description of the function:def accuracy(eval_data, predictions, scores='ignored', learner='ignored'):
'''
Return the accuracy of each prediction in `predictions`: 1 if it is equal
to the correct output in `eval_data`, 0 otherwise.
>>> data = [Instance('input', 'correct'),
... Instance('input', 'correct'),
... Instance('input', 'correct')]
>>> accuracy(data, ['correct', 'wrong', 'correct'])
[1, 0, 1]
'''
return [int(inst.output == pred)
for inst, pred in zip(eval_data, predictions)]
|
[] |
Please provide a description of the function:def prec1(eval_data, predictions, scores='ignored', learner='ignored'):
'''
Return the precision@1 of each prediction in `predictions`: 1 if it is equal
to any of the correct outputs for the corresponding instance in `eval_data`,
0 otherwise.
>>> data = [Instance('input', ['correct', 'right']),
... Instance('input', ['correct', 'right']),
... Instance('input', ['correct', 'right'])]
>>> prec1(data, ['correct', 'wrong', 'right'])
[1, 0, 1]
'''
return [int(any(o == pred for o in inst.output))
for inst, pred in zip(eval_data, predictions)]
|
[] |
Please provide a description of the function:def bleu(eval_data, predictions, scores='ignored', learner='ignored'):
'''
Return corpus-level BLEU score of `predictions` using the `output`
field of the instances in `eval_data` as references. This is returned
as a length-1 list of floats.
This uses the NLTK unsmoothed implementation, which has been known
to have some bugs. This function patches over the biggest bug, which
is that NLTK ignores n-gram overlap counts of 0 (this should result
in a zero BLEU score).
>>> data = [Instance('input', 'this is the good'),
... Instance('input', 'the bad'),
... Instance('input', 'and the ugly')]
>>> bleu(data, ['this is the good', 'the good', 'seriously really good']) # doctest: +ELLIPSIS
[0.65599...]
>>> np.exp(np.mean([np.log(5. / 9.), np.log(3. / 6.),
... np.log(2. / 3.), np.log(1. / 1.)])) # doctest: +ELLIPSIS
0.65599...
'''
ref_groups = ([inst.output.split()]
if isinstance(inst.output, basestring) else
[_maybe_tokenize(r) for r in inst.output]
for inst in eval_data)
return [corpus_bleu(ref_groups, [p.split() for p in predictions])]
|
[] |
Please provide a description of the function:def _has_4gram_match(ref, pred):
'''
>>> _has_4gram_match(['four', 'lovely', 'tokens', 'here'],
... ['four', 'lovely', 'tokens', 'here'])
True
>>> _has_4gram_match(['four', 'lovely', 'tokens', 'here'],
... ['four', 'lovely', 'tokens', 'here', 'and', 'there'])
True
>>> _has_4gram_match(['four', 'lovely', 'tokens', 'here'],
... ['four', 'ugly', 'tokens', 'here'])
False
>>> _has_4gram_match(['four', 'lovely', 'tokens'],
... ['lovely', 'tokens', 'here'])
False
'''
if len(ref) < 4 or len(pred) < 4:
return False
for i in range(len(ref) - 3):
for j in range(len(pred) - 3):
if ref[i:i + 4] == pred[j:j + 4]:
return True
return False
|
[] |
Please provide a description of the function:def squared_error(eval_data, predictions, scores='ignored', learner='ignored'):
'''
Return the squared error of each prediction in `predictions` with respect
to the correct output in `eval_data`.
>>> data = [Instance('input', (0., 0., 1.)),
... Instance('input', (0., 1., 1.)),
... Instance('input', (1., 0., 0.))]
>>> squared_error(data, [(0., 1., 1.), (0., 1., 1.), (-1., 1., 0.)])
[1.0, 0.0, 5.0]
'''
return [np.sum((np.array(pred) - np.array(inst.output)) ** 2)
for inst, pred in zip(eval_data, predictions)]
|
[] |
Please provide a description of the function:def perplexity(eval_data, predictions, scores, learner='ignored'):
'''
Return the perplexity `exp(-score)` computed from each score in `scores`.
The log scores in `scores` should be base e (`exp`, `log`).
The correct average to use for this metric is the geometric mean. It is
recommended to work in log space to calculate this mean (or use
`scipy.stats.mstats.gmean`):
mean_perplexity = np.exp(np.log(perplexities).mean())
>>> perplexities = perplexity(None, None, [np.log(0.5), np.log(0.1), np.log(0.25)])
>>> [round(p) for p in perplexities]
[2.0, 10.0, 4.0]
'''
return np.exp(-np.array(scores)).tolist()
|
[] |
Please provide a description of the function:def token_perplexity_macro(eval_data, predictions, scores, learner='ignored'):
'''
Return the per-token perplexity `exp(-score / num_tokens)` computed from each
score in `scores.`
The correct macro-average is given by the geometric mean.
>>> refs = [Instance(None, ''),
... Instance(None, ''),
... Instance(None, '2')]
>>> scores = [np.log(1.0), np.log(0.25), np.log(1 / 64.)]
>>> perplexities = token_perplexity_macro(refs, None, scores)
>>> [round(p) for p in perplexities]
... # sequence perplexities: [1, 4, 64]
... # per-token perplexities: [1, 4, 8]
[1.0, 4.0, 8.0]
'''
lens = np.array([len(_maybe_tokenize(inst.output)) + 1 for inst in eval_data])
return np.exp(-np.array(scores) / lens).tolist()
|
[] |