""""""
|
actions = load_grouped_actions(spec, pop_keys=pop_keys)
|
attrs = {""actions"": actions, ""name"": name}
|
if ""as"" in spec:
|
attrs[""as_""] = spec[""as""]
|
if pop_keys:
|
del spec[""as""]
|
for k in (""requires"", ""methods"", ""defaults"", ""default_option""):
|
if k in spec:
|
attrs[k] = spec[k]
|
if pop_keys:
|
del spec[k]
|
return metaclass(name, (base_class,), attrs)"
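The closing metaclass(name, (base_class,), attrs) call follows Python's standard three-argument class-construction protocol, the same signature as the built-in type. A minimal, self-contained illustration with made-up attribute names (not the original API):

# Sketch only: type(name, bases, attrs) builds a class at runtime from a
# plain dict, exactly as the metaclass(...) call above does.
attrs = {"name": "item", "as_": "result", "methods": ["GET", "POST"]}
ItemView = type("ItemView", (object,), attrs)

print(ItemView.methods)  # ['GET', 'POST']
print(ItemView.as_)      # 'result' -- "as" is a keyword, hence the underscore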
270
def plot_stat_summary(df, fig=None):
    '''
    Plot stats grouped by test capacitor load _and_ frequency.

    In other words, we calculate the mean of all samples in the data
    frame for each test capacitance and frequency pairing, plotting
    the following stats:

     - Root mean squared error
     - Coefficient of variation
     - Bias

    ## [Coefficient of variation][1] ##

    > In probability theory and statistics, the coefficient of
    > variation (CV) is a normalized measure of dispersion of a
    > probability distribution or frequency distribution. It is defined
    > as the ratio of the standard deviation to the mean.

    [1]: http://en.wikipedia.org/wiki/Coefficient_of_variation
    '''
    if fig is None:
        fig = plt.figure(figsize=(8, 8))

    # Define a subplot layout: 3 rows, 2 columns.
    grid = GridSpec(3, 2)
    stats = calculate_stats(df, groupby=['test_capacitor',
                                         'frequency']).dropna()
    for i, stat in enumerate(['RMSE %', 'cv %', 'bias %']):
        axis = fig.add_subplot(grid[i, 0])
        axis.set_title(stat)
        # Plot a colormap to show how the statistical value changes
        # according to frequency/capacitance pairs.
        plot_colormap(stats, stat, axis=axis, fig=fig)

        axis = fig.add_subplot(grid[i, 1])
        axis.set_title(stat)
        # Plot a histogram to show the distribution of statistical
        # values across all frequency/capacitance pairs.
        try:
            axis.hist(stats[stat].values, bins=50)
        except AttributeError:
            print(stats[stat].describe())

    fig.tight_layout()
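The helpers calculate_stats and plot_colormap come from elsewhere in the source module and are not part of this excerpt. A rough, hypothetical sketch of what calculate_stats might compute, assuming a measured-capacitance column 'C' and nominal 'test_capacitor' values (both column names are guesses):

import numpy as np
import pandas as pd

def calculate_stats(df, groupby):
    # Hypothetical sketch: per-group error statistics, each expressed
    # as a percentage of the group's mean measured value.
    def _stats(group):
        measured = group['C']               # assumed measured capacitance
        expected = group['test_capacitor']  # assumed nominal capacitance
        mean = measured.mean()
        return pd.Series({
            'RMSE %': 100 * np.sqrt(((measured - expected) ** 2).mean()) / mean,
            'cv %': 100 * measured.std() / mean,
            'bias %': 100 * (measured - expected).mean() / mean,
        })
    return df.groupby(groupby).apply(_stats)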
271
def calculate_inverse_document_frequencies(self):
    """Q.calculate_inverse_document_frequencies() -- measure how much
    information a term provides, i.e. whether the term is common or
    rare across all documents.

    This is obtained by dividing the total number of documents by the
    number of documents containing the term, and then taking the
    logarithm of that quotient.
    """
    # Count each term once per document that contains it (document
    # frequency), not once per occurrence.
    for doc in self.processed_corpus:
        for word in set(doc):
            self.inverse_document_frequencies[word] += 1
    # Convert document counts to IDF scores (log is math.log).
    for key, value in self.inverse_document_frequencies.items():
        self.inverse_document_frequencies[key] = log(len(self.corpus) / value)
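A self-contained run of the same document-frequency arithmetic on a toy corpus:

from collections import defaultdict
from math import log

docs = [["the", "cat", "sat"], ["the", "dog", "sat"], ["the", "mat"]]
document_frequencies = defaultdict(int)
for doc in docs:
    for word in set(doc):
        document_frequencies[word] += 1

idf = {word: log(len(docs) / count)
       for word, count in document_frequencies.items()}
# idf["the"] == log(3/3) == 0.0    -- appears everywhere, no information
# idf["mat"] == log(3/1) ~= 1.099  -- rare term, high information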
272
def calculate_term_frequencies(self):
    """Q.calculate_term_frequencies() -- calculate the frequency of
    each term t in each document d: its occurrence count normalized
    by the document length.
    """
    for doc in self.processed_corpus:
        term_frequency_doc = defaultdict(int)
        for word in doc:
            term_frequency_doc[word] += 1
        # Normalize raw counts by document length.
        for key, value in term_frequency_doc.items():
            term_frequency_doc[key] = value / len(doc)
        self.term_frequencies.append(term_frequency_doc)
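The same normalization on a single toy document:

from collections import defaultdict

doc = ["the", "cat", "sat", "on", "the", "mat"]
counts = defaultdict(int)
for word in doc:
    counts[word] += 1

tf = {word: count / len(doc) for word, count in counts.items()}
# tf["the"] == 2/6 ~= 0.333; every other word scores 1/6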
273
def match_query_to_corpus(self):
    """Q.match_query_to_corpus() -> index -- return the matched corpus
    index of the user query.
    """
    ranking = []
    for i, doc in enumerate(self.processed_corpus):
        rank = 0.0
        for word in self.processed_query:
            if word in doc:
                # Each matching query term contributes its TF-IDF
                # weight for this document.
                rank += (self.term_frequencies[i][word] *
                         self.inverse_document_frequencies[word])
        ranking.append((rank, i))
    # Track the best score seen so far and return its document index.
    matching_corpus_index = 0
    max_rank = 0.0
    for rank, index in ranking:
        if rank > max_rank:
            max_rank = rank
            matching_corpus_index = index
    return matching_corpus_index
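The three methods above form a complete TF-IDF matcher. A self-contained end-to-end run of the same pipeline on a toy corpus, assuming tokenization has already happened:

from collections import defaultdict
from math import log

corpus = [["the", "cat", "sat"], ["dogs", "chase", "cats"], ["the", "cat", "mat"]]
query = ["cat", "mat"]

# Term frequencies per document (snippet 272).
tfs = [{w: doc.count(w) / len(doc) for w in set(doc)} for doc in corpus]

# Inverse document frequencies (snippet 271).
dfs = defaultdict(int)
for doc in corpus:
    for w in set(doc):
        dfs[w] += 1
idf = {w: log(len(corpus) / c) for w, c in dfs.items()}

# Rank documents by summed TF-IDF over the query terms (snippet 273).
ranks = [sum(tf.get(w, 0.0) * idf[w] for w in query) for tf in tfs]
print(max(range(len(corpus)), key=ranks.__getitem__))  # 2 -- only doc 2 has "mat"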