Columns: signature (string, lengths 8 to 3.44k), body (string, lengths 0 to 1.41M), docstring (string, lengths 1 to 122k), id (string, lengths 5 to 17)
def connect(self):
self.serial = serial.Serial(port=self.port, baudrate=self.baudrate, timeout=self.timeout)<EOL>self.alive = True <EOL>self.rxThread = threading.Thread(target=self._readLoop)<EOL>self.rxThread.daemon = True<EOL>self.rxThread.start()<EOL>
Connects to the device and starts the read thread
f2362:c0:m1
def close(self):
self.alive = False<EOL>self.rxThread.join()<EOL>self.serial.close()<EOL>
Stops the read thread, waits for it to exit cleanly, then closes the underlying serial port
f2362:c0:m2
def _placeholderCallback(self, *args, **kwargs):
Placeholder callback function (does nothing)
f2362:c0:m4
def _readLoop(self):
try:<EOL><INDENT>readTermSeq = list(self.RX_EOL_SEQ)<EOL>readTermLen = len(readTermSeq)<EOL>rxBuffer = []<EOL>while self.alive:<EOL><INDENT>data = self.serial.read(<NUM_LIT:1>)<EOL>if data != '<STR_LIT>': <EOL><INDENT>rxBuffer.append(data)<EOL>if rxBuffer[-readTermLen:] == readTermSeq: <EOL><INDENT>line = '<STR_LIT>'.join(rxBuffer[:-readTermLen])<EOL>rxBuffer = []<EOL>if len(line) > <NUM_LIT:0>: <EOL><INDENT>self._handleLineRead(line)<EOL><DEDENT><DEDENT>elif self._expectResponseTermSeq:<EOL><INDENT>if rxBuffer[-len(self._expectResponseTermSeq):] == self._expectResponseTermSeq:<EOL><INDENT>line = '<STR_LIT>'.join(rxBuffer) <EOL>rxBuffer = []<EOL>self._handleLineRead(line, checkForResponseTerm=False) <EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>except serial.SerialException as e:<EOL><INDENT>self.alive = False<EOL>try:<EOL><INDENT>self.serial.close()<EOL><DEDENT>except Exception: <EOL><INDENT>pass<EOL><DEDENT>self.fatalErrorCallback(e)<EOL><DEDENT>
Read thread main loop. Reads lines from the connected device
f2362:c0:m5
def __init__(self, data=None):
super(TimeoutException, self).__init__(data)<EOL>self.data = data<EOL>
@param data: Any data that was read before the timeout occurred (if applicable)
f2363:c1:m0
def __init__(self, message, cause=None):
super(InterruptedException, self).__init__(message)<EOL>self.cause = cause<EOL>
@param cause: the exception that caused this interruption (usually a CmeError)
f2363:c3:m0
def _len_lcs(x, y):
table = _lcs(x, y)<EOL>n, m = _get_index_of_lcs(x, y)<EOL>return table[n, m]<EOL>
Returns the length of the Longest Common Subsequence between sequences x and y. Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence :param x: sequence of words :param y: sequence of words :returns integer: Length of LCS between x and y
f2390:m4
def _lcs(x, y):
n, m = _get_index_of_lcs(x, y)<EOL>table = dict()<EOL>for i in range(n + <NUM_LIT:1>):<EOL><INDENT>for j in range(m + <NUM_LIT:1>):<EOL><INDENT>if i == <NUM_LIT:0> or j == <NUM_LIT:0>:<EOL><INDENT>table[i, j] = <NUM_LIT:0><EOL><DEDENT>elif x[i - <NUM_LIT:1>] == y[j - <NUM_LIT:1>]:<EOL><INDENT>table[i, j] = table[i - <NUM_LIT:1>, j - <NUM_LIT:1>] + <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>table[i, j] = max(table[i - <NUM_LIT:1>, j], table[i, j - <NUM_LIT:1>])<EOL><DEDENT><DEDENT><DEDENT>return table<EOL>
Computes the length of the longest common subsequence (lcs) between two sequences. The implementation below uses a dynamic programming algorithm and runs in O(nm) time where n = len(x) and m = len(y). Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence :param x: collection of words :param y: collection of words :returns table: dictionary of coord and len lcs
f2390:m5
def _recon_lcs(x, y):
table = _lcs(x, y)<EOL>def _recon(i, j):<EOL><INDENT>if i == <NUM_LIT:0> or j == <NUM_LIT:0>:<EOL><INDENT>return []<EOL><DEDENT>elif x[i - <NUM_LIT:1>] == y[j - <NUM_LIT:1>]:<EOL><INDENT>return _recon(i - <NUM_LIT:1>, j - <NUM_LIT:1>) + [(x[i - <NUM_LIT:1>], i)]<EOL><DEDENT>elif table[i - <NUM_LIT:1>, j] > table[i, j - <NUM_LIT:1>]:<EOL><INDENT>return _recon(i - <NUM_LIT:1>, j)<EOL><DEDENT>else:<EOL><INDENT>return _recon(i, j - <NUM_LIT:1>)<EOL><DEDENT><DEDENT>i, j = _get_index_of_lcs(x, y)<EOL>recon_tuple = tuple(map(lambda r: r[<NUM_LIT:0>], _recon(i, j)))<EOL>return recon_tuple<EOL>
Returns the Longest Common Subsequence between x and y. Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence :param x: sequence of words :param y: sequence of words :returns sequence: LCS of x and y
f2390:m6
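A minimal standalone sketch of the DP recurrence described in the docstrings above (table[i, j] holds the LCS length of the prefixes x[:i] and y[:j]); the example sequences are illustrative and the helper name lcs_table is not part of the module.

def lcs_table(x, y):
    # table[i, j] = length of the LCS of x[:i] and y[:j]
    table = {}
    for i in range(len(x) + 1):
        for j in range(len(y) + 1):
            if i == 0 or j == 0:
                table[i, j] = 0
            elif x[i - 1] == y[j - 1]:
                table[i, j] = table[i - 1, j - 1] + 1
            else:
                table[i, j] = max(table[i - 1, j], table[i, j - 1])
    return table

x = "the cat sat on the mat".split()
y = "the cat is on the mat".split()
print(lcs_table(x, y)[len(x), len(y)])  # 5 -> "the cat on the mat"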
def rouge_n(evaluated_sentences, reference_sentences, n=<NUM_LIT:2>):
if len(evaluated_sentences) <= <NUM_LIT:0> or len(reference_sentences) <= <NUM_LIT:0>:<EOL><INDENT>raise (ValueError("<STR_LIT>"))<EOL><DEDENT>evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)<EOL>reference_ngrams = _get_word_ngrams(n, reference_sentences)<EOL>reference_count = len(reference_ngrams)<EOL>overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)<EOL>overlapping_count = len(overlapping_ngrams)<EOL>return overlapping_count / reference_count<EOL>
Computes ROUGE-N of two text collections of sentences. Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/rouge-working-note-v1.3.1.pdf :param evaluated_sentences: The sentences that have been picked by the summarizer :param reference_sentences: The sentences from the reference set :param n: Size of ngram. Defaults to 2. :returns: float 0 <= ROUGE-N <= 1, where 0 means no overlap and 1 means exactly the same. :raises ValueError: raises exception if a param has len <= 0
f2390:m7
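A worked illustration of the recall computed above (overlapping n-grams divided by the count of reference n-grams), sketched over flat word lists; word_ngrams is a stand-in for the module's _get_word_ngrams, which is not shown in this section.

def word_ngrams(n, words):
    # set of n-grams (tuples of n consecutive words)
    return {tuple(words[i:i + n]) for i in range(len(words) - n + 1)}

candidate = "the cat sat on the mat".split()
reference = "the cat is on the mat".split()

cand = word_ngrams(2, candidate)
ref = word_ngrams(2, reference)
print(len(cand & ref) / len(ref))  # 3 overlapping bigrams / 5 reference bigrams = 0.6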
def rouge_1(evaluated_sentences, reference_sentences):
return rouge_n(evaluated_sentences, reference_sentences, <NUM_LIT:1>)<EOL>
Rouge-N where N=1. This is a commonly used metric. :param evaluated_sentences: The sentences that have been picked by the summarizer :param reference_sentences: The sentences from the reference set :returns: float 0 <= ROUGE-N <= 1, where 0 means no overlap and 1 means exactly the same.
f2390:m8
def rouge_2(evaluated_sentences, reference_sentences):
return rouge_n(evaluated_sentences, reference_sentences, <NUM_LIT:2>)<EOL>
Rouge-N where N=2. This is a commonly used metric. :param evaluated_sentences: The sentences that have been picked by the summarizer :param reference_sentences: The sentences from the reference set :returns: float 0 <= ROUGE-N <= 1, where 0 means no overlap and 1 means exactly the same.
f2390:m9
def _f_lcs(llcs, m, n):
r_lcs = llcs / m<EOL>p_lcs = llcs / n<EOL>beta = p_lcs / r_lcs<EOL>num = (<NUM_LIT:1> + (beta ** <NUM_LIT:2>)) * r_lcs * p_lcs<EOL>denom = r_lcs + ((beta ** <NUM_LIT:2>) * p_lcs)<EOL>return num / denom<EOL>
Computes the LCS-based F-measure score. Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/rouge-working-note-v1.3.1.pdf :param llcs: Length of LCS :param m: number of words in reference summary :param n: number of words in candidate summary :returns float: LCS-based F-measure score
f2390:m10
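Worked arithmetic for the F-measure above, using the same beta = P_lcs / R_lcs as the function body (so R_lcs must be non-zero); the numbers are illustrative.

llcs, m, n = 4, 6, 5               # LCS length, reference length, candidate length
r_lcs = llcs / m                   # recall ~= 0.667
p_lcs = llcs / n                   # precision = 0.8
beta = p_lcs / r_lcs               # 1.2
f_lcs = ((1 + beta ** 2) * r_lcs * p_lcs) / (r_lcs + (beta ** 2) * p_lcs)
print(round(f_lcs, 3))             # ~0.716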
def rouge_l_sentence_level(evaluated_sentences, reference_sentences):
if len(evaluated_sentences) <= <NUM_LIT:0> or len(reference_sentences) <= <NUM_LIT:0>:<EOL><INDENT>raise (ValueError("<STR_LIT>"))<EOL><DEDENT>reference_words = _split_into_words(reference_sentences)<EOL>evaluated_words = _split_into_words(evaluated_sentences)<EOL>m = len(reference_words)<EOL>n = len(evaluated_words)<EOL>lcs = _len_lcs(evaluated_words, reference_words)<EOL>return _f_lcs(lcs, m, n)<EOL>
Computes ROUGE-L (sentence level) of two text collections of sentences. http://research.microsoft.com/en-us/um/people/cyl/download/papers/rouge-working-note-v1.3.1.pdf Calculated according to: R_lcs = LCS(X,Y)/m P_lcs = LCS(X,Y)/n F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs) where: X = reference summary Y = Candidate summary m = length of reference summary n = length of candidate summary :param evaluated_sentences: The sentences that have been picked by the summarizer :param reference_sentences: The sentences from the reference set :returns float: F_lcs :raises ValueError: raises exception if a param has len <= 0
f2390:m11
def _union_lcs(evaluated_sentences, reference_sentence):
if len(evaluated_sentences) <= <NUM_LIT:0>:<EOL><INDENT>raise (ValueError("<STR_LIT>"))<EOL><DEDENT>lcs_union = set()<EOL>reference_words = _split_into_words([reference_sentence])<EOL>combined_lcs_length = <NUM_LIT:0><EOL>for eval_s in evaluated_sentences:<EOL><INDENT>evaluated_words = _split_into_words([eval_s])<EOL>lcs = set(_recon_lcs(reference_words, evaluated_words))<EOL>combined_lcs_length += len(lcs)<EOL>lcs_union = lcs_union.union(lcs)<EOL><DEDENT>union_lcs_count = len(lcs_union)<EOL>union_lcs_value = union_lcs_count / combined_lcs_length<EOL>return union_lcs_value<EOL>
Returns LCS_u(r_i, C) which is the LCS score of the union longest common subsequence between reference sentence ri and candidate summary C. For example, if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8 and c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1 is “w1 w2” and the longest common subsequence of r_i and c2 is “w1 w3 w5”. The union longest common subsequence of r_i, c1, and c2 is “w1 w2 w3 w5” and LCS_u(r_i, C) = 4/5. :param evaluated_sentences: The sentences that have been picked by the summarizer :param reference_sentence: One of the sentences in the reference summaries :returns float: LCS_u(r_i, C) :raises ValueError: raises exception if a param has len <= 0
f2390:m12
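The docstring's own example, spelled out numerically; the LCS sets are written out by hand here rather than computed with _recon_lcs, and the final ratio matches the stated LCS_u(r_i, C) = 4/5.

reference = ["w1", "w2", "w3", "w4", "w5"]
c1 = ["w1", "w2", "w6", "w7", "w8"]           # LCS with reference: w1 w2
c2 = ["w1", "w3", "w8", "w9", "w5"]           # LCS with reference: w1 w3 w5

lcs_c1 = {"w1", "w2"}
lcs_c2 = {"w1", "w3", "w5"}
union_lcs = lcs_c1 | lcs_c2                   # {w1, w2, w3, w5}
combined_length = len(lcs_c1) + len(lcs_c2)   # 2 + 3 = 5
print(len(union_lcs) / combined_length)       # 0.8, i.e. 4/5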
def rouge_l_summary_level(evaluated_sentences, reference_sentences):
if len(evaluated_sentences) <= <NUM_LIT:0> or len(reference_sentences) <= <NUM_LIT:0>:<EOL><INDENT>raise (ValueError("<STR_LIT>"))<EOL><DEDENT>m = len(_split_into_words(reference_sentences))<EOL>n = len(_split_into_words(evaluated_sentences))<EOL>union_lcs_sum_across_all_references = <NUM_LIT:0><EOL>for ref_s in reference_sentences:<EOL><INDENT>union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences, ref_s)<EOL><DEDENT>return _f_lcs(union_lcs_sum_across_all_references, m, n)<EOL>
Computes ROUGE-L (summary level) of two text collections of sentences. http://research.microsoft.com/en-us/um/people/cyl/download/papers/rouge-working-note-v1.3.1.pdf Calculated according to: R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs) where: SUM(i,u) = SUM from i through u u = number of sentences in reference summary C = Candidate summary made up of v sentences m = number of words in reference summary n = number of words in candidate summary :param evaluated_sentences: The sentences that have been picked by the summarizer :param reference_sentences: The sentences from the reference set :returns float: F_lcs :raises ValueError: raises exception if a param has len <= 0
f2390:m13
def cosine_similarity(evaluated_model, reference_model):
if not (isinstance(evaluated_model, TfModel) and isinstance(reference_model, TfModel)):<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>")<EOL><DEDENT>terms = frozenset(evaluated_model.terms) | frozenset(reference_model.terms)<EOL>numerator = <NUM_LIT:0.0><EOL>for term in terms:<EOL><INDENT>numerator += evaluated_model.term_frequency(term) * reference_model.term_frequency(term)<EOL><DEDENT>denominator = evaluated_model.magnitude * reference_model.magnitude<EOL>if denominator == <NUM_LIT:0.0>:<EOL><INDENT>raise ValueError("<STR_LIT>" % (<EOL>evaluated_model, reference_model))<EOL><DEDENT>return numerator / denominator<EOL>
Computes cosine similarity of two text documents. Each document has to be represented as a TF model of a non-empty document. :returns float: 0 <= cos <= 1, where 0 means independence and 1 means exactly the same.
f2393:m0
def unit_overlap(evaluated_model, reference_model):
if not (isinstance(evaluated_model, TfModel) and isinstance(reference_model, TfModel)):<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>")<EOL><DEDENT>terms1 = frozenset(evaluated_model.terms)<EOL>terms2 = frozenset(reference_model.terms)<EOL>if not terms1 and not terms2:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>")<EOL><DEDENT>common_terms_count = len(terms1 & terms2)<EOL>return common_terms_count / (len(terms1) + len(terms2) - common_terms_count)<EOL>
Computes unit overlap of two text documents. Documents have to be represented as TF models of non-empty documents. :returns float: 0 <= overlap <= 1, where 0 means no match and 1 means exactly the same.
f2393:m1
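The ratio above is |T1 ∩ T2| / (|T1| + |T2| - |T1 ∩ T2|), i.e. the Jaccard similarity of the two term sets. A hedged sketch of just that ratio on plain word sets (the library function itself expects TfModel instances, as documented):

terms1 = frozenset("the quick brown fox".split())
terms2 = frozenset("the lazy brown dog".split())

common = len(terms1 & terms2)                         # {"the", "brown"} -> 2
print(common / (len(terms1) + len(terms2) - common))  # 2 / (4 + 4 - 2) = 1/3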
def f_score(evaluated_sentences, reference_sentences, weight=<NUM_LIT:1.0>):
p = precision(evaluated_sentences, reference_sentences)<EOL>r = recall(evaluated_sentences, reference_sentences)<EOL>weight **= <NUM_LIT:2> <EOL>denominator = weight * p + r<EOL>if denominator == <NUM_LIT:0.0>:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>else:<EOL><INDENT>return ((weight + <NUM_LIT:1>) * p * r) / denominator<EOL><DEDENT>
Computation of F-Score measure. It is computed as F(E) = ( (W^2 + 1) * P(E) * R(E) ) / ( W^2 * P(E) + R(E) ), where: - P(E) is precision metrics of extract E. - R(E) is recall metrics of extract E. - W is a weighting factor that favours R(E) metrics when W > 1 and favours P(E) metrics when W < 1. If W = 1.0 (default value) basic F-Score is computed. It is equivalent to F(E) = (2 * P(E) * R(E)) / (P(E) + R(E)). :parameter iterable evaluated_sentences: Sentences of evaluated extract. :parameter iterable reference_sentences: Sentences of reference extract. :returns float: Returns 0.0 <= F(E) <= 1.0
f2394:m0
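A quick numerical check of the weighting behaviour described above, using the same formula as the function with hypothetical precision/recall values: as W grows the score moves toward R(E), and as W shrinks it moves toward P(E).

p, r = 0.9, 0.3
for w in (0.5, 1.0, 2.0):
    w2 = w ** 2
    f = ((w2 + 1) * p * r) / (w2 * p + r)
    print(w, round(f, 3))
# 0.5 -> 0.643 (pulled toward P)
# 1.0 -> 0.45  (harmonic mean)
# 2.0 -> 0.346 (pulled toward R)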
def precision(evaluated_sentences, reference_sentences):
return _divide_evaluation(reference_sentences, evaluated_sentences)<EOL>
Intrinsic method of evaluation for extracts. It is computed as P(E) = A / B, where: - A is count of common sentences occurring in both extracts. - B is count of sentences in evaluated extract. :parameter iterable evaluated_sentences: Sentences of evaluated extract. :parameter iterable reference_sentences: Sentences of reference extract. :returns float: Returns 0.0 <= P(E) <= 1.0
f2394:m1
def recall(evaluated_sentences, reference_sentences):
return _divide_evaluation(evaluated_sentences, reference_sentences)<EOL>
Intrinsic method of evaluation for extracts. It is computed as R(E) = A / C, where: - A is count of common sentences in both extracts. - C is count of sentences in reference extract. :parameter iterable evaluated_sentences: Sentences of evaluated extract. :parameter iterable reference_sentences: Sentences of reference extract. :returns float: Returns 0.0 <= R(E) <= 1.0
f2394:m2
def null_stemmer(object):
return to_unicode(object).lower()<EOL>
Converts the given object to unicode with lowercase letters.
f2396:m0
def unicode_compatible(cls):
if PY3:<EOL><INDENT>cls.__str__ = cls.__unicode__<EOL>cls.__bytes__ = lambda self: self.__str__().encode("<STR_LIT:utf-8>")<EOL><DEDENT>else:<EOL><INDENT>cls.__str__ = lambda self: self.__unicode__().encode("<STR_LIT:utf-8>")<EOL><DEDENT>return cls<EOL>
Decorator for unicode compatible classes. Method ``__unicode__`` has to be implemented for the decorator to work as expected.
f2398:m0
@property<EOL><INDENT>def magnitude(self):<DEDENT>
return math.sqrt(sum(t**<NUM_LIT:2> for t in self._terms.values()))<EOL>
Length/norm/magnitude of vector representation of document. This is usually denoted by ||d||.
f2404:c0:m1
def most_frequent_terms(self, count=<NUM_LIT:0>):
<EOL>terms = sorted(self._terms.items(), key=lambda i: -i[<NUM_LIT:1>])<EOL>terms = tuple(i[<NUM_LIT:0>] for i in terms)<EOL>if count == <NUM_LIT:0>:<EOL><INDENT>return terms<EOL><DEDENT>elif count > <NUM_LIT:0>:<EOL><INDENT>return terms[:count]<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>")<EOL><DEDENT>
Returns ``count`` of terms sorted by their frequency in descending order. :parameter int count: Max. number of returned terms. Value 0 means no limit (default).
f2404:c0:m3
def term_frequency(self, term):
return self._terms.get(term, <NUM_LIT:0>)<EOL>
Returns frequency of term in document. :returns int: Returns count of occurrences of the term in the document.
f2404:c0:m4
def normalized_term_frequency(self, term, smooth=<NUM_LIT:0.0>):
frequency = self.term_frequency(term) / self._max_frequency<EOL>return smooth + (<NUM_LIT:1.0> - smooth)*frequency<EOL>
Returns normalized frequency of term in document. http://nlp.stanford.edu/IR-book/html/htmledition/maximum-tf-normalization-1.html :parameter float smooth: 0.0 <= smooth <= 1.0, generally set to 0.4, although some early work used the value 0.5. The term is a smoothing term whose role is to damp the contribution of the second term. It may be viewed as a scaling down of TF by the largest TF value in document. :returns float: 0.0 <= frequency <= 1.0, where 0 means no occurrence in document and 1 the most frequent term in document.
f2404:c0:m5
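Worked example of the maximum-TF normalization above with smoothing, using hypothetical counts (the term occurs 3 times, the most frequent term 10 times, smooth=0.4):

term_count, max_count, smooth = 3, 10, 0.4
frequency = term_count / max_count           # 0.3
print(smooth + (1.0 - smooth) * frequency)   # 0.4 + 0.6 * 0.3 = 0.58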
def cached_property(getter):
@wraps(getter)<EOL>def decorator(self):<EOL><INDENT>key = "<STR_LIT>" + getter.__name__<EOL>if not hasattr(self, key):<EOL><INDENT>setattr(self, key, getter(self))<EOL><DEDENT>return getattr(self, key)<EOL><DEDENT>return property(decorator)<EOL>
Decorator that converts a method into memoized property. The decorator works as expected only for classes with attribute '__dict__' and immutable properties.
f2411:m2
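Usage sketch for the decorator above: the wrapped getter runs once per instance and the result is stored on the instance under a private attribute, so later accesses skip the computation. The Report class is purely illustrative.

class Report(object):
    @cached_property
    def total(self):
        print("computing...")
        return sum(range(1000))

r = Report()
r.total   # prints "computing..." once and caches the value on r
r.total   # returned from the cached attribute, no recomputation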
def _create_matrix(self, document):
sentences_as_words = [self._to_words_set(sent) for sent in document.sentences]<EOL>sentences_count = len(sentences_as_words)<EOL>weights = numpy.zeros((sentences_count, sentences_count))<EOL>for i, words_i in enumerate(sentences_as_words):<EOL><INDENT>for j, words_j in enumerate(sentences_as_words):<EOL><INDENT>weights[i, j] = self._rate_sentences_edge(words_i, words_j)<EOL><DEDENT><DEDENT>weights /= weights.sum(axis=<NUM_LIT:1>)[:, numpy.newaxis]<EOL>return numpy.full((sentences_count, sentences_count), (<NUM_LIT:1.>-self.damping) / sentences_count)+ self.damping * weights<EOL>
Create a stochastic matrix for TextRank. Element at row i and column j of the matrix corresponds to the similarity of sentence i and j, where the similarity is computed as the number of common words between them, divided by the sum of the logarithms of their lengths. After such a matrix is created, it is turned into a stochastic matrix by normalizing each row so that it sums to one. TextRank uses the PageRank algorithm with damping, so a damping factor is incorporated as explained in TextRank's paper. The resulting matrix is a stochastic matrix ready for the power method.
f2414:c0:m5
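Once the damped, row-normalized matrix above is built, TextRank scores sentences with the power method. A minimal sketch of that iteration (NumPy; the epsilon and function name are illustrative, not the module's own code):

import numpy

def power_method(matrix, epsilon=1e-4):
    # matrix: the damped row-stochastic matrix from _create_matrix
    n = len(matrix)
    p = numpy.ones(n) / n                 # start from a uniform distribution
    while True:
        p_next = numpy.dot(matrix.T, p)   # one step of the random walk
        if numpy.linalg.norm(p_next - p) < epsilon:
            return p_next                 # stationary scores, one per sentence
        p = p_next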
def compute_tf(self, sentences):
content_words = self._get_all_content_words_in_doc(sentences)<EOL>content_words_count = len(content_words)<EOL>content_words_freq = self._compute_word_freq(content_words)<EOL>content_word_tf = dict((w, f / content_words_count) for w, f in content_words_freq.items())<EOL>return content_word_tf<EOL>
Computes the normalized term frequency as explained in http://www.tfidf.com/ :type sentences: [sumy.models.dom.Sentence]
f2415:c0:m8
def _kl_divergence(self, summary_freq, doc_freq):
sum_val = <NUM_LIT:0><EOL>for w in summary_freq:<EOL><INDENT>frequency = doc_freq.get(w)<EOL>if frequency: <EOL><INDENT>sum_val += frequency * math.log(frequency / summary_freq[w])<EOL><DEDENT><DEDENT>return sum_val<EOL>
Note: Could import scipy.stats and use scipy.stats.entropy(doc_freq, summary_freq) but this gives equivalent value without the import
f2415:c0:m10
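The sum above is Σ P(w) * log(P(w) / Q(w)) with P taken from the document frequencies and Q from the summary frequencies, skipping words absent from the document side. A tiny worked sketch with hypothetical frequencies (note the restricted sum can come out negative):

import math

doc_freq = {"cat": 0.5, "mat": 0.3, "dog": 0.2}   # P: document distribution
summary_freq = {"cat": 0.6, "mat": 0.4}           # Q: summary distribution

kl = 0.0
for w in summary_freq:
    p = doc_freq.get(w)
    if p:
        kl += p * math.log(p / summary_freq[w])
print(round(kl, 4))   # 0.5*log(0.5/0.6) + 0.3*log(0.3/0.4) ~= -0.1775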
def _find_index_of_best_sentence(self, kls):
return kls.index(min(kls))<EOL>
The best sentence is the one with the smallest KL divergence.
f2415:c0:m11
def _create_matrix(self, sentences, threshold, tf_metrics, idf_metrics):
<EOL>sentences_count = len(sentences)<EOL>matrix = numpy.zeros((sentences_count, sentences_count))<EOL>degrees = numpy.zeros((sentences_count, ))<EOL>for row, (sentence1, tf1) in enumerate(zip(sentences, tf_metrics)):<EOL><INDENT>for col, (sentence2, tf2) in enumerate(zip(sentences, tf_metrics)):<EOL><INDENT>matrix[row, col] = self.cosine_similarity(sentence1, sentence2, tf1, tf2, idf_metrics)<EOL>if matrix[row, col] > threshold:<EOL><INDENT>matrix[row, col] = <NUM_LIT:1.0><EOL>degrees[row] += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>matrix[row, col] = <NUM_LIT:0><EOL><DEDENT><DEDENT><DEDENT>for row in range(sentences_count):<EOL><INDENT>for col in range(sentences_count):<EOL><INDENT>if degrees[row] == <NUM_LIT:0>:<EOL><INDENT>degrees[row] = <NUM_LIT:1><EOL><DEDENT>matrix[row][col] = matrix[row][col] / degrees[row]<EOL><DEDENT><DEDENT>return matrix<EOL>
Creates matrix of shape |sentences|×|sentences|.
f2417:c0:m8
@staticmethod<EOL><INDENT>def cosine_similarity(sentence1, sentence2, tf1, tf2, idf_metrics):<DEDENT>
unique_words1 = frozenset(sentence1)<EOL>unique_words2 = frozenset(sentence2)<EOL>common_words = unique_words1 & unique_words2<EOL>numerator = <NUM_LIT:0.0><EOL>for term in common_words:<EOL><INDENT>numerator += tf1[term]*tf2[term] * idf_metrics[term]**<NUM_LIT:2><EOL><DEDENT>denominator1 = sum((tf1[t]*idf_metrics[t])**<NUM_LIT:2> for t in unique_words1)<EOL>denominator2 = sum((tf2[t]*idf_metrics[t])**<NUM_LIT:2> for t in unique_words2)<EOL>if denominator1 > <NUM_LIT:0> and denominator2 > <NUM_LIT:0>:<EOL><INDENT>return numerator / (math.sqrt(denominator1) * math.sqrt(denominator2))<EOL><DEDENT>else:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>
We compute idf-modified-cosine(sentence1, sentence2) here. It's the cosine similarity of these two sentences (vectors) A, B, computed as cos(A, B) = A . B / (|A| * |B|). Sentences are represented as vectors of TF*IDF metrics. :param sentence1: Iterable object where every item represents a word of the 1st sentence. :param sentence2: Iterable object where every item represents a word of the 2nd sentence. :type tf1: dict :param tf1: Term frequencies of words from the 1st sentence. :type tf2: dict :param tf2: Term frequencies of words from the 2nd sentence. :type idf_metrics: dict :param idf_metrics: Inverse document frequency metrics of the sentences. Every sentence is treated as a document for this algorithm. :rtype: float :return: Returns -1.0 for opposite similarity, 1.0 for the same sentence and zero for no similarity between sentences.
f2417:c0:m9
def __remove_trailing_zeros(self, collection):
index = len(collection) - <NUM_LIT:1><EOL>while index >= <NUM_LIT:0> and collection[index] == <NUM_LIT:0>:<EOL><INDENT>index -= <NUM_LIT:1><EOL><DEDENT>return collection[:index + <NUM_LIT:1>]<EOL>
Removes trailing zeroes from indexable collection of numbers
f2419:c0:m7
def _create_dictionary(self, document):
words = map(self.normalize_word, document.words)<EOL>unique_words = frozenset(self.stem_word(w) for w in words if w not in self._stop_words)<EOL>return dict((w, i) for i, w in enumerate(unique_words))<EOL>
Creates mapping key = word, value = row index
f2422:c0:m4
def _create_matrix(self, document, dictionary):
sentences = document.sentences<EOL>words_count = len(dictionary)<EOL>sentences_count = len(sentences)<EOL>if words_count < sentences_count:<EOL><INDENT>message = (<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>)<EOL>warn(message % (words_count, sentences_count))<EOL><DEDENT>matrix = numpy.zeros((words_count, sentences_count))<EOL>for col, sentence in enumerate(sentences):<EOL><INDENT>for word in map(self.stem_word, sentence.words):<EOL><INDENT>if word in dictionary:<EOL><INDENT>row = dictionary[word]<EOL>matrix[row, col] += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return matrix<EOL>
Creates matrix of shape |unique words|×|sentences| where cells contain the number of occurrences of words (rows) in sentences (cols).
f2422:c0:m5
def _compute_term_frequency(self, matrix, smooth=<NUM_LIT>):
assert <NUM_LIT:0.0> <= smooth < <NUM_LIT:1.0><EOL>max_word_frequencies = numpy.max(matrix, axis=<NUM_LIT:0>)<EOL>rows, cols = matrix.shape<EOL>for row in range(rows):<EOL><INDENT>for col in range(cols):<EOL><INDENT>max_word_frequency = max_word_frequencies[col]<EOL>if max_word_frequency != <NUM_LIT:0>:<EOL><INDENT>frequency = matrix[row, col]/max_word_frequency<EOL>matrix[row, col] = smooth + (<NUM_LIT:1.0> - smooth)*frequency<EOL><DEDENT><DEDENT><DEDENT>return matrix<EOL>
Computes TF metrics for each sentence (column) in the given matrix. You can read more about smoothing parameter at URL below: http://nlp.stanford.edu/IR-book/html/htmledition/maximum-tf-normalization-1.html
f2422:c0:m6
def _compute_tf(self, sentences):
content_words = self._get_all_content_words_in_doc(sentences)<EOL>content_words_count = len(content_words)<EOL>content_words_freq = self._compute_word_freq(content_words)<EOL>content_word_tf = dict((k, v / content_words_count) for (k, v) in content_words_freq.items())<EOL>return content_word_tf<EOL>
Computes the normalized term frequency as explained in http://www.tfidf.com/
f2423:c0:m9
def _count_words(self, words):
bonus_words_count = <NUM_LIT:0><EOL>stigma_words_count = <NUM_LIT:0><EOL>for word in words:<EOL><INDENT>if word in self._bonus_words:<EOL><INDENT>bonus_words_count +=<NUM_LIT:1><EOL><DEDENT>if word in self._stigma_words:<EOL><INDENT>stigma_words_count += <NUM_LIT:1><EOL><DEDENT><DEDENT>return bonus_words_count, stigma_words_count<EOL>
Counts number of bonus/stigma words. :param iterable words: Collection of words. :returns pair: Tuple with number of words (bonus words, stigma words).
f2425:c0:m3
def wrap_star_digger(item, type_str, data_name='<STR_LIT>'):
ret = []<EOL>if type(item) == dict:<EOL><INDENT>if '<STR_LIT>' in item and item['<STR_LIT>'] == type_str and data_name in item: <EOL><INDENT>if len(item[data_name]) > <NUM_LIT:1>:<EOL><INDENT>pass<EOL><DEDENT>return item[data_name]<EOL><DEDENT>else:<EOL><INDENT>for k in item:<EOL><INDENT>sub_ret = wrap_star_digger(item[k], type_str, data_name)<EOL>if sub_ret:<EOL><INDENT>ret.extend(sub_ret)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif type(item) == list:<EOL><INDENT>for i in item:<EOL><INDENT>sub_ret = wrap_star_digger(i, type_str, data_name)<EOL>if sub_ret:<EOL><INDENT>ret.extend(sub_ret)<EOL><DEDENT><DEDENT><DEDENT>return ret<EOL>
Code used to extract data from Bing's wrap star. :param item: wrap star obj :param type_str: target type string :param data_name: target data label, might be "Entities", "Properties", "Value" :return: list of all matched targets, arranged in order of occurrence
f2429:m3
def get_item_metric_pair(item_lst, metric_lst, id_lst):
query_dic = {} <EOL>for index in range(len(metric_lst)):<EOL><INDENT>current_id = id_lst[index]<EOL>current_bleu = metric_lst[index]<EOL>current_rank_score = item_lst[index]<EOL>if current_id in query_dic:<EOL><INDENT>query_dic[current_id].append((current_rank_score, current_bleu))<EOL><DEDENT>else:<EOL><INDENT>query_dic[current_id] = []<EOL>query_dic[current_id].append((current_rank_score, current_bleu))<EOL><DEDENT><DEDENT>return query_dic<EOL>
Align the bleu score and a specific ranking score in item_lst, and reconstruct the data as (rank_score, bleu) pairs in query_dic. Detail: query_dic is the input parameter used by the metrics top-x-bleu and kendall-tau. It is a reconstructed dict type data container whose key is the qid and whose value is a list whose elements are tuples, e.g. (count of words, bleu score) pairs. :param item_lst: the score value list used to rank candidates :param metric_lst: the metric values aligned with item_lst :param id_lst: the query ids aligned with item_lst :return: query_dic
f2430:m2
def top_x_bleu(query_dic, mark, x=<NUM_LIT:1>):
all_total = <NUM_LIT:0.0><EOL>with open(top_bleu_path + mark, '<STR_LIT:w>') as writer:<EOL><INDENT>for k in query_dic:<EOL><INDENT>candidate_lst = query_dic[k]<EOL>top_x = sorted(candidate_lst, key=lambda a: a[<NUM_LIT:0>], reverse=True)[:x]<EOL>total = <NUM_LIT:0><EOL>for t in top_x:<EOL><INDENT>total += t[<NUM_LIT:1>]<EOL><DEDENT>ave_bleu = total / x<EOL>writer.write('<STR_LIT>' % (k, ave_bleu, x, str(top_x)))<EOL>all_total += ave_bleu<EOL>if k in contrast_dic:<EOL><INDENT>contrast_dic[k].append(str(ave_bleu))<EOL><DEDENT>else:<EOL><INDENT>contrast_dic[k] = []<EOL>contrast_dic[k].append(str(ave_bleu))<EOL><DEDENT><DEDENT><DEDENT>result_string = '<STR_LIT>' % (mark, x, all_total / len(query_dic))<EOL>print(result_string)<EOL>return ['<STR_LIT>', result_string]<EOL>
Calculate the average bleu value of the top x predictions when ranked by item. :param query_dic: dict, key is qid, value is (item, bleu) tuple list, which will be ranked by 'item' as key :param mark: string, which indicates which method is evaluated, also used as output file name here. :param x: int, defines top x :return: average bleu score
f2430:m3
def kendall_tau(query_dic, mark):
total = <NUM_LIT:0.0><EOL>with open(kendall_tau_path + mark, '<STR_LIT:w>') as writer:<EOL><INDENT>for k in query_dic:<EOL><INDENT>candidate_lst = query_dic[k]<EOL>ordered_lst = sorted(candidate_lst, key=lambda a: a[<NUM_LIT:0>], reverse=True)<EOL>rank_lst = [can[<NUM_LIT:1>] for can in ordered_lst]<EOL>tau_value = calculate_lst_kendall(rank_lst)<EOL>writer.write('<STR_LIT>' % (k, tau_value))<EOL>total += tau_value<EOL><DEDENT><DEDENT>result_string = '<STR_LIT>' % (mark, total / len(query_dic))<EOL>print(result_string)<EOL>return ['<STR_LIT>', result_string]<EOL>
Calculate kendall_tau metric result of a method :param query_dic: dict, key is qid, value is (item, bleu) tuple list, which will be ranked by 'item' as key :param mark: string, which indicates which method is evaluated, also used as output file name here. :return: average kendall score
f2430:m5
def top_x_meteor(query_dic, mark, x=<NUM_LIT:1>):
all_total = <NUM_LIT:0.0><EOL>with open(top_meteor_path + mark, '<STR_LIT:w>') as writer:<EOL><INDENT>for k in query_dic:<EOL><INDENT>candidate_lst = query_dic[k]<EOL>top_x = sorted(candidate_lst, key=lambda a: a[<NUM_LIT:0>], reverse=True)[:x]<EOL>total = <NUM_LIT:0><EOL>for t in top_x:<EOL><INDENT>total += t[<NUM_LIT:1>]<EOL><DEDENT>ave_value = total / x<EOL>writer.write('<STR_LIT>' % (k, ave_value, x, str(top_x)))<EOL>all_total += ave_value<EOL>if k in contrast_dic:<EOL><INDENT>contrast_dic[k].append(str(ave_value))<EOL><DEDENT>else:<EOL><INDENT>contrast_dic[k] = []<EOL>contrast_dic[k].append(str(ave_value))<EOL><DEDENT><DEDENT><DEDENT>result_string = '<STR_LIT>' % (mark, x, all_total / len(query_dic))<EOL>print(result_string)<EOL>return ['<STR_LIT>', result_string]<EOL>
Calculate METEOR score of the top result :param query_dic: dict, key is qid, value is (item, meteor) tuple list, which will be ranked by 'item' as key :param mark: string, which indicates which method is evaluated, also used as output file name here. :param x: int, define top x :return: average meteor score
f2430:m6
def BP(candidate, references):
c = len(candidate)<EOL>ref_lens = (len(reference) for reference in references)<EOL>r = min(ref_lens, key=lambda ref_len: (abs(ref_len - c), ref_len))<EOL>if c > r:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>return math.exp(<NUM_LIT:1> - r / c)<EOL><DEDENT>
Calculate the brevity penalty.
f2433:m0
def MP(candidate, references, n):
counts = Counter(ngrams(candidate, n))<EOL>if not counts:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>max_counts = {}<EOL>for reference in references:<EOL><INDENT>reference_counts = Counter(ngrams(reference, n))<EOL>for ngram in counts:<EOL><INDENT>max_counts[ngram] = max(max_counts.get(ngram, <NUM_LIT:0>), reference_counts[ngram])<EOL><DEDENT><DEDENT>clipped_counts = dict((ngram, min(count, max_counts[ngram])) for ngram, count in list(counts.items()))<EOL>return sum(clipped_counts.values()) / sum(counts.values())<EOL>
Calculate the modified n-gram precision.
f2433:m1
def bleu(candidate, references, weights):
p_ns = ( MP(candidate, references, i) for i, _ in enumerate(weights, start=<NUM_LIT:1>))<EOL>s = []<EOL>for w, p_n in zip(weights, p_ns):<EOL><INDENT>try:<EOL><INDENT>s.append(w * math.log(p_n))<EOL><DEDENT>except ValueError:<EOL><INDENT>s.append(<NUM_LIT:0>)<EOL><DEDENT><DEDENT>s = math.fsum(s)<EOL>bp = BP(candidate, references)<EOL>return bp * math.exp(s)<EOL>
Calculate BLEU for a single sentence (comment by atma: the result of this code is the same as the most popular Perl script). eg: weight = [0.25, 0.25, 0.25, 0.25] can = 'It is a guide to action which ensures that the military always obeys the commands of the party'.lower().split() ref1 = 'It is a guide to action that ensures that the military will forever heed Party commands'.lower().split() ref2 = 'It is the guiding principle which guarantees the military forces always being under the command of the Party'.lower().split() ref = [ref1, ref2] print bleu(can, ref, weight) :param candidate: word list of one sentence, eg: ['I', 'like', 'eat', 'apple'] :param references: list of refs, each a list of words, eg [['I', 'like', 'eat', 'apple'], ['I', 'like', 'apple']] :param weights: a list of weights :return: the bleu score
f2433:m2
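The example embedded in the docstring above, tidied into runnable form; it assumes bleu, MP and BP from this record (plus the ngrams helper they rely on) are importable, and the printed value should match the reference Perl script per the docstring's claim.

weights = [0.25, 0.25, 0.25, 0.25]
candidate = ('It is a guide to action which ensures that the military '
             'always obeys the commands of the party').lower().split()
ref1 = ('It is a guide to action that ensures that the military '
        'will forever heed Party commands').lower().split()
ref2 = ('It is the guiding principle which guarantees the military forces '
        'always being under the command of the Party').lower().split()

print(bleu(candidate, [ref1, ref2], weights))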
def dict_merge(*dict_list):
result = {}<EOL>for d in dict_list:<EOL><INDENT>result.update(d)<EOL><DEDENT>return result<EOL>
Given zero or more dicts, shallow copy and merge them into a new dict, with precedence to dictionary values later in the dict list. Helpful mainly before Python 3.5. https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
f2437:m0
def new_uid(bits=<NUM_LIT:64>):
return "<STR_LIT>".join(_RANDOM.sample("<STR_LIT>",<EOL>int(bits / <NUM_LIT>) + <NUM_LIT:1>))<EOL>
A random alphanumeric value with at least the specified bits of randomness. We use base 36, i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
f2437:m1
def iso_timestamp():
return datetime.now().isoformat() + '<STR_LIT>'<EOL>
ISO timestamp. With the Z for usual clarity. Example: 2015-09-12T08:41:12.397217Z
f2437:m2
def new_timestamped_uid(bits=<NUM_LIT:32>):
return "<STR_LIT>" % (re.sub('<STR_LIT>', '<STR_LIT>', datetime.now().isoformat()).replace("<STR_LIT:.>", "<STR_LIT>"), new_uid(bits))<EOL>
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of randomness. The advantage of this is it sorts nicely by time, while still being unique. Example: 20150912T084555Z-378465-43vtwbx
f2437:m3
def abbreviate_str(string, max_len=<NUM_LIT>, indicator="<STR_LIT>"):
if not string or not max_len or len(string) <= max_len:<EOL><INDENT>return string<EOL><DEDENT>elif max_len <= len(indicator):<EOL><INDENT>return string[<NUM_LIT:0>:max_len]<EOL><DEDENT>else:<EOL><INDENT>return string[<NUM_LIT:0>:max_len - len(indicator)] + indicator<EOL><DEDENT>
Abbreviate a string, adding an indicator like an ellipsis if required.
f2437:m4
def abbreviate_list(items, max_items=<NUM_LIT:10>, item_max_len=<NUM_LIT>, joiner="<STR_LIT:U+002CU+0020>", indicator="<STR_LIT>"):
if not items:<EOL><INDENT>return items<EOL><DEDENT>else:<EOL><INDENT>shortened = [abbreviate_str("<STR_LIT:%s>" % item, max_len=item_max_len) for item in items[<NUM_LIT:0>:max_items]]<EOL>if len(items) > max_items:<EOL><INDENT>shortened.append(indicator)<EOL><DEDENT>return joiner.join(shortened)<EOL><DEDENT>
Abbreviate a list, truncating each element and adding an indicator at the end if the whole list was truncated. Set item_max_len to None or 0 not to truncate items.
f2437:m5
def expand_variables(template_str, value_map, transformer=None):
if template_str is None:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>if transformer is None:<EOL><INDENT>transformer = lambda v: v<EOL><DEDENT>try:<EOL><INDENT>transformed_value_map = {k: transformer(value_map[k]) for k in value_map}<EOL>return Template(template_str).substitute(transformed_value_map)<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise ValueError("<STR_LIT>" % (template_str, e))<EOL><DEDENT><DEDENT>
Expand a template string like "blah blah $FOO blah" using given value mapping.
f2437:m6
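Usage sketch for the template expansion above (string.Template '$VAR' syntax; the variable names and values are illustrative):

values = {"FOO": "hello", "BAR": "world"}
print(expand_variables("blah blah $FOO blah $BAR", values))
# -> "blah blah hello blah world"

# the optional transformer is applied to every value before substitution
print(expand_variables("$FOO/$BAR", values, transformer=str.upper))
# -> "HELLO/WORLD"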
def shell_expand_variables(template_str, value_map):
return expand_variables(template_str, value_map, transformer=pipes.quote)<EOL>
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed to ensure shell safety.
f2437:m7
def shell_expand_to_popen(template, values):
return [expand_variables(item, values) for item in shlex.split(template)]<EOL>
Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
f2437:m8
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
if backup_suffix and os.path.exists(path):<EOL><INDENT>backup_path = path + backup_suffix<EOL>if os.path.islink(backup_path):<EOL><INDENT>os.unlink(backup_path)<EOL><DEDENT>elif os.path.isdir(backup_path):<EOL><INDENT>shutil.rmtree(backup_path)<EOL><DEDENT>shutil.move(path, backup_path)<EOL><DEDENT>
Move the given file or directory to the same name, with a backup suffix. If backup_suffix not supplied, move it to the extension ".bak". NB: If backup_suffix is supplied and is None, don't do anything.
f2437:m9
def make_all_dirs(path, mode=<NUM_LIT>):
<EOL>try:<EOL><INDENT>os.makedirs(path, mode=mode)<EOL><DEDENT>except OSError as e:<EOL><INDENT>if e.errno == errno.EEXIST and os.path.isdir(path):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>return path<EOL>
Ensure a local dir, with all its parent dirs, is created. Unlike os.makedirs(), this will not fail if the path already exists.
f2437:m10
def make_parent_dirs(path, mode=<NUM_LIT>):
parent = os.path.dirname(path)<EOL>if parent:<EOL><INDENT>make_all_dirs(parent, mode)<EOL><DEDENT>return path<EOL>
Ensure parent directories of a file are created as needed.
f2437:m11
@contextmanager<EOL>def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix="<STR_LIT>"):
if dest_path == os.devnull:<EOL><INDENT>yield dest_path<EOL><DEDENT>else:<EOL><INDENT>tmp_path = ("<STR_LIT:%s>" + suffix) % (dest_path, new_uid())<EOL>if make_parents:<EOL><INDENT>make_parent_dirs(tmp_path)<EOL><DEDENT>yield tmp_path<EOL>if not os.path.exists(tmp_path):<EOL><INDENT>raise IOError("<STR_LIT>" % (dest_path, tmp_path))<EOL><DEDENT>if backup_suffix:<EOL><INDENT>move_to_backup(dest_path, backup_suffix=backup_suffix)<EOL><DEDENT>if os.path.isdir(dest_path):<EOL><INDENT>shutil.rmtree(dest_path)<EOL><DEDENT>shutil.move(tmp_path, dest_path)<EOL><DEDENT>
A context manager for convenience in writing a file or directory in an atomic way. Set up a temporary name, then rename it after the operation is done, optionally making a backup of the previous file or directory, if present.
f2437:m12
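Usage sketch for the context manager above: the caller writes to the yielded temporary path, and the rename onto dest_path happens only after the with-block finishes, so readers never observe a half-written file (the path below is illustrative).

with atomic_output_file("output/report.txt", make_parents=True) as tmp_path:
    with open(tmp_path, "w") as f:
        f.write("all or nothing\n")
# "output/report.txt" now exists with the full contents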
def temp_output_file(prefix="<STR_LIT>", suffix="<STR_LIT>", dir=None, make_parents=False, always_clean=False):
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,<EOL>always_clean=always_clean)<EOL>
A context manager for convenience in creating a temporary file, which is deleted when exiting the context. Usage: with temp_output_file() as (fd, path): ...
f2437:m13
def temp_output_dir(prefix="<STR_LIT>", suffix="<STR_LIT>", dir=None, make_parents=False, always_clean=False):
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,<EOL>always_clean=always_clean)<EOL>
A context manager for convenience in creating a temporary directory, which is deleted when exiting the context. Usage: with temp_output_dir() as dirname: ...
f2437:m14
def read_string_from_file(path, encoding="<STR_LIT:utf8>"):
with codecs.open(path, "<STR_LIT:rb>", encoding=encoding) as f:<EOL><INDENT>value = f.read()<EOL><DEDENT>return value<EOL>
Read entire contents of file into a string.
f2437:m16
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="<STR_LIT:utf8>"):
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:<EOL><INDENT>with codecs.open(tmp_path, "<STR_LIT:wb>", encoding=encoding) as f:<EOL><INDENT>f.write(string)<EOL><DEDENT><DEDENT>
Write entire file with given string contents, atomically. Keeps backup by default.
f2437:m17
def set_file_mtime(path, mtime, atime=None):
if not atime:<EOL><INDENT>atime = mtime<EOL><DEDENT>f = open(path, '<STR_LIT:a>')<EOL>try:<EOL><INDENT>os.utime(path, (atime, mtime))<EOL><DEDENT>finally:<EOL><INDENT>f.close()<EOL><DEDENT>
Set access and modification times on a file.
f2437:m18
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:<EOL><INDENT>shutil.copyfile(source_path, tmp_path)<EOL>set_file_mtime(tmp_path, os.path.getmtime(source_path))<EOL><DEDENT>
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
f2437:m19
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
if os.path.isdir(source_path):<EOL><INDENT>with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:<EOL><INDENT>shutil.copytree(source_path, tmp_path, symlinks=symlinks)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)<EOL><DEDENT>
Copy a file or directory recursively, and atomically, renaming the file or top-level dir when done. Unlike shutil.copytree, this will not fail on a file.
f2437:m20
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
if make_parents:<EOL><INDENT>make_parent_dirs(dest_path)<EOL><DEDENT>move_to_backup(dest_path, backup_suffix=backup_suffix)<EOL>shutil.move(source_path, dest_path)<EOL>
Move file. With a few extra options.
f2437:m21
def rmtree_or_file(path, ignore_errors=False, onerror=None):
<EOL>if ignore_errors and not os.path.exists(path):<EOL><INDENT>return<EOL><DEDENT>if os.path.isdir(path) and not os.path.islink(path):<EOL><INDENT>shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)<EOL><DEDENT>else:<EOL><INDENT>os.unlink(path)<EOL><DEDENT>
rmtree fails on files or symlinks. This removes the target, whatever it is.
f2437:m22
def chmod_native(path, mode_expression, recursive=False):
popenargs = ["<STR_LIT>"]<EOL>if recursive:<EOL><INDENT>popenargs.append("<STR_LIT>")<EOL><DEDENT>popenargs.append(mode_expression)<EOL>popenargs.append(path)<EOL>subprocess.check_call(popenargs)<EOL>
This is ugly and will only work on POSIX, but the built-in Python os.chmod support is very minimal: it supports neither fast recursive chmod nor "+X" type expressions, both of which would be slow to emulate in Python for large trees. So just shell out.
f2437:m23
def file_sha1(path):
sha1 = hashlib.sha1()<EOL>with open(path, "<STR_LIT:rb>") as f:<EOL><INDENT>while True:<EOL><INDENT>block = f.read(<NUM_LIT:2> ** <NUM_LIT:10>)<EOL>if not block:<EOL><INDENT>break<EOL><DEDENT>sha1.update(block)<EOL><DEDENT>return sha1.hexdigest()<EOL><DEDENT>
Compute SHA1 hash of a file.
f2437:m24
def _mockable_print(arg):
print(arg)<EOL>
A print function that can be mocked in tests. Args: arg: the thing to print
f2440:m0
def run():
prose_wc(setup(sys.argv[<NUM_LIT:1>:]))<EOL>
Entry point for the command, running wordcount with CLI args.
f2440:m1
def setup(argv):
parser = argparse.ArgumentParser(<EOL>description='<STR_LIT>',<EOL>epilog='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', action='<STR_LIT:store_true>',<EOL>dest='<STR_LIT>',<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', action='<STR_LIT:store_true>',<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', nargs='<STR_LIT:?>',<EOL>choices=['<STR_LIT>', '<STR_LIT>', '<STR_LIT:default>'], default='<STR_LIT:default>',<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', type=int, nargs='<STR_LIT:?>', default=<NUM_LIT:4>,<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT:file>', type=argparse.FileType('<STR_LIT:rb>'),<EOL>help='<STR_LIT>')<EOL>return parser.parse_args(argv)<EOL>
Sets up the ArgumentParser. Args: argv: an array of arguments
f2440:m2
def prose_wc(args):
if args.file is None:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>if args.split_hyphens:<EOL><INDENT>INTERSTITIAL_PUNCTUATION.append(re.compile(r'<STR_LIT:->'))<EOL><DEDENT>content = args.file.read().decode('<STR_LIT:utf-8>')<EOL>filename = args.file.name<EOL>body = strip_frontmatter(content)<EOL>parsed = markdown_to_text(body)<EOL>result = wc(filename, body, parsed=parsed,<EOL>is_jekyll=(body != content))<EOL>if (args.update and<EOL>filename != '<STR_LIT>' and<EOL>result['<STR_LIT>']['<STR_LIT:type>'] == '<STR_LIT>'):<EOL><INDENT>update_file(filename, result, content, args.indent)<EOL><DEDENT>else:<EOL><INDENT>_mockable_print({<EOL>'<STR_LIT>': yaml.safe_dump(result, default_flow_style=False,<EOL>indent=args.indent),<EOL>'<STR_LIT>': json.dumps(result, indent=args.indent),<EOL>'<STR_LIT:default>': default_dump(result),<EOL>}[args.format])<EOL><DEDENT>return <NUM_LIT:0><EOL>
Processes data provided to print a count object, or update a file. Args: args: an ArgumentParser object returned by setup()
f2440:m3
def markdown_to_text(body):
<EOL>md = markdown.markdown(body, extensions=[<EOL>'<STR_LIT>'<EOL>])<EOL>soup = BeautifulSoup(md, '<STR_LIT>')<EOL>return soup.get_text()<EOL>
Converts markdown to text. Args: body: markdown (or plaintext, or maybe HTML) input Returns: Plaintext with all tags and frills removed
f2440:m4
def strip_frontmatter(contents):
if contents[:<NUM_LIT:3>] == '<STR_LIT>':<EOL><INDENT>contents = re.split('<STR_LIT>', contents, <NUM_LIT:2>)[<NUM_LIT:2>].strip()<EOL><DEDENT>return contents<EOL>
Strips Jekyll frontmatter Args: contents: the contents of a Jekyll post with frontmatter Returns: The contents of the file without frontmatter
f2440:m5
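Illustration of the frontmatter handling above, assuming the masked string literals are the conventional '---' Jekyll delimiters (that assumption is the editor's, not stated in this record):

post = """---
title: Example
layout: post
---
This is the body that gets counted."""

body = strip_frontmatter(post)
print(body)   # "This is the body that gets counted."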
def wc(filename, contents, parsed=None, is_jekyll=False):
if is_jekyll:<EOL><INDENT>fmt = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>fmt = '<STR_LIT>'<EOL><DEDENT>body = parsed.strip() if parsed else contents.strip()<EOL>words = re.sub(r'<STR_LIT>', '<STR_LIT:U+0020>', body, re.MULTILINE)<EOL>for punctuation in INTERSTITIAL_PUNCTUATION:<EOL><INDENT>words = re.sub(punctuation, '<STR_LIT:U+0020>', words)<EOL><DEDENT>punct = re.compile('<STR_LIT>', re.U)<EOL>words = punct.sub('<STR_LIT>', words)<EOL>real_characters = re.sub(r'<STR_LIT>', '<STR_LIT>', words)<EOL>paragraphs = [<NUM_LIT:1> if len(x) == <NUM_LIT:0> else <NUM_LIT:0> for x in<EOL>contents.strip().splitlines()]<EOL>for index, paragraph in enumerate(paragraphs):<EOL><INDENT>if paragraph == <NUM_LIT:1> and paragraphs[index + <NUM_LIT:1>] == <NUM_LIT:1>:<EOL><INDENT>paragraphs[index] = <NUM_LIT:0><EOL><DEDENT><DEDENT>return {<EOL>'<STR_LIT>': {<EOL>'<STR_LIT:file>': filename,<EOL>'<STR_LIT:type>': fmt,<EOL>'<STR_LIT>': sum(paragraphs) + <NUM_LIT:1>,<EOL>'<STR_LIT>': len(re.split('<STR_LIT>', words)),<EOL>'<STR_LIT>': len(real_characters),<EOL>'<STR_LIT>': len(words),<EOL>}<EOL>}<EOL>
Count the words, characters, and paragraphs in a string. Args: contents: the original string to count filename (optional): the filename as provided to the CLI parsed (optional): a parsed string, expected to be plaintext only is_jekyll: whether the original contents were from a Jekyll file Returns: An object containing the various counts
f2440:m6
def update_file(filename, result, content, indent):
<EOL>parts = re.split('<STR_LIT>', content, <NUM_LIT:2>)<EOL>frontmatter = yaml.safe_load(parts[<NUM_LIT:1>])<EOL>frontmatter['<STR_LIT>'] = result['<STR_LIT>']<EOL>parts[<NUM_LIT:1>] = '<STR_LIT>'.format(<EOL>yaml.safe_dump(frontmatter, default_flow_style=False, indent=indent))<EOL>result = '<STR_LIT>'.join(parts)<EOL>with open(filename, '<STR_LIT:wb>') as f:<EOL><INDENT>f.write(result.encode('<STR_LIT:utf-8>'))<EOL><DEDENT>print('<STR_LIT>'.format(filename))<EOL>
Updates a Jekyll file to contain the counts from an object. This just converts the results to YAML and adds them to the Jekyll frontmatter. Args: filename: the Jekyll file to update result: the results object from `wc` content: the contents of the original file indent: the indentation level for dumping YAML
f2440:m7
def default_dump(result):
result['<STR_LIT>']['<STR_LIT>'] = (<EOL>'<STR_LIT>' if result['<STR_LIT>']['<STR_LIT>'] == <NUM_LIT:1> else '<STR_LIT>')<EOL>return ('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(<EOL>**result['<STR_LIT>']))<EOL>
Prints a tab-separated, human-readable report of the results. Args: result: the results object from `wc` Returns: A string with the formatted result
f2440:m8
def chi2_adaptive_binning(features_0, features_1, number_of_splits_list, systematics_fraction=<NUM_LIT:0.0>, title="<STR_LIT:title>", name="<STR_LIT:name>", PLOT=True, DEBUG=False, transform='<STR_LIT>'):
max_number_of_splits = np.max(number_of_splits_list)<EOL>no_0 = features_0.shape[<NUM_LIT:0>]<EOL>no_1 = features_1.shape[<NUM_LIT:0>]<EOL>print("<STR_LIT>", features_0.shape)<EOL>no_dim = features_0.shape[<NUM_LIT:1>]<EOL>label_0 = np.zeros((no_0, <NUM_LIT:1>))<EOL>label_1 = np.ones((no_1, <NUM_LIT:1>))<EOL>data_0 = np.c_[features_0, label_0]<EOL>data_1 = np.c_[features_1, label_1]<EOL>features = np.r_[features_0, features_1]<EOL>labels = np.r_[label_0, label_1]<EOL>data = np.r_[data_0, data_1]<EOL>data_same = np.c_[features, labels]<EOL>assert np.sum(data != data_same) == <NUM_LIT:0><EOL>assert (no_dim == data.shape[<NUM_LIT:1>]-<NUM_LIT:1>)<EOL>if no_dim == <NUM_LIT:2>:<EOL><INDENT>plt.scatter(features[:, <NUM_LIT:0>], features[:, <NUM_LIT:1>], <NUM_LIT:0.1>)<EOL>plt.savefig('<STR_LIT>')<EOL>plt.clf()<EOL><DEDENT>if transform == '<STR_LIT>':<EOL><INDENT>features = preprocessing.scale(features)<EOL>data = np.c_[features, labels]<EOL><DEDENT>if transform == '<STR_LIT>':<EOL><INDENT>data_new = norm_highD_searchsorted(data[:, <NUM_LIT:0>])<EOL>for D in range(<NUM_LIT:1>, no_dim):<EOL><INDENT>temp = norm_highD_searchsorted(data[:, D])<EOL>data_new = np.c_[data_new, temp]<EOL><DEDENT>data_new = np.c_[data_new, np.r_[label_0, label_1]]<EOL>print("<STR_LIT>", data)<EOL>data = data_new<EOL>print("<STR_LIT>", data)<EOL><DEDENT>np.random.shuffle(data)<EOL>assert (no_dim == data.shape[<NUM_LIT:1>]-<NUM_LIT:1>)<EOL>labels = data[:, -<NUM_LIT:1>]<EOL>X_values = data[:, :-<NUM_LIT:1>]<EOL>X_max = np.amax(data, axis=<NUM_LIT:0>)[:-<NUM_LIT:1>]<EOL>X_min = np.amin(data, axis=<NUM_LIT:0>)[:-<NUM_LIT:1>]<EOL>X_total_width = (np.subtract(X_max, X_min))<EOL>del data<EOL>if transform == '<STR_LIT>':<EOL><INDENT>X_values = X_values - X_min[None, :]<EOL>X_values = X_values / X_total_width[None, :]<EOL><DEDENT>if True:<EOL><INDENT>X_min = [<NUM_LIT:0.>]*no_dim<EOL>X_total_width = [<NUM_LIT:1.>]*no_dim<EOL><DEDENT>data = np.concatenate((X_values, labels[:, None]), axis=<NUM_LIT:1>)<EOL>if no_dim == <NUM_LIT:2>:<EOL><INDENT>plt.scatter(data[:, <NUM_LIT:0>], data[:, <NUM_LIT:1>], <NUM_LIT:0.1>)<EOL>plt.savefig('<STR_LIT>')<EOL><DEDENT>starting_boundary = []<EOL>for i in range(no_dim):<EOL><INDENT>starting_boundary.append([<NUM_LIT:0.0>, <NUM_LIT:1.0>])<EOL><DEDENT>bin_boundaries_dict = {'<STR_LIT:0>': np.array(starting_boundary)}<EOL>bin_points_dict = {'<STR_LIT:0>': data}<EOL>for split_number in range(<NUM_LIT:1>, <NUM_LIT:1>+max_number_of_splits):<EOL><INDENT>for bin_key, bin_boundary in bin_boundaries_dict.items():<EOL><INDENT>if str(split_number-<NUM_LIT:1>) in bin_key:<EOL><INDENT>variances = np.var(bin_points_dict[bin_key][:, :-<NUM_LIT:1>], axis=<NUM_LIT:0>)<EOL>dim_to_be_sliced = np.argmax(variances)<EOL>median = np.median(<EOL>bin_points_dict[bin_key][:, dim_to_be_sliced])<EOL>a_bin_boundary, b_bin_boundary = bin_boundary.copy(), bin_boundary.copy()<EOL>a_bin_boundary[dim_to_be_sliced, <NUM_LIT:1>] = median<EOL>b_bin_boundary[dim_to_be_sliced, <NUM_LIT:0>] = median<EOL>bin_boundaries_dict[str(split_number) +<EOL>bin_key[<NUM_LIT:1>:]+'<STR_LIT:a>'] = a_bin_boundary<EOL>bin_boundaries_dict[str(split_number) +<EOL>bin_key[<NUM_LIT:1>:]+'<STR_LIT:b>'] = b_bin_boundary<EOL>a_points, b_points = [], []<EOL>for event_number in range(bin_points_dict[bin_key].shape[<NUM_LIT:0>]):<EOL><INDENT>if bin_points_dict[bin_key][event_number, dim_to_be_sliced] < median:<EOL><INDENT>a_points.append(<EOL>bin_points_dict[bin_key][event_number, :].tolist())<EOL><DEDENT>else:<EOL><INDENT>b_points.append(<EOL>bin_points_dict[bin_key][event_number, :].tolist())<EOL><DEDENT><DEDENT>bin_points_dict[str(split_number) +<EOL>bin_key[<NUM_LIT:1>:]+'<STR_LIT:a>'] = np.array(a_points)<EOL>bin_points_dict[str(split_number) +<EOL>bin_key[<NUM_LIT:1>:]+'<STR_LIT:b>'] = np.array(b_points)<EOL>if len(a_points) == <NUM_LIT:0>:<EOL><INDENT>del bin_points_dict[str(split_number)+bin_key[<NUM_LIT:1>:]+'<STR_LIT:a>']<EOL>del bin_boundaries_dict[str(split_number)+bin_key[<NUM_LIT:1>:]+'<STR_LIT:a>']<EOL><DEDENT>if len(b_points) == <NUM_LIT:0>:<EOL><INDENT>del bin_points_dict[str(split_number)+bin_key[<NUM_LIT:1>:]+'<STR_LIT:b>']<EOL>del bin_boundaries_dict[str(split_number)+bin_key[<NUM_LIT:1>:]+'<STR_LIT:b>']<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if PLOT:<EOL><INDENT>pickle.dump(bin_boundaries_dict, open("<STR_LIT>", "<STR_LIT:wb>"))<EOL><DEDENT>bins_sample01_dict = {}<EOL>signed_Scp2_dict = {}<EOL>results_list = []<EOL>for number_of_splits in number_of_splits_list:<EOL><INDENT>print("<STR_LIT>", number_of_splits,<EOL>"<STR_LIT>", systematics_fraction)<EOL>bins_sample0, bins_sample1 = [], []<EOL>for bin_key, bin_points in bin_points_dict.items():<EOL><INDENT>if str(number_of_splits) in bin_key:<EOL><INDENT>labels_in_bin = bin_points[:, -<NUM_LIT:1>]<EOL>bin_sample0 = np.count_nonzero(labels_in_bin == <NUM_LIT:0>)<EOL>bin_sample1 = np.count_nonzero(labels_in_bin == <NUM_LIT:1>)<EOL>if(systematics_fraction*float(bin_sample0) != <NUM_LIT:0.>):<EOL><INDENT>bin_sample0 += int(round(np.random.normal(<NUM_LIT:0.>,<EOL>systematics_fraction*float(bin_sample0))))<EOL><DEDENT>if(systematics_fraction*float(bin_sample1) != <NUM_LIT:0.>):<EOL><INDENT>bin_sample1 += int(round(np.random.normal(<NUM_LIT:0.>,<EOL>systematics_fraction*float(bin_sample1))))<EOL><DEDENT>bins_sample01_dict[bin_key] = [bin_sample0, bin_sample1]<EOL>signed_Scp2_dict[bin_key] = np.square(float(bin_sample1-bin_sample0))/(float(bin_sample1)+float(bin_sample0)+np.square(<EOL>float(bin_sample1)*systematics_fraction)+np.square(float(bin_sample1)*systematics_fraction))*np.sign(bin_sample1-bin_sample0)<EOL>bins_sample0.append(bin_sample0)<EOL>bins_sample1.append(bin_sample1)<EOL><DEDENT><DEDENT>bins_sample0, bins_sample1 = np.array(<EOL>bins_sample0, dtype=float), np.array(bins_sample1, dtype=float)<EOL>print("<STR_LIT>", bins_sample0,<EOL>"<STR_LIT>", bins_sample1)<EOL>Scp2 = ((bins_sample1-bins_sample0)**<NUM_LIT:2>) / (bins_sample1+bins_sample0 +<EOL>(systematics_fraction*bins_sample1)**<NUM_LIT:2>+(systematics_fraction*bins_sample0)**<NUM_LIT:2>)<EOL>if DEBUG:<EOL><INDENT>print(Scp2)<EOL><DEDENT>Chi2 = np.nansum(Scp2)<EOL>if DEBUG:<EOL><INDENT>print("<STR_LIT>")<EOL>print(Chi2)<EOL><DEDENT>dof = bins_sample0.shape[<NUM_LIT:0>]-<NUM_LIT:1><EOL>pvalue = <NUM_LIT:1> - stats.chi2.cdf(Chi2, dof)<EOL>print("<STR_LIT>", Scp2,<EOL>"<STR_LIT>", Chi2, "<STR_LIT>", pvalue, "<STR_LIT>")<EOL>if DEBUG:<EOL><INDENT>print(bins_sample0)<EOL>print(bins_sample1)<EOL>print("<STR_LIT>".format(str(Chi2/dof)))<EOL>print("<STR_LIT>".format(str(pvalue)))<EOL><DEDENT>results_list.append(pvalue)<EOL>if PLOT:<EOL><INDENT>if no_dim == <NUM_LIT:1>:<EOL><INDENT>chi2_plots.adaptive_binning_1Dplot(bin_boundaries_dict, data, number_of_splits, title+"<STR_LIT:U+0020>"+str(no_dim) + "<STR_LIT>"+str(<EOL>number_of_splits) + "<STR_LIT>", name+"<STR_LIT:_>"+str(no_dim) + "<STR_LIT>"+str(number_of_splits)+"<STR_LIT>")<EOL><DEDENT>if no_dim == <NUM_LIT:2>:<EOL><INDENT>chi2_plots.adaptive_binning_2Dplot(bin_boundaries_dict, signed_Scp2_dict, number_of_splits, X_values, title+"<STR_LIT:U+0020>"+str(no_dim) + "<STR_LIT:D>"+str(<EOL>number_of_splits) + "<STR_LIT>", name+"<STR_LIT:_>"+str(no_dim) + "<STR_LIT>"+str(number_of_splits)+"<STR_LIT>", X_min=X_min, X_total_width=X_total_width)<EOL><DEDENT>if no_dim > <NUM_LIT:1>:<EOL><INDENT>chi2_plots.adaptive_binning_2D1Dplot(bin_boundaries_dict, bins_sample01_dict, number_of_splits, X_values, title+"<STR_LIT:U+0020>"+str(<EOL>no_dim) + "<STR_LIT:D>"+str(number_of_splits) + "<STR_LIT>", name+"<STR_LIT:_>"+str(no_dim) + "<STR_LIT>"+str(number_of_splits)+"<STR_LIT>", no_dim)<EOL><DEDENT><DEDENT><DEDENT>return results_list<EOL>
This function takes in two 2D arrays with all features being columns
f2446:m2
def get_members(self, sort='<STR_LIT>'):
return self._session.fetch_items("<STR_LIT>", User.from_json, <NUM_LIT:1000>, group_id=self.id, sort=sort, fields=User.__slots__ + User.USER_FIELDS)<EOL>
:param sort: {id_asc, id_desc, time_asc, time_desc} string Docs: https://vk.com/dev/groups.getMembers
f2451:c0:m2
@staticmethod<EOL><INDENT>def _get_user_groups(session, user_id, filter):<DEDENT>
return session.fetch_items('<STR_LIT>', Group.from_json, count=<NUM_LIT:1000>, user_id=user_id, filter=filter, extended=<NUM_LIT:1>, fields="<STR_LIT:U+002C>".join(Group.GROUP_FIELDS))<EOL>
https://vk.com/dev/groups.get :param filter: {admin, editor, moder, groups, publics, events} :yield: Groups
f2451:c0:m13
def __contains__(self, user_instance):
if not isinstance(user_instance, User):<EOL><INDENT>raise TypeError("<STR_LIT>".format(user_instance))<EOL><DEDENT>return bool(self._session.fetch("<STR_LIT>", group_id=self.id, user_id=user_instance.id))<EOL>
https://vk.com/dev/groups.isMember
f2451:c0:m16
@classmethod<EOL><INDENT>def from_json(cls, session, photo_json):<DEDENT>
photo = cls()<EOL>photo.id = photo_json.get('<STR_LIT:id>')<EOL>photo.album_id = photo_json.get('<STR_LIT>')<EOL>photo.owner_id = photo_json.get('<STR_LIT>')<EOL>photo.user_id = photo_json.get('<STR_LIT>')<EOL>photo.text = photo_json.get('<STR_LIT:text>')<EOL>photo.type = "<STR_LIT>"<EOL>photo.date = photo_json.get('<STR_LIT:date>')<EOL>photo.photo_75 = photo_json.get('<STR_LIT>')<EOL>photo.photo_130 = photo_json.get('<STR_LIT>')<EOL>photo.photo_604 = photo_json.get('<STR_LIT>')<EOL>photo.photo_807 = photo_json.get('<STR_LIT>')<EOL>photo.photo_1280 = photo_json.get('<STR_LIT>')<EOL>photo.photo_2560 = photo_json.get('<STR_LIT>')<EOL>photo._session = session<EOL>return photo<EOL>
https://vk.com/dev/objects/photo
f2452:c0:m0
@staticmethod<EOL><INDENT>def _get_photos(session, user_or_group_id):<DEDENT>
response = session.fetch_items("<STR_LIT>", Photo.from_json, count=<NUM_LIT:200>, owner_id=user_or_group_id)<EOL>return response<EOL>
https://vk.com/dev/photos.getAll
f2452:c0:m2
@staticmethod<EOL><INDENT>def _get_owner_cover_photo_upload_server(session, group_id, crop_x=<NUM_LIT:0>, crop_y=<NUM_LIT:0>, crop_x2=<NUM_LIT>, crop_y2=<NUM_LIT:200>):<DEDENT>
group_id = abs(group_id)<EOL>response = session.fetch("<STR_LIT>", group_id=group_id, crop_x=crop_x, crop_y=crop_y, crop_x2=crop_x2, crop_y2=crop_y2)<EOL>return response['<STR_LIT>']<EOL>
https://vk.com/dev/photos.getOwnerCoverPhotoUploadServer
f2452:c0:m3
@staticmethod<EOL><INDENT>def _save_owner_cover_photo(session, hash, photo):<DEDENT>
response = session.fetch('<STR_LIT>', hash=hash, photo=photo)<EOL>return response<EOL>
https://vk.com/dev/photos.saveOwnerCoverPhoto
f2452:c0:m4
@staticmethod<EOL><INDENT>def _get_wall_upload_server(session, group_id):<DEDENT>
response = session.fetch("<STR_LIT>", group_id=group_id)<EOL>return response['<STR_LIT>']<EOL>
https://vk.com/dev/photos.getWallUploadServer
f2452:c0:m5
@staticmethod<EOL><INDENT>def _get_save_wall_photo(session, photo, server, hash, user_id=None, group_id=None):<DEDENT>
if group_id < <NUM_LIT:0>:<EOL><INDENT>group_id = abs(group_id)<EOL><DEDENT>response = session.fetch("<STR_LIT>", photo=photo, server=server, hash=hash, user_id=user_id, group_id=group_id)[<NUM_LIT:0>]<EOL>return response['<STR_LIT:id>'], response['<STR_LIT>']<EOL>
https://vk.com/dev/photos.saveWallPhoto
f2452:c0:m6
@staticmethod<EOL><INDENT>def _get_messages_upload_server(session, peer_id):<DEDENT>
response = session.fetch("<STR_LIT>", peer_id=peer_id)<EOL>return response['<STR_LIT>']<EOL>
https://vk.com/dev/photos.getMessagesUploadServer
f2452:c0:m8
@staticmethod<EOL><INDENT>def _get_save_messages_photo(session, photo, server, hash):<DEDENT>
response = session.fetch("<STR_LIT>", photo=photo, server=server, hash=hash)[<NUM_LIT:0>]<EOL>return response['<STR_LIT:id>'], response['<STR_LIT>']<EOL>
https://vk.com/dev/photos.saveMessagesPhoto
f2452:c0:m9
@staticmethod<EOL><INDENT>def _get_friends(session, user_id):<DEDENT>
response = session.fetch('<STR_LIT>', user_id=user_id)<EOL>return response["<STR_LIT>"]<EOL>
https://vk.com/dev/friends.get
f2454:c0:m0
@staticmethod<EOL><INDENT>def _get_friends_count(session, user_id):<DEDENT>
response = session.fetch('<STR_LIT>', user_id=user_id, count=<NUM_LIT:1>)<EOL>return response["<STR_LIT:count>"]<EOL>
https://vk.com/dev/friends.get
f2454:c0:m1
@staticmethod<EOL><INDENT>def _send_message(session, user_id, message=None, image_files=None):<DEDENT>
assert any([message, image_files])<EOL>attachment_items = None<EOL>if image_files:<EOL><INDENT>attachment_items = Photo._upload_messages_photos_for_group(session, user_id, image_files)<EOL><DEDENT>message_id = session.fetch("<STR_LIT>", user_id=user_id, message=message, attachment=attachment_items, random_id=random.randint(<NUM_LIT:1>, <NUM_LIT:10>**<NUM_LIT:6>))<EOL>return message_id<EOL>
https://vk.com/dev/messages.send
f2456:c0:m1
@staticmethod<EOL><INDENT>def set_typing(session, user_id):<DEDENT>
session.fetch("<STR_LIT>", user_id=user_id, type="<STR_LIT>")<EOL>
https://vk.com/dev/messages.setActivity
f2456:c0:m2
@staticmethod<EOL><INDENT>def get_dialog(session, unread=False, important=False, unanswered=False):<DEDENT>
response = session.fetch("<STR_LIT>", unread=unread, important=important, unanswered=unanswered)<EOL>dialog_json_items = response["<STR_LIT>"]<EOL>return (Message.from_json(session, dialog_json["<STR_LIT:message>"]) for dialog_json in dialog_json_items)<EOL>
https://vk.com/dev/messages.getDialogs
f2456:c0:m3
def get_url_implicit_flow_user(client_id, scope,<EOL>redirect_uri='<STR_LIT>', display='<STR_LIT>',<EOL>response_type='<STR_LIT>', version=None, state=None, revoke=<NUM_LIT:1>):
url = "<STR_LIT>"<EOL>params = {<EOL>"<STR_LIT>": client_id,<EOL>"<STR_LIT>": scope,<EOL>"<STR_LIT>": redirect_uri,<EOL>"<STR_LIT>": display,<EOL>"<STR_LIT>": response_type,<EOL>"<STR_LIT:version>": version,<EOL>"<STR_LIT:state>": state,<EOL>"<STR_LIT>": revoke<EOL>}<EOL>params = {key: value for key, value in params.items() if value is not None}<EOL>return u"<STR_LIT>".format(url=url, params=urlencode(params))<EOL>
https://vk.com/dev/implicit_flow_user :return: url
f2457:m0
def get_url_authcode_flow_user(client_id, redirect_uri, display="<STR_LIT>", scope=None, state=None):
url = "<STR_LIT>"<EOL>params = {<EOL>"<STR_LIT>": client_id,<EOL>"<STR_LIT>": redirect_uri,<EOL>"<STR_LIT>": display,<EOL>"<STR_LIT>": "<STR_LIT:code>"<EOL>}<EOL>if scope:<EOL><INDENT>params['<STR_LIT>'] = scope<EOL><DEDENT>if state:<EOL><INDENT>params['<STR_LIT:state>'] = state<EOL><DEDENT>return u"<STR_LIT>".format(url=url, params=urlencode(params))<EOL>
Authorization Code Flow for User Access Token. Use the Authorization Code Flow to run VK API methods from the server side of an application. An access token received this way is not bound to an IP address, but the set of permissions that can be granted is limited for security reasons. Args: client_id (int): Application id. redirect_uri (str): Address to redirect user after authorization. display (str): Sets authorization page appearance. One of: {`page`, `popup`, `mobile`}. Defaults to `page`. scope (:obj:`str`, optional): Permissions bit mask, to check on authorization and request if necessary. More on scope: https://vk.com/dev/permissions state (:obj:`str`, optional): An arbitrary string that will be returned together with the authorization result. Returns: str: Url Examples: >>> vk.get_url_authcode_flow_user(1, 'http://example.com/', scope="wall,email") 'https://oauth.vk.com/authorize?client_id=1&display=page&redirect_uri=http://example.com/&scope=wall,email&response_type=code' .. _Docs: https://vk.com/dev/authcode_flow_user
f2457:m1