Python
def _yt_comments_call(self, video_id: str, max_results: int, pageToken: str = None):
    """Call the YouTube API to get comments from a video.

    Args:
    - video_id: YouTube video id
    - max_results: maximum number of comments to retrieve
    - pageToken: token of the results page to fetch (None for the first page)

    Returns:
    - response: the raw commentThreads API response
    """
    response = self.api.commentThreads().list(
        part="snippet,replies",
        maxResults=max_results,
        pageToken=pageToken,
        videoId=video_id).execute()
    self.call_counter += 1
    return response
Python
def normalize_raw_text(data: List[RawText]) -> List[Dict[Any, Any]]:
    """Normalize RawText objects to a list of dictionaries.

    Args:
    - data: A list of RawText objects.

    Returns:
    - A list of dictionaries with ISO 8601 date strings.
    """
    new_data = []
    for item in data:
        item = item.dict()
        item["created_at"] = item["created_at"].isoformat()
        item["collected_at"] = item["collected_at"].isoformat()
        new_data.append(item)
    return new_data
Python
def check_words(text: str, words: list) -> bool:
    """Check whether the text contains every word in the list.

    Args:
    - text: The text to check.
    - words: The words to check for.

    Returns:
    - True if the text contains all the words, False otherwise.
    """
    for word in words:
        if word not in text:
            return False
    return True
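A minimal usage sketch for check_words; the sample strings below are made up for illustration. Note that the membership test is a plain substring check, so partial matches also count.

# Hypothetical examples of check_words; the strings are illustrative only.
print(check_words("the quick brown fox", ["quick", "fox"]))   # True: both words occur
print(check_words("the quick brown fox", ["quick", "dog"]))   # False: "dog" is missing
# Because "word not in text" is a substring test, partial matches also count:
print(check_words("concatenate", ["cat"]))                    # True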
Python
def prepare_data_to_px(df: pd.DataFrame) -> pd.DataFrame:
    """Prepare the data to be used in a plotly graph.

    Args:
    - df: A pandas dataframe with each column being an annotator and each row being a label.

    Returns:
    - A pandas dataframe with the following columns: Annotator, Label, Count.
    """
    data = []
    for annotator in df.columns:
        for label, count in df[annotator].value_counts().items():
            data.append({"Annotator": annotator, "Label": label, "Count": count})
    return pd.DataFrame(data)
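A small usage sketch for prepare_data_to_px; the annotation table and the annotator names (ann_1, ann_2) are made up for illustration.

import pandas as pd

# Hypothetical annotation table: one column per annotator, one row per item.
df = pd.DataFrame({
    "ann_1": ["toxic", "ok", "ok", "toxic"],
    "ann_2": ["ok", "ok", "toxic", "toxic"],
})
long_df = prepare_data_to_px(df)
# long_df has the columns Annotator, Label, Count, e.g.:
#   Annotator  Label  Count
#   ann_1      toxic      2
#   ann_1      ok         2
#   ann_2      ok         2
#   ann_2      toxic      2
# It can then be passed to plotly express, e.g. px.bar(long_df, x="Label", y="Count", color="Annotator").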
Python
def dict_serialize_date(data: List[Dict[Any, Any]], keys: List[Any]) -> List[Dict[Any, Any]]:
    """Serialize keys that hold dates in a list of dictionaries to ISO 8601 strings.

    Args:
    - data: A list of dictionaries.
    - keys: A list of keys to serialize.

    Returns:
    - A list of dictionaries with the date values replaced by ISO 8601 strings.
    """
    new_data = []
    for item in data:
        for key in keys:
            if key in item and isinstance(item[key], datetime.datetime):
                item[key] = item[key].isoformat()
        new_data.append(item)
    return new_data
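A short usage sketch for dict_serialize_date; the records below are hypothetical.

import datetime

# Hypothetical records; "created_at" holds a datetime, "id" is left untouched.
records = [
    {"id": 1, "created_at": datetime.datetime(2022, 5, 1, 12, 30)},
    {"id": 2, "created_at": "already-a-string"},  # non-datetime values are skipped
]
print(dict_serialize_date(records, keys=["created_at"]))
# [{'id': 1, 'created_at': '2022-05-01T12:30:00'}, {'id': 2, 'created_at': 'already-a-string'}]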
Python
def predict(self, text: str) -> Dict[str, float]:
    """Predict toxicity of a comment.

    Parameters:
        text (str): The text to predict toxicity for.

    Returns:
        A dictionary containing the predicted toxicity scores, e.g. {'TOXICITY': 0.5}.
    """
    _logger.debug(f"Predicting for text: {text}")
    analyze_request = {
        "comment": {"text": text},
        "languages": ["pt"],
        "requestedAttributes": {"TOXICITY": {}}
    }
    response = self.client.comments().analyze(body=analyze_request).execute()
    _logger.debug(f"Got response: {response}")
    try:
        toxicity = response["attributeScores"]["TOXICITY"]["summaryScore"]["value"]
        toxicity = round(toxicity, 4)
    except Exception as error:
        _logger.error(f"Error getting toxicity: {error}")
        toxicity = None
    time.sleep(self.wait_time)
    _logger.debug(f"Finished predicting for text: {text}")
    return {"TOXICITY": toxicity}
Python
def percent_agreement(reliability_data: Union[List[List[Any]], pd.DataFrame]) -> float:
    """Calculate the percentage agreement between raters.

    Args:
    - reliability_data: The reliability data. Format is as follows:
        [[1, 1, 0, ..., 0, 0, 1],  # CoderA
         [1, 1, 1, ..., 0, 1, 0],  # CoderB
         [1, 1, 0, ..., 0, 1, 0]]  # CoderC
        Each row is the list of annotations by a given annotator.

    Returns:
    - The percentage agreement as a float.
    """
    if isinstance(reliability_data, pd.DataFrame):
        # columns are annotators; transpose so that each row holds one annotator's labels
        reliability_data = reliability_data.transpose().values.tolist()
    elif isinstance(reliability_data, list):
        # already one annotator per row
        reliability_data = [list(annotator) for annotator in reliability_data]
    else:
        raise ValueError("reliability_data must be a list or a DataFrame.")

    agree_on = 0
    non_agree_on = 0
    # each element of the zip is the tuple of labels all annotators gave to one item
    for annotations in zip(*reliability_data):
        if len(set(annotations)) == 1:
            agree_on += 1
        else:
            non_agree_on += 1
    return agree_on / (agree_on + non_agree_on)
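A quick usage sketch for percent_agreement with made-up binary annotations from three coders.

# Hypothetical reliability data: one row per annotator, one column per item.
reliability_data = [
    [1, 1, 0, 0],  # CoderA
    [1, 1, 1, 0],  # CoderB
    [1, 1, 0, 0],  # CoderC
]
# All three coders agree on items 1, 2 and 4 and disagree on item 3 -> 3/4
print(percent_agreement(reliability_data))  # 0.75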
Python
def apply_all(self, text: str) -> str:
    """Applies all anonymization methods to a given text.

    Args:
    - text: The text to be anonymized.

    Returns:
    - The anonymized text.
    """
    _logger.debug(f"Anonymizing text: {text}")
    text = self.remove_users(text)
    text = self.remove_urls(text)
    text = self.remove_hashtags(text)
    text = self.remove_names(text)
    _logger.debug(f"Anonymized text: {text}")
    return text
Python
def remove_users(self, text: str) -> str:
    """Regex that removes user mentions from a given text.

    Args:
    - text: The text to be anonymized.

    Returns:
    - The anonymized text.
    """
    return re.sub(r'@\w+', self.user_placeholder, text)
Python
def remove_urls(self, text: str) -> str:
    """Regex that removes URLs from a given text.

    Args:
    - text: The text to be anonymized.

    Returns:
    - The anonymized text.
    """
    return re.sub(r'http\S+', self.url_placeholder, text)
Python
def remove_hashtags(self, text: str) -> str:
    """Regex that removes hashtags from a given text.

    Args:
    - text: The text to be anonymized.

    Returns:
    - The anonymized text.
    """
    return re.sub(r'#\S+', self.hashtag_placeholder, text)
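These three methods are plain regex substitutions; a minimal standalone sketch of the same patterns, with made-up placeholder strings standing in for self.user_placeholder, self.url_placeholder and self.hashtag_placeholder.

import re

text = "Thanks @maria, details at https://example.org #announcement"
text = re.sub(r'@\w+', "[USER]", text)      # stands in for self.user_placeholder
text = re.sub(r'http\S+', "[URL]", text)    # stands in for self.url_placeholder
text = re.sub(r'#\S+', "[HASHTAG]", text)   # stands in for self.hashtag_placeholder
print(text)  # "Thanks [USER], details at [URL] [HASHTAG]"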
Python
def majority_vote(annotations: List[Any]):
    """Returns the majority vote of the annotations.

    Args:
    - annotations: A list of annotations.

    Returns:
    - The majority vote of the annotations.
    """
    # Raise a warning if the annotation list has an even length, since a tie is possible.
    if len(annotations) % 2 == 0:
        warnings.warn("The annotation list is even. In case of a tie the returned vote is arbitrary.")
    return max(Counter(annotations).items(), key=lambda x: x[1])[0]
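A brief usage sketch for majority_vote with hypothetical labels from a few annotators.

# Hypothetical labels from three annotators.
print(majority_vote(["toxic", "ok", "toxic"]))        # "toxic"
# Even number of annotations: a tie is possible, so a warning is emitted.
print(majority_vote(["toxic", "ok", "ok", "toxic"]))  # tie: the first label reaching the maximum count wins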
Python
def at_least_one(annotations: List[bool]) -> bool:
    """Returns True if at least one of the annotations is True.

    Args:
    - annotations: A list of annotations.

    Returns:
    - True if at least one of the annotations is True.
    """
    return any(annotations)
Python
def all_labeled_spans(annotations: List[Any]) -> List[Any]:
    """Returns all spans that are labeled by at least one annotator.

    Args:
    - annotations: A list of annotations.

    Returns:
    - A list of spans (e.g. [0, 1, 2, ...]).
    """
    spans = []
    for annotation in annotations:
        spans.extend(annotation)
    spans = list(set(spans))
    spans.sort()
    return spans
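A tiny usage sketch for all_labeled_spans with made-up token indices.

# Hypothetical span annotations (token indices) from three annotators.
annotations = [
    [0, 1, 2],
    [1, 2, 3],
    [7],
]
print(all_labeled_spans(annotations))  # [0, 1, 2, 3, 7]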
Python
def fit(self, dataNoise, dataConvolution, prior_method="vague", priors=None, bias=None, **kwargs):
    """
    Fit the model to the posterior distribution.

    Parameters
    ------------
    dataNoise: list
        1D array with the data of the noise
    dataConvolution: list
        1D array with the data of the convolution
    **kwargs:
        Arguments to be passed to the *NestedSampler* and *run_nested* functions from the dynesty package

    Returns
    ------------
    Nothing
    """
    self.data = dataNoise
    self.datac = dataConvolution
    if bias is None:
        m = np.min([dataNoise, dataConvolution])
        if m < 0:
            self.bias = m - 0.01
        else:
            self.bias = 0
    else:
        self.bias = bias

    # separate kwargs for the two different sampler functions
    # kwargs accepted by the NestedSampler constructor
    nestedsampler_args = [k for k, v in inspect.signature(dn.NestedSampler).parameters.items()]
    nestedsampler_dict = {k: kwargs.pop(k) for k in dict(kwargs) if k in nestedsampler_args}
    if "sample" not in nestedsampler_dict:
        nestedsampler_dict["sample"] = "rslice"

    # kwargs forwarded to run_nested (note: this re-inspects the NestedSampler signature)
    run_nested_args = [k for k, v in inspect.signature(dn.NestedSampler).parameters.items()]
    run_nested_dict = {k: kwargs.pop(k) for k in dict(kwargs) if k in run_nested_args}

    # make fit
    gdposteriormodelgamma.__init__(self, dataNoise, dataConvolution, self.K, self.Kc)
    if prior_method == "vague":
        if priors is None and self.priors == []:
            m = np.mean(dataNoise - self.bias)
            v = np.var(dataNoise - self.bias)
            m2 = np.mean(dataConvolution - self.bias)
            v2 = np.var(dataConvolution - self.bias)
            self.priors = [10*(v/m)**0.5, 1.1, 10*(m**2/v)**0.5, 1.1,
                           10*(v2/m2)**0.5, 1.1, 10*(m2**2/v2)**0.5, 1.1]
        elif self.priors == []:
            self.priors = priors
        dynestyModel = dn.NestedSampler(self.logLikelihood, self.prior,
                                        3*self.K + 3*self.Kc, **nestedsampler_dict)
    elif prior_method == "uniform":
        if priors is None and self.priors == []:
            m = np.mean(dataNoise - self.bias)
            v = np.var(dataNoise - self.bias)
            m2 = np.mean(dataConvolution - self.bias)
            v2 = np.var(dataConvolution - self.bias)
            self.priors = [0, 10*(v/m)**0.5, 0, 10*(m**2/v)**0.5,
                           0, 10*(v2/m2)**0.5, 0, 10*(m2**2/v2)**0.5]
        elif self.priors == []:
            self.priors = priors
        dynestyModel = dn.NestedSampler(self.logLikelihood, self.prior_uniform,
                                        3*self.K + 3*self.Kc, **nestedsampler_dict)

    dynestyModel.run_nested(**run_nested_dict)
    self.results = {}
    self.results["samples"] = dynestyModel.results["samples"]
    self.results["logwt"] = dynestyModel.results["logwt"]
    self.results["evidence"] = dynestyModel.results["logz"][-1]

    weightMax = np.max(self.results["logwt"])
    self.weights = np.exp(self.results["logwt"] - weightMax)
    self.weights = self.weights / np.sum(self.weights)
    self.samples = self.results["samples"]
    self.fitted = True

    return
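fit splits its **kwargs between the sampler constructor and run_nested by inspecting function signatures. A minimal standalone sketch of that pattern with made-up functions (build and run are illustrative stand-ins, not part of dynesty):

import inspect

def build(nlive=500, sample="auto"):            # stands in for dn.NestedSampler
    return {"nlive": nlive, "sample": sample}

def run(maxiter=None, dlogz=0.01):              # stands in for run_nested
    return {"maxiter": maxiter, "dlogz": dlogz}

kwargs = {"nlive": 1000, "dlogz": 0.5}
build_args = set(inspect.signature(build).parameters)
build_kwargs = {k: kwargs.pop(k) for k in dict(kwargs) if k in build_args}
run_args = set(inspect.signature(run).parameters)
run_kwargs = {k: kwargs.pop(k) for k in dict(kwargs) if k in run_args}
print(build_kwargs)  # {'nlive': 1000}
print(run_kwargs)    # {'dlogz': 0.5}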
Python
def prune(self, order=-1):
    """
    Prune the samples, removing those whose weights are orders of magnitude lower than the largest one.
    This may speed up drawing samples when the number of draws is huge.

    Parameters
    --------------
    order: int, order of magnitude below which to prune the samples

    Returns: nothing
    """
    weightMax = np.max(self.results["logwt"])
    self.weights = np.exp(self.results["logwt"] - weightMax)
    self.weights = self.weights / np.sum(self.weights)
    if order == -1:
        self.samples = self.results["samples"]
    else:
        select = (self.weights > self.weights.max() * 10**-order)
        self.weights = self.weights[select]
        self.weights = self.weights / np.sum(self.weights)
        self.samples = self.results["samples"][select]
    return
Python
def save(self, name):
    """
    Pickle save the model.

    Parameters
    ----------------
    name: string, name in which to store the model

    Return: nothing
    """
    if self.fitted:
        with open(name + ".pickle", "wb") as pickling_on:
            pk.dump({"K": self.K, "Kc": self.Kc,
                     "weights": self.results["logwt"],
                     "samples": self.results["samples"],
                     "evidence": self.results["evidence"],
                     "bias": self.bias}, pickling_on)
    else:
        print("The model has not been fitted so there is nothing to save.")
    return
Python
def load(self, name):
    """
    Pickle load the model.

    Parameters
    ----------------
    name: string, name from which to recover the model

    Return: nothing
    """
    with open(name + ".pickle", "rb") as pickle_off:
        aux = pk.load(pickle_off)
    self.K = aux["K"]
    self.Kc = aux["Kc"]
    self.results = {}
    self.results["logwt"] = aux["weights"]
    self.results["samples"] = aux["samples"]
    self.results["evidence"] = aux["evidence"]
    self.bias = aux["bias"]
    self.prune()
    self.fitted = True
    return
Python
def sample_autofluorescence(self, size=1, style="full", pos=None):
    """
    Generate samples from the fitted posterior distribution according to the noise distribution.

    Parameters
    --------------
    size: int, number of samples to be drawn
    style: string ("full" or "single"), sample across the full posterior, or draw all the samples from the same posterior draw
    pos: int, index of the posterior draw to use when style = "single" (chosen at random if None)

    Returns:
        list: 1D array with *size* samples from the model
    """
    if style == "full":
        return np.array(sample_autofluorescence_gamma(self.samples, self.K, self.Kc,
                                                      weights=self.weights, size=size, bias=0)) + self.bias
    elif style == "single":
        if pos is None:
            pos = np.random.choice(range(len(self.samples)), p=self.weights)
        return np.array(sample_autofluorescence_gamma(self.samples, self.K, self.Kc,
                                                      weights=self.weights, size=size, pos=pos, bias=0)) + self.bias
    return
Python
def sample_deconvolution(self, size=1, style="full", pos=None):
    """
    Generate samples from the fitted posterior distribution according to the deconvolved distribution.

    Parameters
    --------------
    size: int, number of samples to be drawn
    style: string ("full" or "single"), sample across the full posterior, or draw all the samples from the same posterior draw
    pos: int, index of the posterior draw to use when style = "single" (chosen at random if None)

    Returns:
        list: 1D array with *size* samples from the model
    """
    if style == "full":
        return np.array(sample_deconvolution_gamma(self.samples, self.K, self.Kc,
                                                   weights=self.weights, size=size, bias=0)) + self.bias
    elif style == "single":
        if pos is None:
            pos = np.random.choice(range(len(self.samples)), p=self.weights)
        return np.array(sample_deconvolution_gamma(self.samples, self.K, self.Kc,
                                                   weights=self.weights, size=size, pos=pos, bias=0)) + self.bias
    return
Python
def sample_convolution(self, size=1, style="full", pos=None):
    """
    Generate samples from the fitted posterior distribution according to the convolved distribution.

    Parameters
    --------------
    size: int, number of samples to be drawn
    style: string ("full" or "single"), sample across the full posterior, or draw all the samples from the same posterior draw
    pos: int, index of the posterior draw to use when style = "single" (chosen at random if None)

    Returns:
        list: 1D array with *size* samples from the model
    """
    if style == "full":
        return np.array(sample_convolution_gamma(self.samples, self.K, self.Kc,
                                                 weights=self.weights, size=size, bias=0)) + 2*self.bias
    elif style == "single":
        if pos is None:
            pos = np.random.choice(range(len(self.samples)), p=self.weights)
        return np.array(sample_convolution_gamma(self.samples, self.K, self.Kc,
                                                 weights=self.weights, size=size, pos=pos, bias=0)) + 2*self.bias
    return
Python
def score_autofluorescence(self, x, percentiles=[5, 95], size=100):
    """
    Evaluate the mean and percentiles of the pdf at the given positions according to the noise distribution.

    Parameters
    -------------
    x: list/array, positions where to evaluate the distribution
    percentiles: list/array, percentiles to be evaluated
    size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability

    Returns
    -------------
    list: 2D array with the mean and all the percentile evaluations at all points in x
    """
    yT = []
    for l in range(size):
        i = np.random.choice(len(self.weights), p=self.weights)
        y = np.zeros(len(x))
        for k in range(self.K):
            thetastar = self.samples[i, self.K+k]
            kconststar = self.samples[i, 2*self.K+k]
            y += self.samples[i, k] * gamma.pdf(x, a=kconststar, scale=thetastar)
        yT.append(y)
    return np.mean(yT, axis=0), np.percentile(yT, percentiles, axis=0)
Python
def score_deconvolution(self, x, percentiles=[5, 95], size=100):
    """
    Evaluate the mean and percentiles of the pdf at the given positions according to the deconvolved distribution.

    Parameters
    -------------
    x: list/array, positions where to evaluate the distribution
    percentiles: list/array, percentiles to be evaluated
    size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability

    Returns
    -------------
    list: 2D array with the mean and all the percentile evaluations at all points in x
    """
    yT = []
    for l in range(size):
        i = np.random.choice(len(self.weights), p=self.weights)
        y = np.zeros(len(x))
        for j in range(self.Kc):
            thetastar = self.samples[i, 3*self.K+self.Kc+j]
            kconststar = self.samples[i, 3*self.K+2*self.Kc+j]
            y += self.samples[i, 3*self.K+j] * gamma.pdf(x, a=kconststar, scale=thetastar)
        yT.append(y)
    return np.mean(yT, axis=0), np.percentile(yT, percentiles, axis=0)
Python
def score_convolution(self, x, percentiles=[5, 95], size=100):
    """
    Evaluate the mean and percentiles of the pdf at the given positions according to the convolved distribution.

    Parameters
    -------------
    x: list/array, positions where to evaluate the distribution
    percentiles: list/array, percentiles to be evaluated
    size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability

    Returns
    -------------
    list: 2D array with the mean and all the percentile evaluations at all points in x
    """
    yT = []
    for l in range(size):
        i = np.random.choice(len(self.weights), p=self.weights)
        y = np.zeros(len(x))
        for j in range(self.Kc):
            for k in range(self.K):
                theta1 = self.samples[i, self.K+k]
                theta2 = self.samples[i, 3*self.K+self.Kc+j]
                k1 = self.samples[i, 2*self.K+k]
                k2 = self.samples[i, 3*self.K+2*self.Kc+j]
                # moment-match the sum of the two gamma components with a single gamma
                mu = theta1*k1 + theta2*k2                    # mean of the sum
                s = theta1*theta1*k1 + theta2*theta2*k2       # variance of the sum
                thetastar = s / mu
                kconststar = mu*mu / s
                y += self.samples[i, k] * self.samples[i, 3*self.K+j] * gamma.pdf(x, a=kconststar, scale=thetastar)
        yT.append(y)
    return np.mean(yT, axis=0), np.percentile(yT, percentiles, axis=0)
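score_convolution approximates the sum of a noise gamma component and a signal gamma component by a single gamma with matched mean and variance. A small standalone check of that approximation, using made-up shape/scale values:

import numpy as np

rng = np.random.default_rng(0)
k1, theta1 = 2.0, 1.5   # illustrative shape/scale of the first gamma component
k2, theta2 = 3.0, 0.5   # illustrative shape/scale of the second gamma component

# moment matching: mean and variance of the sum of independent gammas
mu = theta1*k1 + theta2*k2
s = theta1**2*k1 + theta2**2*k2
thetastar, kconststar = s/mu, mu**2/s

samples = rng.gamma(k1, theta1, 100000) + rng.gamma(k2, theta2, 100000)
print(samples.mean(), mu)   # both close to 4.5
print(samples.var(), s)     # both close to 5.25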
Python
def save(self, name):
    """
    Pickle save the model.

    Parameters
    ----------------
    name: string, name in which to store the model

    Return: nothing
    """
    if self.fitted:
        with open(name + ".pickle", "wb") as pickling_on:
            pk.dump({"K": self.K, "Kc": self.Kc,
                     "alpha": self.alpha, "alphac": self.alphac,
                     "iterations": self.iterations,
                     "ignored_iterations": self.ignored_iterations,
                     "priortheta_k": self.priortheta_k, "priortheta_theta": self.priortheta_theta,
                     "priork_k": self.priork_k, "priork_theta": self.priork_theta,
                     "priortheta_kc": self.priortheta_kc, "priortheta_thetac": self.priortheta_thetac,
                     "priork_thetac": self.priork_thetac,
                     "bias": self.bias,
                     "chains": self.chains, "samples": self.samples}, pickling_on)
    else:
        print("The model has not been fitted so there is nothing to save.")
    return
Python
def load(self, name):
    """
    Pickle load the model.

    Parameters
    ----------------
    name: string, name from which to recover the model

    Return: nothing
    """
    with open(name + ".pickle", "rb") as pickle_off:
        aux = pk.load(pickle_off)
    self.K = aux["K"]
    self.Kc = aux["Kc"]
    self.alpha = aux["alpha"]
    self.alphac = aux["alphac"]
    self.iterations = aux["iterations"]
    self.ignored_iterations = aux["ignored_iterations"]
    self.chains = aux["chains"]
    self.samples = aux["samples"]
    self.priortheta_k = aux["priortheta_k"]
    self.priortheta_theta = aux["priortheta_theta"]
    self.priork_k = aux["priork_k"]
    self.priork_theta = aux["priork_theta"]
    self.priortheta_kc = aux["priortheta_kc"]
    self.priortheta_thetac = aux["priortheta_thetac"]
    self.priork_thetac = aux["priork_thetac"]
    self.bias = aux["bias"]
    self.fitted = True
    return
Python
def sample_autofluorescence(self, size=1, style="full", pos=None):
    """
    Generate samples from the fitted posterior distribution according to the noise distribution.

    Parameters
    -------------
    size: int, number of samples to be drawn
    style: string ("full" or "single"), sample across the full posterior, or draw all the samples from the same posterior draw
    pos: int, index of the posterior draw to use when style = "single" (chosen at random if None)

    Returns
    -------------
    list: 1D array with *size* samples from the model
    """
    if style == "full":
        return np.array(sample_autofluorescence_gamma(self.samples, self.K, self.Kc, size=size, bias=0)) + self.bias
    elif style == "single":
        if pos is None:
            pos = np.random.choice(range(len(self.samples)))
        return np.array(sample_autofluorescence_gamma(self.samples, self.K, self.Kc, size=size, pos=pos, bias=0)) + self.bias
Python
def sample_deconvolution(self, size=1, style="full", pos=None):
    """
    Generate samples from the fitted posterior distribution according to the deconvolved distribution.

    Parameters
    -------------
    size: int, number of samples to be drawn
    style: string ("full" or "single"), sample across the full posterior, or draw all the samples from the same posterior draw
    pos: int, index of the posterior draw to use when style = "single" (chosen at random if None)

    Returns
    -------------
    list: 1D array with *size* samples from the model
    """
    if style == "full":
        return np.array(sample_deconvolution_gamma(self.samples, self.K, self.Kc, size=size, bias=0)) + self.bias
    elif style == "single":
        if pos is None:
            pos = np.random.choice(range(len(self.samples)))
        return np.array(sample_deconvolution_gamma(self.samples, self.K, self.Kc, size=size, pos=pos, bias=0)) + self.bias
Python
def sample_convolution(self, size=1, style="full", pos=None):
    """
    Generate samples from the fitted posterior distribution according to the convolved distribution.

    Parameters
    -------------
    size: int, number of samples to be drawn
    style: string ("full" or "single"), sample across the full posterior, or draw all the samples from the same posterior draw
    pos: int, index of the posterior draw to use when style = "single" (chosen at random if None)

    Returns
    -------------
    list: 1D array with *size* samples from the model
    """
    if style == "full":
        return np.array(sample_convolution_gamma(self.samples, self.K, self.Kc, size=size, bias=0)) + self.bias
    elif style == "single":
        if pos is None:
            pos = np.random.choice(range(len(self.samples)))
        return np.array(sample_convolution_gamma(self.samples, self.K, self.Kc, size=size, pos=pos, bias=0)) + self.bias
Python
def score_autofluorescence(self, x, percentiles=[5, 95], size=100):
    """
    Evaluate the mean and percentiles of the pdf at the given positions according to the noise distribution.

    Parameters
    -------------
    x: list/array, positions where to evaluate the distribution
    percentiles: list/array, percentiles to be evaluated
    size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability

    Returns
    -------------
    list: 2D array with the mean and all the percentile evaluations at all points in x
    """
    yT = []
    for l in range(size):
        i = np.random.choice(self.iterations)
        y = np.zeros(len(x))
        for k in range(self.K):
            thetastar = self.samples[i, self.K+k]
            kconststar = self.samples[i, 2*self.K+k]
            y += self.samples[i, k] * gamma.pdf(x, a=kconststar, scale=thetastar)
        yT.append(y)
    return np.mean(yT, axis=0), np.percentile(yT, percentiles, axis=0)
Python
def score_deconvolution(self, x, percentiles=[5, 95], size=100):
    """
    Evaluate the mean and percentiles of the pdf at the given positions according to the deconvolved distribution.

    Parameters
    -------------
    x: list/array, positions where to evaluate the distribution
    percentiles: list/array, percentiles to be evaluated
    size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability

    Returns
    -------------
    list: 2D array with the mean and all the percentile evaluations at all points in x
    """
    yT = []
    for l in range(size):
        i = np.random.choice(self.iterations)
        y = np.zeros(len(x))
        for j in range(self.Kc):
            thetastar = self.samples[i, 3*self.K+self.Kc+j]
            kconststar = self.samples[i, 3*self.K+2*self.Kc+j]
            y += self.samples[i, 3*self.K+j] * gamma.pdf(x, a=kconststar, scale=thetastar)
        yT.append(y)
    return np.mean(yT, axis=0), np.percentile(yT, percentiles, axis=0)
Python
def score_convolution(self, x, percentiles=[5, 95], size=100):
    """
    Evaluate the mean and percentiles of the pdf at the given positions according to the convolved distribution.

    Parameters
    -------------
    x: list/array, positions where to evaluate the distribution
    percentiles: list/array, percentiles to be evaluated
    size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability

    Returns
    -------------
    list: 2D array with the mean and all the percentile evaluations at all points in x
    """
    yT = []
    for l in range(size):
        i = np.random.choice(self.iterations)
        y = np.zeros(len(x))
        for j in range(self.Kc):
            for k in range(self.K):
                theta1 = self.samples[i, self.K+k]
                theta2 = self.samples[i, 3*self.K+self.Kc+j]
                k1 = self.samples[i, 2*self.K+k]
                k2 = self.samples[i, 3*self.K+2*self.Kc+j]
                # moment-match the sum of the two gamma components with a single gamma
                mu = theta1*k1 + theta2*k2                    # mean of the sum
                s = theta1*theta1*k1 + theta2*theta2*k2       # variance of the sum
                thetastar = s / mu
                kconststar = mu*mu / s
                y += self.samples[i, k] * self.samples[i, 3*self.K+j] * gamma.pdf(x, a=kconststar, scale=thetastar)
        yT.append(y)
    return np.mean(yT, axis=0), np.percentile(yT, percentiles, axis=0)
Python
def save(self, name):
    """
    Pickle save the model.

    Parameters
    ----------------
    name: string, name in which to store the model

    Return: nothing
    """
    if self.fitted:
        with open(name + ".pickle", "wb") as pickling_on:
            pk.dump({"K": self.K, "Kc": self.Kc,
                     "priors": self.priors,
                     "iterations": self.iterations,
                     "ignored_iterations": self.ignored_iterations,
                     "chains": self.chains, "samples": self.samples}, pickling_on)
    else:
        print("The model has not been fitted so there is nothing to save.")
    return
Python
def load(self, name):
    """
    Pickle load the model.

    Parameters
    ----------------
    name: string, name from which to recover the model

    Return: nothing
    """
    with open(name + ".pickle", "rb") as pickle_off:
        aux = pk.load(pickle_off)
    self.K = aux["K"]
    self.Kc = aux["Kc"]
    self.priors = aux["priors"]
    self.iterations = aux["iterations"]
    self.ignored_iterations = aux["ignored_iterations"]
    self.chains = aux["chains"]
    self.samples = aux["samples"]
    self.fitted = True
    return
Python
def sample_autofluorescence(self, size=1, style="full", pos=None):
    """
    Generate samples from the fitted posterior distribution according to the noise distribution.

    Parameters
    --------------
    size: int, number of samples to be drawn
    style: string ("full" or "single"), sample across the full posterior, or draw all the samples from the same posterior draw
    pos: int, index of the posterior draw to use when style = "single" (chosen at random if None)

    Returns:
        list: 1D array with *size* samples from the model
    """
    if style == "full":
        return np.array(sample_autofluorescence(self.samples, self.K, self.Kc, size=size))
    elif style == "single":
        if pos is None:
            pos = np.random.choice(range(len(self.samples)))
        return np.array(sample_autofluorescence(self.samples, self.K, self.Kc, size=size, pos=pos))
    return
Python
def sample_deconvolution(self, size=1, style="full", pos=None):
    """
    Generate samples from the fitted posterior distribution according to the deconvolved distribution.

    Parameters
    --------------
    size: int, number of samples to be drawn
    style: string ("full" or "single"), sample across the full posterior, or draw all the samples from the same posterior draw
    pos: int, index of the posterior draw to use when style = "single" (chosen at random if None)

    Returns:
        list: 1D array with *size* samples from the model
    """
    if style == "full":
        return np.array(sample_deconvolution(self.samples, self.K, self.Kc, size=size))
    elif style == "single":
        if pos is None:
            pos = np.random.choice(range(len(self.samples)))
        return np.array(sample_deconvolution(self.samples, self.K, self.Kc, size=size, pos=pos))
    return
Python
def sample_convolution(self, size=1, style="full", pos=None):
    """
    Generate samples from the fitted posterior distribution according to the convolved distribution.

    Parameters
    --------------
    size: int, number of samples to be drawn
    style: string ("full" or "single"), sample across the full posterior, or draw all the samples from the same posterior draw
    pos: int, index of the posterior draw to use when style = "single" (chosen at random if None)

    Returns:
        list: 1D array with *size* samples from the model
    """
    if style == "full":
        return np.array(sample_convolution(self.samples, self.K, self.Kc, size=size))
    elif style == "single":
        if pos is None:
            pos = np.random.choice(range(len(self.samples)))
        return np.array(sample_convolution(self.samples, self.K, self.Kc, size=size, pos=pos))
    return
Python
def score_autofluorescence(self, x, percentiles=[5, 95], size=100):
    """
    Evaluate the mean and percentiles of the pdf at the given positions according to the noise distribution.

    Parameters
    -------------
    x: list/array, positions where to evaluate the distribution
    percentiles: list/array, percentiles to be evaluated
    size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability

    Returns
    -------------
    list: 2D array with the mean and all the percentile evaluations at all points in x
    """
    yT = []
    for l in range(size):
        i = np.random.choice(self.iterations)
        y = np.zeros(len(x))
        for k in range(self.K):
            mu = self.samples[i, self.K+k]
            sigma = self.samples[i, 2*self.K+k]
            y += self.samples[i, k] * norm.pdf(x, loc=mu, scale=sigma)
        yT.append(y)
    return np.mean(yT, axis=0), np.percentile(yT, percentiles, axis=0)
Python
def score_deconvolution(self, x, percentiles=[5, 95], size=100):
    """
    Evaluate the mean and percentiles of the pdf at the given positions according to the deconvolved distribution.

    Parameters
    -------------
    x: list/array, positions where to evaluate the distribution
    percentiles: list/array, percentiles to be evaluated
    size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability

    Returns
    -------------
    list: 2D array with the mean and all the percentile evaluations at all points in x
    """
    yT = []
    for l in range(size):
        i = np.random.choice(self.iterations)
        y = np.zeros(len(x))
        for j in range(self.Kc):
            mu = self.samples[i, 3*self.K+self.Kc+j]
            sigma = self.samples[i, 3*self.K+2*self.Kc+j]
            y += self.samples[i, 3*self.K+j] * norm.pdf(x, loc=mu, scale=sigma)
        yT.append(y)
    return np.mean(yT, axis=0), np.percentile(yT, percentiles, axis=0)
Python
def score_convolution(self, x, percentiles=[5, 95], size=100):
    """
    Evaluate the mean and percentiles of the pdf at the given positions according to the convolved distribution.

    Parameters
    -------------
    x: list/array, positions where to evaluate the distribution
    percentiles: list/array, percentiles to be evaluated
    size: int, number of samples to draw from the posterior to compute the statistics; bigger numbers give more stability

    Returns
    -------------
    list: list, 2D array with the mean and all the percentile evaluations at all points in x
    """
    yT = []
    for l in range(size):
        i = np.random.choice(self.iterations)
        y = np.zeros(len(x))
        for j in range(self.Kc):
            for k in range(self.K):
                mu1 = self.samples[i, self.K + k]
                mu2 = self.samples[i, 3 * self.K + self.Kc + j]
                sigma1 = self.samples[i, 2 * self.K + k]
                sigma2 = self.samples[i, 3 * self.K + 2 * self.Kc + j]
                mu = mu1 + mu2  # mean of the convolution of the two normal components
                s = np.sqrt(sigma1 ** 2 + sigma2 ** 2)
                y += self.samples[i, k] * self.samples[i, 3 * self.K + j] * norm.pdf(x, loc=mu, scale=s)
        yT.append(y)

    return np.mean(yT, axis=0), np.percentile(yT, percentiles, axis=0)
Python
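A brief aside on the identity used by score_convolution above (an editorial sketch, not part of the original code): the convolution of two independent normal components is again normal, with mean mu1 + mu2 and standard deviation sqrt(sigma1^2 + sigma2^2), which is why a single norm.pdf call per pair of components suffices. A minimal numerical check, assuming only numpy and scipy:

import numpy as np
from scipy.stats import norm

# illustrative values, not taken from the original model
mu1, sigma1, mu2, sigma2 = 1.0, 0.5, 2.0, 1.0
samples = np.random.normal(mu1, sigma1, 100_000) + np.random.normal(mu2, sigma2, 100_000)
empirical, edges = np.histogram(samples, bins=50, range=(-2, 8), density=True)
centers = (edges[:-1] + edges[1:]) / 2
analytic = norm.pdf(centers, loc=mu1 + mu2, scale=np.sqrt(sigma1 ** 2 + sigma2 ** 2))
print(np.max(np.abs(analytic - empirical)))  # small: the histogram matches the closed form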
def send_mail(address, passwd):
    """
    Send verification emails based on the template below.
    """
    message = (
        "Hey there!\n\nYour HOBY Feedback account is ready to be logged "
        + "into. If you have any problems logging in, please contact the "
        + "operations staff who created your account."
        + "\n\nURL: https://feedback.hobynye.org\nPassword: {}\n\n"
        + "Please change this password once you have logged in.\n\n"
        + "Thanks,\nHOBY NYE\n"
        + "\n\nThis message was automatically generated by HOBY Feedback"
    )
    message = message.format(passwd)

    email = MIMEText(message)
    email["To"] = address
    email["From"] = "HOBY Feedback <{}>".format(app.config["EMAIL_USER"])
    email["Subject"] = "Your HOBY Feedback Account"
    email["Date"] = formatdate()

    server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
    server.login(app.config["EMAIL_USER"], app.config["EMAIL_PASS"])
    server.send_message(email)
    server.quit()
Python
def check_certs(certs):
    """Checks if certificate and configuration match"""
    path = certs + "*.crt"
    # add check for crt and ovpn files
    # if the crt does not exist, the user will not be checked
    names = [os.path.splitext(os.path.basename(item))[0] for item in glob.glob(path)]
    for user in names:
        try:
            cert_crt = open(CERT_DIR + user + ".crt", "r").read()
        except FileNotFoundError as err:
            print(
                f"{Colors.BADRED}{Colors.BOLD}Cannot open crt file for {user}. {err}{Colors.ENDC}"
            )
            exit()
        try:
            cert_ovpn = open(CERT_DIR + PREFIX + user + "-config.ovpn", "r").read()
        except FileNotFoundError as err:
            print(
                f"{Colors.BADRED}Cannot open ovpn file for {user}. {err}{Colors.ENDC}"
            )
            exit()
        try:
            cert_crt_match = REG_CRT.search(cert_crt).group("cert")
        except AttributeError as err:
            print(f" {user}: Bad - regexp cannot match! {err}")
            continue  # skip this user: the certificate could not be parsed
        try:
            cert_ovpn_match = REG_OVPN.search(cert_ovpn).group("cert")
        except AttributeError as err:
            print(f"{Colors.BADRED}Bad - regexp cannot match! {err}{Colors.ENDC}")
            continue  # skip this user: the ovpn config could not be parsed
        if cert_crt_match == cert_ovpn_match:
            print(f"{Colors.OKGREEN}{user}: OK{Colors.ENDC}")
        else:
            print(f"{Colors.BADRED}{user}: BAD{Colors.ENDC}")
Python
def colorWipe(strip, color, wait_ms=50):
    """Wipe color across display a pixel at a time."""
    for i in range(strip.numPixels()):
        strip.setPixelColor(i, color)
        strip.show()
        time.sleep(wait_ms / 1000.0)
Python
def blink(pos, times, color, wait_ms=50):
    """Blink *times* consecutive pixels one at a time, starting at *pos*."""
    for i in range(times):
        strip.setPixelColor(pos + i, color)
        strip.show()
        time.sleep(wait_ms / 1000.0)
        strip.setPixelColor(pos + i, Color(0, 0, 0))
        strip.show()
        time.sleep(wait_ms / 1000.0)
Python
def _get_topic_names_and_types(self, expected, timeout):
    """Make sure discovery has found all 'expected' topics."""
    start = time.monotonic()
    while True:
        topics = self.this_node.get_topic_names_and_types()
        now = time.monotonic()
        if all(expected_topic in topics for expected_topic in expected):
            return topics
        elif (now - start) < timeout:
            continue
        else:
            return None
Python
def _fake_glob_scores(nb_vid, scale=1):
    """Creates fake global scores for test

    nb_vid (int): number of videos "generated"
    scale (float): standard deviation of generated global scores

    Returns:
        (float array): fake global scores
    """
    glob_scores = np.random.normal(scale=scale, size=nb_vid)
    return glob_scores
Python
def _fake_s(nb_s, multiple_scales=True):
    """Returns random s parameters

    nb_s (int): number of s parameters required
    multiple_scales (bool): whether to draw s parameters or set all to 1

    Returns:
        (float array): random independent s parameters
    """
    if multiple_scales:
        return np.random.gamma(4, scale=0.3, size=nb_s)
    return np.ones(nb_s)
Python
def fetch_data():
    """Fetches the data from the Comparisons model

    Returns:
    - comparison_data: list of
        [contributor_id: int, video_id_1: int, video_id_2: int,
         criteria: str, score: float, weight: float]
    """
    comparison_data = [
        [
            ccs.comparison.user_id,
            ccs.comparison.entity_1_id,
            ccs.comparison.entity_2_id,
            ccs.criteria,
            ccs.score,
            ccs.weight,
        ]
        for ccs in ComparisonCriteriaScore.objects
        .filter(comparison__user__in=User.trusted_users())
        .prefetch_related("comparison")
    ]
    return comparison_data
Python
def save_data(video_scores, contributor_rating_scores):
    """
    Saves the scores for Entities and ContributorRatings
    """
    EntityCriteriaScore.objects.all().delete()
    EntityCriteriaScore.objects.bulk_create(
        [
            EntityCriteriaScore(
                entity_id=video_id,
                criteria=criteria,
                score=score,
                uncertainty=uncertainty,
            )
            for video_id, criteria, score, uncertainty in video_scores
        ]
    )

    rating_ids = {
        (contributor_id, video_id): rating_id
        for rating_id, contributor_id, video_id in ContributorRating.objects.all().values_list(
            "id", "user_id", "entity_id"
        )
    }
    ratings_to_create = set(
        (contributor_id, video_id)
        for contributor_id, video_id, _, _, _ in contributor_rating_scores
        if (contributor_id, video_id) not in rating_ids
    )
    created_ratings = ContributorRating.objects.bulk_create(
        [
            ContributorRating(
                entity_id=video_id,
                user_id=contributor_id,
            )
            for contributor_id, video_id in ratings_to_create
        ]
    )
    rating_ids.update(
        {(rating.user_id, rating.entity_id): rating.id for rating in created_ratings}
    )

    ContributorRatingCriteriaScore.objects.all().delete()
    ContributorRatingCriteriaScore.objects.bulk_create(
        [
            ContributorRatingCriteriaScore(
                contributor_rating_id=rating_ids[(contributor_id, video_id)],
                criteria=criteria,
                score=score,
                uncertainty=uncertainty,
            )
            for contributor_id, video_id, criteria, score, uncertainty in contributor_rating_scores
        ]
    )
Python
def lookups(self, request, model_admin):
    """
    Returns a list of tuples. The first element in each
    tuple is the coded value for the option that will
    appear in the URL query. The second element is the
    human-readable name for the option that will appear
    in the right sidebar.
    """
    return (
        ('1', 'Yes'),
        ('0', 'No'),
    )
Python
def api_get_tournesol_scores():
    """Get a dataframe with all videos from Tournesol."""
    response = requests.get(
        "https://api.tournesol.app/video/?limit=9999&unsafe=true"
    ).json()
    df = pd.DataFrame.from_dict(response["results"])
    for crit in CRITERIA:
        df[crit] = df.apply(lambda x: get_score(x, crit), axis=1)
    df.drop(columns=["criteria_scores"], inplace=True)
    df["tournesol_score"] = df[CRITERIA].sum(axis=1)
    return df
Python
def refresh_youtube_metadata(video):
    """
    Fetch and update video metadata from the YouTube API.

    This function is similar to `video.refresh_youtube_metadata(force=True)`
    but the code needs to be duplicated here, as versioned models used in
    migrations do not include custom model methods.
    """
    video.last_metadata_request_at = timezone.now()
    video.save(update_fields=["last_metadata_request_at"])
    try:
        metadata = get_video_metadata(video.video_id, compute_language=False)
    except VideoNotFound:
        metadata = {}

    if not metadata:
        return

    fields = [
        "name",
        "description",
        "publication_date",
        "uploader",
        "views",
        "duration",
        "metadata_timestamp",
    ]
    for f in fields:
        setattr(video, f, metadata[f])
    logging.info("Saving metadata for video %s. Duration: %s", video.video_id, video.duration)
    video.save(update_fields=fields)
Python
def forward_func(apps, schema_editor):
    """
    Refresh video metadata where duration is missing,
    due to previous implementation
    """
    Video = apps.get_model("tournesol", "Video")
    for video in Video.objects.filter(duration__isnull=True).iterator():
        refresh_youtube_metadata(video)
Python
def extract_grad(model):
    """returns list of gradients of a model

    model (float tensor): torch tensor with gradients

    Returns:
        (float tensor list): list of gradients of the model
    """
    return [p.grad for p in [model]]
Python
def scalar_product(l_grad1, l_grad2):
    """scalar product of 2 lists of gradients

    l_grad1 (float tensor list): list of gradients of a model
    l_grad2 (float tensor list): list of gradients of a model

    Returns:
        (float): scalar product of the gradients
    """
    s = 0
    for g1, g2 in zip(l_grad1, l_grad2):
        s += (g1 * g2).sum()
    return round_loss(s, 4)
Python
def replace_coordinate(tens, score, idx):
    """Replaces one coordinate of the tensor

    Args:
        tens (float tensor): local model
        score (scalar tensor): score to put in tens
        idx (int): index of score to replace

    Returns:
        (float tensor): same tensor as input but backward pointing to -score
    """
    size = len(tens)
    left, _, right = torch.split(tens, [idx, 1, size - idx - 1])
    return torch.cat([left, score, right])
Python
def _global_uncert(values, prior=4, weight=5):
    """Returns posterior value of median

    prior (float): value of prior median
    weight (int): weight of prior
    values (float list): data to take median of

    Returns:
        (float): global uncertainty for one video
    """
    full_values = values + [prior] * weight
    return median(full_values) / len(values) ** 0.5
Python
def _get_hessian_fun_loc(licch, uid, vidx):
    """Gives the loss as a function of the local model, for hessian computation

    Args:
        licch (Licchavi()): licchavi object
        uid (int): id of user
        vidx (int): index of video, ie index of parameter

    Returns:
        (scalar tensor -> float) function giving loss according to one score
    """
    def get_loss(score):
        """Used to compute its second derivative to get uncertainty

        input (float scalar tensor): one score

        Returns:
            (float scalar tensor): partial loss for 1 user, 1 video
        """
        new_model = replace_coordinate(licch.nodes[uid].model, score, vidx)
        licch.nodes[uid].model = new_model
        fit_loss, _, gen_loss = loss_fit_s_gen(licch, vidx, uid)
        return fit_loss + gen_loss
    return get_loss
Python
def _random_signs(epsilon, nb_vids):
    """Returns a tensor with binary random coordinates

    epsilon (float): scores increment before computing gradient
    nb_vids (int): length of output tensor

    Returns:
        (float tensor): coordinates are +/-epsilon randomly
    """
    rand = torch.randint(2, size=(1, nb_vids))[0] - 0.5
    return rand * 2 * epsilon
Python
def check_equilibrium_glob(epsilon, licch):
    """Returns proportion of global scores which have converged

    Args:
        epsilon (float): scores increment before computing gradient
        licch (Licchavi()): licchavi object

    Returns:
        (float): fraction of scores at equilibrium
    """
    nbvid = len(licch.vid_vidx)
    incr = _random_signs(epsilon, nbvid)

    def _one_side_glob(increment):
        """increment (float tensor): coordinates are +/- epsilon"""
        for node in licch.nodes.values():
            node.opt.zero_grad(set_to_none=True)  # node optimizer
        licch.opt_gen.zero_grad(set_to_none=True)  # general optimizer

        # adding epsilon to scores
        with torch.no_grad():
            licch.global_model += increment
        gen_loss, reg_loss = loss_gen_reg(licch)
        loss = gen_loss + reg_loss
        loss.backward()
        derivs = licch.global_model.grad
        # removing epsilon from scores
        with torch.no_grad():
            licch.global_model -= increment
        return derivs * increment

    derivs1 = _one_side_glob(incr)
    derivs2 = _one_side_glob(-incr)
    equilibrated = torch.logical_and(derivs1 > 0, derivs2 > 0)
    frac_glob = torch.count_nonzero(equilibrated) / nbvid
    return frac_glob.item()
Python
def check_one(vid, comp_glob, comp_loc):
    """prints global and local scores for one video"""
    print("all we have on video: ", vid)
    for score in comp_glob:
        if score[0] == vid:
            print(score)
    for score in comp_loc:
        if score[1] == vid:
            print(score)
Python
def licch_stats(licch):
    """gives some statistics about Licchavi object"""
    print("LICCH_STATS")
    licch.check()  # some tests
    h = licch.history
    print("nb_nodes", licch.nb_nodes)
    licch.stat_s()  # print stats on s parameters
    with torch.no_grad():
        gen_s = licch.all_nodes("s")
        l_s = [s.item() for s in gen_s]
        plot_density(l_s, "s parameters", PATH_PLOTS, "s_params.png")
    plot_metrics([h], path=PATH_PLOTS)
Python
def scores_stats(glob_scores):
    """gives statistics on global scores

    glob_scores: torch tensor of global scores
    """
    print("SCORES_STATS")
    var = torch.var(glob_scores)
    mini, maxi = (torch.min(glob_scores).item(), torch.max(glob_scores).item())
    print("minimax:", mini, maxi)
    print("variance of global scores :", var.item())
    with torch.no_grad():
        plot_density(
            glob_scores.cpu(), "Global scores", PATH_PLOTS, "scores.png"
        )
Python
def s_stats(licch):
    """ Prints and plots about s parameters """
    if licch.test_mode:
        s_predicted = [s.detach().item() for s in licch.all_nodes('s')]
        plot_s_predict_gt(s_predicted, licch.s_gt, PATH_PLOTS)
Python
def migrate_forward(apps, schema_editor):
    """
    Create the criteria of the `videos` poll.
    """
    Criteria = apps.get_model("tournesol", "Criteria")
    CriteriaLocale = apps.get_model("tournesol", "CriteriaLocale")
    CriteriaRank = apps.get_model("tournesol", "CriteriaRank")
    Poll = apps.get_model("tournesol", "Poll")

    default_poll = Poll.objects.get(name="videos")

    for idx, c_data in enumerate(CRITERIAS):
        criteria = Criteria.objects.create(name=c_data["name"])
        CriteriaLocale.objects.create(
            criteria=criteria, language="en", label=c_data["label_en"]
        )
        CriteriaLocale.objects.create(
            criteria=criteria, language="fr", label=c_data["label_fr"]
        )
        CriteriaRank.objects.create(
            criteria=criteria,
            poll=default_poll,
            rank=(len(CRITERIAS) - idx) * 10,
            optional=c_data.get("optional", True),
        )
Python
def create_all():
    """Create all dynamic fields for all models."""
    subclasses = WithDynamicFields.__subclasses__()
    for scl in subclasses:
        if not scl.fields_created:
            scl.fields_created = True
            scl._create_fields()
Python
def filter_reduce(lst, fcn, name="_"):
    """Reduce a list of filters."""
    lst_orig = lst
    lst = [x for x in lst if x is not None]
    if not lst:
        logging.warning(
            f"{name} query with an empty list of operands, returning None: {lst_orig}"
        )
        return None
    return reduce(fcn, lst)
Python
def migrate_forward(apps, schema_editor):
    """
    Set the `uid` and the `type` field of all entities present in the
    database (supposedly only YT videos are present).
    """
    Entity = apps.get_model("tournesol", "Entity")

    for entity in Entity.objects.iterator():
        entity.uid = "{}:{}".format(ENTITY_UID_YT_NAMESPACE, entity.video_id)
        entity.type = ENTITY_TYPE_VIDEO
        entity.save(update_fields=["uid", "type"])
Python
def _show(self, msg, level):
    """Utility for handling logging messages

    msg (str): info message
    level (float): minimum level of verbosity to show -msg
    """
    if self.verb >= level:
        loginf(msg)
Python
def output_scores(self):
    """Returns video scores both global and local

    Returns :
        (tensor of all vIDs, tensor of global video scores)
        (list of tensors of local vIDs, list of tensors of local video scores)
    """
    loc_scores = []
    list_vids_batchs = []

    with torch.no_grad():
        glob_scores = self.global_model
        for node in self.nodes.values():
            input = one_hot_vids(self.vid_vidx, node.vids, self.device)
            output = predict(input, node.model)
            loc_scores.append(output)
            list_vids_batchs.append(node.vids)
        vids_batch = list(self.vid_vidx.keys())

    return (vids_batch, glob_scores), (list_vids_batchs, loc_scores)
Python
def save_models(self, fullpath):
    """Saves age and global and local weights, detached (no gradients)"""
    loginf("Saving models")
    local_data = {
        id: (node.s, node.model.detach(), node.age)  # s, model, age
        for id, node in self.nodes.items()
    }
    saved_data = (
        self.criteria,
        self.vid_vidx,
        self.global_model.detach(),
        local_data,
    )
    torch.save(saved_data, fullpath)
    loginf("Models saved")
Python
def _update_hist(self, epoch, fit, s, gen, reg):
    """Updates history (at end of epoch)"""
    self.history["fit"].append(round_loss(fit))
    self.history["s"].append(round_loss(s))
    self.history["gen"].append(round_loss(gen))
    self.history["reg"].append(round_loss(reg))

    norm = model_norm(self.global_model, pow=(2, 0.5))
    self.history["l2_norm"].append(round_loss(norm, 3))
    grad_gen = extract_grad(self.global_model)
    if epoch > 1:  # no previous model for first epoch
        scal_grad = scalar_product(self.last_grad, grad_gen)
        self.history["grad_sp"].append(scal_grad)
    else:
        self.history["grad_sp"].append(0)  # default value for first epoch
    self.last_grad = deepcopy(extract_grad(self.global_model))
    grad_norm = scalar_product(grad_gen, grad_gen)
    self.history["grad_norm"].append(grad_norm)
Python
def _do_step(self, fit_step):
    """Makes step for appropriate optimizer(s)"""
    if fit_step:  # updating local or global alternatively
        for node in self.nodes.values():
            node.opt.step()  # node optimizer
    else:
        self.opt_gen.step()
Python
def check(self):
    """Performs some consistency checks on internal parameters"""
    # population check
    b1 = self.nb_nodes == len(self.nodes)
    # history check
    reference = self.history["fit"]
    b2 = all([len(v) == len(reference) for v in self.history.values()])
    if b1 and b2:
        loginf("No Problem")
    else:
        logging.warning("Coherency problem in Licchavi object")
Python
def forward_func(apps, schema_editor):
    """
    Create missing instances of ContributorRating for all existing Comparisons
    (including non-trusted users, for which no rating has been computed by ML),
    in order to store the 'is_public' flag related to every pair (user, video).
    """
    ContributorRating = apps.get_model("tournesol", "ContributorRating")
    Comparison = apps.get_model("tournesol", "Comparison")
    user_video_pairs = set(
        Comparison.objects.all().values_list("user_id", "video_1_id").distinct()
    )
    user_video_pairs.update(
        Comparison.objects.all().values_list("user_id", "video_2_id").distinct()
    )
    ContributorRating.objects.bulk_create(
        [
            ContributorRating(user_id=user_id, video_id=video_id)
            for (user_id, video_id) in user_video_pairs
        ],
        batch_size=1000,
        ignore_conflicts=True,
    )
Python
def predict(input, tens, mask=None):
    """Predicts score according to a model

    Args:
        input (bool 2D tensor): one line is a one-hot encoded video index
        tens (float tensor): tensor = model
        mask (bool tensor): one element is bool for using this comparison

    Returns:
        (2D float tensor): score of the videos according to the model
    """
    if input.shape[1] == 0:  # if empty input
        return torch.zeros((1, 1))
    if mask is not None:
        return torch.where(mask, torch.matmul(input.float(), tens), torch.zeros(1))
    return torch.matmul(input.float(), tens)
Python
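For orientation, a hedged usage sketch of predict above (the tensors below are illustrative, not from the original code): each row of the input one-hot encodes a video index, so the matrix product simply reads the corresponding scores out of the model tensor.

import torch

model = torch.tensor([0.3, -1.2, 2.5])          # one score per video
one_hot = torch.tensor([[0, 1, 0], [0, 0, 1]])  # rows select videos 1 and 2
print(predict(one_hot, model))                  # tensor([-1.2000,  2.5000])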
def _bbt_loss(t, r):
    """Binomial Bradley-Terry loss function (used for testing only)

    Args:
        t (float tensor): batch of (s * (ya - yb))
        r (float tensor): batch of ratings given by user.

    Returns:
        (float tensor): sum of empirical losses for all comparisons of one user
    """
    two = torch.tensor(2)
    losses = torch.log(abs(torch.sinh(t) / t)) + r * t + torch.log(two)
    return sum(losses)
Python
def _approx_bbt_loss(t, r):
    """Approximated Binomial Bradley-Terry loss function (used in Licchavi)

    Args:
        t (float tensor): batch of (s * (ya - yb))
        r (float tensor): batch of ratings given by user.

    Returns:
        (float tensor): sum of empirical losses for all comparisons of one user
    """
    small = abs(t) <= 0.01
    medium = torch.logical_and((abs(t) < 10), (abs(t) > 0.01))
    big = abs(t) >= 10
    zer = torch.zeros(1)
    loss = 0
    loss += torch.where(
        small, t ** 2 / 6 + r * t + torch.log(torch.tensor(2)), zer
    ).sum()
    tt = torch.where(t != 0, t, torch.ones(1))  # trick to avoid zeros so NaNs
    loss += torch.where(medium, torch.log(2 * torch.sinh(tt) / tt) + r * tt, zer).sum()
    loss += torch.where(big, abs(tt) - torch.log(abs(tt)) + r * tt, zer).sum()
    return loss
Python
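A small sanity check (a sketch, assuming torch is imported and both loss functions above are in scope) that the piecewise approximation stays close to the exact Bradley-Terry loss in the intermediate regime; the sample values are illustrative.

import torch

t = torch.linspace(0.05, 5.0, 100)  # intermediate regime, between the 0.01 and 10 thresholds
r = torch.zeros(100)
print(abs(_bbt_loss(t, r) - _approx_bbt_loss(t, r)))  # expected to be near zero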
def models_dist(model1, model2, pow=(1, 1), mask=None, vidx=-1):
    """distance between 2 models (l1 by default)

    Args:
        model1 (float tensor): scoring model
        model2 (float tensor): scoring model
        pow (float, float): (internal power, external power)
        mask (bool tensor): subspace in which to compute distance
        vidx (int): video index if only one is computed (-1 for all)

    Returns:
        (scalar float tensor): distance between the 2 models
    """
    q, p = pow
    if vidx == -1:  # if we want several coordinates
        if mask is None:  # if we want all coordinates
            dist = ((model1 - model2) ** q).abs().sum() ** p
        else:
            dist = (((model1 - model2) * mask) ** q).abs().sum() ** p
    else:  # if we want only one coordinate
        dist = abs(model1[vidx] - model2[vidx]) ** (q * p)
    return dist
Python
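For reference, a hedged example (the values are illustrative) of how the pow argument of models_dist changes the metric: pow=(1, 1) gives an L1 distance, while pow=(2, 0.5) gives the Euclidean distance.

import torch

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([1.0, 0.0, 0.0])
print(models_dist(a, b))                # L1 distance: tensor(5.)
print(models_dist(a, b, pow=(2, 0.5)))  # L2 distance: tensor(3.6056)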
def model_norm(model, pow=(2, 1), vidx=-1):
    """norm of a model (l2 squared by default)

    Args:
        model (float tensor): scoring model
        pow (float, float): (internal power, external power)
        vidx (int): video index if only one is computed (-1 for all)

    Returns:
        (float scalar tensor): norm of the model
    """
    q, p = pow
    if vidx != -1:  # if we use only one video
        return abs(model[vidx]) ** (q * p)
    return (model ** q).abs().sum() ** p
Python
def loss_fit_s_gen(licch, vidx=-1, uid=-1):
    """Computes local and generalisation terms of loss

    Args:
        licch (Licchavi()): licchavi object
        vidx (int): video index if we are interested in partial loss
                    (-1 for all indexes)
        uid (int): user ID if we are interested in partial loss
                    (-1 for all users)

    Returns:
        (float tensor): sum of local terms of loss
        (float tensor): generalisation term of loss
    """
    fit_loss, s_loss, gen_loss = 0, 0, 0
    if uid != -1:  # if we want only one user
        node = licch.nodes[uid]
        fit_loss += get_fit_loss(
            node.model,  # local model
            node.s,  # s
            node.vid1,  # id_batch1
            node.vid2,  # id_batch2
            node.r,  # r_batch
            vidx,
        )
        if vidx == -1:  # only if all loss is computed
            s_loss += get_s_loss(node.s)  # FIXME not accessed?
        g = models_dist(
            node.model,  # local model
            licch.global_model,  # general model
            mask=node.mask,  # mask
            vidx=vidx,  # video index if we want partial loss
        )
        gen_loss += node.w * g  # node weight * generalisation term
    else:  # if we want all users
        for node in licch.nodes.values():
            fit_loss += get_fit_loss(
                node.model,  # local model
                node.s,  # s
                node.vid1,  # id_batch1
                node.vid2,  # id_batch2
                node.r,  # r_batch
                vidx,
            )
            if vidx == -1:  # only if all loss is computed
                s_loss += get_s_loss(node.s)
            g = models_dist(
                node.model,  # local model
                licch.global_model,  # general model
                mask=node.mask,  # mask
                vidx=vidx,  # video index if we want partial loss
            )
            gen_loss += node.w * g  # node weight * generalisation term
    return fit_loss, s_loss, gen_loss
Python
def loss_gen_reg(licch, vidx=-1):
    """Computes generalisation and regularisation terms of loss

    Args:
        licch (Licchavi()): licchavi object
        vidx (int): video index if we are interested in partial loss
                    (-1 for all indexes)

    Returns:
        (float tensor): generalisation term of loss
        (float tensor): regularisation loss (of general model)
    """
    gen_loss, reg_loss = 0, 0
    for node in licch.nodes.values():
        g = models_dist(
            node.model,  # local model
            licch.global_model,  # general model
            mask=node.mask,  # mask
            vidx=vidx,
        )
        gen_loss += node.w * g  # node weight * generalisation term
    reg_loss = licch.w0 * model_norm(licch.global_model, vidx=vidx)
    return gen_loss, reg_loss
Python
def round_loss(tens, dec=0):
    """from an input scalar tensor or int/float returns rounded int/float"""
    if isinstance(tens, (int, float)):
        return round(tens, dec)
    return round(tens.item(), dec)
Python
def remove_duplicated_email(apps, schema_editor):
    """
    Find users with duplicated emails, and update email of the accounts
    associated with the fewer comparisons (to preserve only 1 per original
    address). Their 'email' value will be replaced with an invalid address
    derived from the username to guarantee uniqueness in the database.
    """
    User = apps.get_model("core", "User")
    duplicated_emails = (
        User.objects.values("email").alias(count=Count("id")).filter(count__gte=2)
    )
    users_to_update = [
        user
        for user in User.objects.filter(email__in=duplicated_emails).annotate(
            n_comparisons=Count("comparisons"),
            rank_comparisons=Window(
                expression=RowNumber(),
                partition_by=[F("email")],
                order_by=F("n_comparisons").desc(),
            ),
        )
        if user.rank_comparisons > 1
    ]
    for u in users_to_update:
        new_email = f"{u.username}@invalid"
        logging.info(
            'Updating email for user "%s", from "%s" to "%s"',
            u.username,
            u.email,
            new_email,
        )
        u.email = new_email
        u.save(update_fields=["email"])
Python
def expand_dic(vid_vidx, l_vid_new):
    """Expands a dictionary to include new video IDs

    vid_vidx: dictionary of {video ID: video idx}
    l_vid_new: int list of video IDs

    Returns:
    - dictionary of {video ID: video idx} updated (bigger)
    """
    idx = len(vid_vidx)
    for vid_new in l_vid_new:
        if vid_new not in vid_vidx:
            vid_vidx[vid_new] = idx
            idx += 1
    return vid_vidx
Python
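A quick usage sketch of expand_dic above (the IDs are made up for illustration): existing IDs keep their index, new IDs are appended after the current maximum.

vid_vidx = {101: 0, 102: 1}
print(expand_dic(vid_vidx, [102, 103, 104]))
# {101: 0, 102: 1, 103: 2, 104: 3}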
def save_to_pickle(obj, name="pickle"):
    """save python object to pickle file"""
    filename = "{}.p".format(name)
    with open(filename, "wb") as filehandler:
        pickle.dump(obj, filehandler)
Python
def load_from_pickle(name="pickle"):
    """load python object from pickle file"""
    filename = "{}.p".format(name)
    with open(filename, "rb") as filehandler:
        obj = pickle.load(filehandler)
    return obj
Python
def comparison_already_exists(self, poll_id, request):
    """Return True if the comparison already exists, False otherwise."""
    try:
        comparison = Comparison.get_comparison(
            request.user,
            poll_id,
            "{}{}{}".format(
                Entity.UID_YT_NAMESPACE,
                Entity.UID_DELIMITER,
                request.data["entity_a"]["video_id"],
            ),
            "{}{}{}".format(
                Entity.UID_YT_NAMESPACE,
                Entity.UID_DELIMITER,
                request.data["entity_b"]["video_id"],
            ),
        )
    # if one field is missing, do not raise an error yet and let django rest
    # framework check the request integrity
    except KeyError:
        return False
    except ObjectDoesNotExist:
        return False

    if comparison:
        return True
    else:
        return False
Python
def _plot_var(l_hist, l_metrics):
    '''Adds the curves of the requested history metrics to the plot'''
    epochs = range(1, len(l_hist[0]['fit']) + 1)
    for metric in l_metrics:
        vals = np.asarray([hist[metric] for hist in l_hist])
        vals_m, vals_l, vals_u = _means_bounds(vals)
        style, color = next(STYLES), next(COLORS)
        plt.plot(
            epochs, vals_m,
            label=METRICS[metric]["lab"], linestyle=style, color=color
        )
        plt.fill_between(epochs, vals_u, vals_l, alpha=INTENS, color=color)
Python
def _plotfull_var(l_hist, l_metrics, title=None, path=None):
    ''' plot metrics asked in -l_metrics and save if -path provided '''
    _plot_var(l_hist, l_metrics)
    metric = l_metrics[0]
    _legendize(METRICS[metric]["ord"])
    _title_save(title, path, suff="{}.png".format(METRICS[metric]["f_name"]))
Python
def gradsp_var(l_hist, title=None, path=None):
    ''' plot scalar product of gradients between 2 consecutive epochs
        from a list of histories
    '''
    _plotfull_var(l_hist, ['grad_sp', 'grad_norm'], title, path)
Python
def error_var(l_hist, title=None, path=None):
    '''Plots difference between predictions and ground truths
       from a list of histories
    '''
    _plotfull_var(l_hist, ['error_glob', 'error_loc'], title, path)
Python
def plot_metrics(l_hist, title=None, path=None):
    """plot and save the different metrics from a list of histories"""
    loss_var(l_hist, title, path)
    l2_var(l_hist, title, path)
    gradsp_var(l_hist, title, path)
    if 'error_glob' in l_hist[0]:  # if we are in test mode
        _plotfull_var(l_hist, ['error_glob'], title, path)
        _plotfull_var(l_hist, ['error_loc'], title, path)
Python
def plot_density(tens, title=None, path=None, name="hist.png"):
    """Saves a histogram of the data distribution

    tens (tensor): data whose distribution we want to plot
    """
    arr = np.array(tens)
    _ = plt.hist(arr, density=False, label=title, bins=40)
    _legendize("Number", "Value")
    _title_save(title, path, name)
Python
def plot_s_predict_gt(s_predict, s_gt, path, name='s_correlation.png'):
    """ Saves a scatter plot of predicted vs ground-truth s parameters (test mode only)

    s_predict (float list): predicted s parameters
    s_gt (float list): ground truth s parameters
    """
    plt.plot(s_predict, s_gt, 'ro')
    _legendize('ground truths', 'predicted')
    _title_save('s parameters', path, name)
Python
def plot_loc_uncerts(l_nb_comps, l_uncerts, path, name='uncertainties.png'):
    """ Saves local uncertainties as a function of the number of comparisons

    l_nb_comps (int list): number of comparisons for each video by each user
    l_uncerts (float list): uncertainty for each video of each user
    """
    plt.plot(l_nb_comps, l_uncerts, 'ro')
    _legendize('uncertainty', 'number of comparisons')
    _title_save('Local uncertainties', path, name)
Python
def can_include(self):
    """Whether this module is allowed to include other modules.

    This is allowed only if the module was compiled from a file since
    include paths are relative to the file in which they are mentioned.
    """
    return self.path is not None
Python
def add_include(self, name, module_spec):
    """Adds a module as an included module.

    :param name:
        Name under which the included module should be exposed in the
        current module.
    :param module_spec:
        ModuleSpec of the included module.
    """
    assert name, 'name is required'
    assert self.can_include

    if name in self.includes:
        raise ThriftCompilerError(
            'Cannot include module "%s" as "%s" in "%s". '
            'The name is already taken.'
            % (module_spec.name, name, self.path)
        )
    self.includes[name] = module_spec
    self.scope.add_include(name, module_spec.scope, module_spec.surface)
Python
def link(self):
    """Link all the types in this module and all included modules."""
    if self.linked:
        return self
    self.linked = True

    included_modules = []

    # Link includes
    for include in self.includes.values():
        included_modules.append(include.link().surface)

    self.scope.add_surface('__includes__', tuple(included_modules))
    self.scope.add_surface('__thrift_source__', self.thrift_source)

    # Link self
    for linker in LINKERS:
        linker(self.scope).link()

    self.scope.add_surface('loads', Deserializer(self.protocol))
    self.scope.add_surface('dumps', Serializer(self.protocol))

    return self
Python
def sendMessage(self, msg, channel):
    """
    This method posts a message to a public channel, private channel, or
    direct message/IM channel.
    """
    logger.debug('send "{}" to "{}"'.format(msg, channel))
    return self.client.api_call(
        "chat.postMessage",
        channel=channel,
        text=msg,
        as_user=True
    )