Dataset columns: language (string class, 6 values), original_string (string, 25 to 887k characters), text (string, 25 to 887k characters).
Python
def index(cls, document, id_=None, bulk=False, force_insert=False,
          es=None, public_index=False):
    """Override elasticutils.index() to support more than one index
    for the UserProfile model.

    """
    if bulk and es is None:
        raise ValueError('bulk is True, but es is None')
    if es is None:
        es = get_es()
    es.index(document, index=cls.get_index(public_index),
             doc_type=cls.get_mapping_type(), id=id_, bulk=bulk,
             force_insert=force_insert)
Python
def clean(self, value):
    """Groups are saved in lowercase because it's easy and consistent.

    """
    value = super(GroupField, self).clean(value)

    if not re.match(r'^[a-zA-Z0-9 .:,-]*$', value):
        raise ValidationError(_(u'Groups can only contain alphanumeric '
                                'characters, dashes, spaces.'))
    values = [g.strip()
              for g in value.lower().split(',')
              if g and ',' not in g]
    groups = []

    for g in values:
        (group, created) = Group.objects.get_or_create(name=g)
        if not group.system:
            groups.append(group)

    return groups
Python
def form_valid(self, form):
    """Custom form validation to support email changing.

    If the user is already authenticated and reaches this point, it's
    an email changing procedure. Validate that the email is good and
    save it in the database.

    Otherwise continue with the default django-browserid verification.

    """
    if not self.request.user.is_authenticated():
        return super(BrowserIDVerify, self).form_valid(form)

    failure_url = urlparams(reverse('phonebook:profile_edit'),
                            bid_login_failed=1)
    self.assertion = form.cleaned_data['assertion']
    self.audience = get_audience(self.request)
    result = verify(self.assertion, self.audience)
    if not result:
        messages.error(self.request, _('Authentication failed.'))
        return redirect(failure_url)

    email = result['email']

    if User.objects.filter(email=email).exists():
        messages.error(self.request,
                       _('Email already exists in the database.'))
        return redirect('phonebook:logout')

    user = self.request.user
    user.email = email
    user.save()
    return redirect('phonebook:profile_view', user.username)
Python
def view_profile(request, username):
    """View a profile by username."""
    data = {}
    privacy_mappings = {'anonymous': PUBLIC, 'mozillian': MOZILLIANS,
                        'employee': EMPLOYEES, 'privileged': PRIVILEGED,
                        'myself': None}
    privacy_level = None

    if (request.user.is_authenticated()
            and request.user.username == username):
        # own profile
        view_as = request.GET.get('view_as', 'myself')
        privacy_level = privacy_mappings.get(view_as, None)
        profile = UserProfile.objects.privacy_level(privacy_level).get(user__username=username)
        data['privacy_mode'] = view_as
    else:
        userprofile_query = UserProfile.objects.filter(user__username=username)
        public_profile_exists = userprofile_query.public().exists()
        profile_exists = userprofile_query.exists()
        profile_complete = userprofile_query.exclude(full_name='').exists()

        if not public_profile_exists:
            if not request.user.is_authenticated():
                # you have to be authenticated to continue
                messages.warning(request, LOGIN_MESSAGE)
                return (login_required(view_profile,
                                       login_url=reverse('phonebook:home'))
                        (request, username))

            if not request.user.userprofile.is_vouched:
                # you have to be vouched to continue
                messages.error(request, GET_VOUCHED_MESSAGE)
                return redirect('phonebook:home')

        if not profile_exists or not profile_complete:
            raise Http404

        profile = UserProfile.objects.get(user__username=username)
        profile.set_instance_privacy_level(PUBLIC)
        if request.user.is_authenticated():
            profile.set_instance_privacy_level(
                request.user.userprofile.privacy_level)

    if (not profile.is_vouched
            and request.user.is_authenticated()
            and request.user.userprofile.is_vouched):
        data['vouch_form'] = (
            forms.VouchForm(initial={'vouchee': profile.pk}))

    data['shown_user'] = profile.user
    data['profile'] = profile
    return render(request, 'phonebook/profile.html', data)
Python
def logout(request):
    """Logout view that wraps Django's logout but always redirects.

    Django's contrib.auth.views logout method renders a template if the
    `next_page` argument is `None`, which we don't want. This view
    always returns an HTTP redirect instead.

    """
    return auth.views.logout(request, template_name='phonebook/logout.html')
Python
def register(request):
    """Registers Users.

    Pulls out an invite code if it exists and auto validates the user
    if so. Single-purpose view.

    """
    # TODO already vouched users can be re-vouched?
    if 'code' in request.GET:
        request.session['invite-code'] = request.GET['code']
        if request.user.is_authenticated():
            if not request.user.userprofile.is_vouched:
                update_invites(request)
        else:
            messages.info(request,
                          _("You've been invited to join Mozillians.org! "
                            "Sign in and then you can create a profile."))

    return redirect('phonebook:home')
Python
def validate_username(username):
    """Validate username.

    Import modules here to prevent dependency breaking.

    """
    username = username.lower()
    UsernameBlacklist = get_model('users', 'UsernameBlacklist')

    if (UsernameBlacklist.
            objects.filter(value=username, is_regex=False).exists()):
        return False

    for regex_value in UsernameBlacklist.objects.filter(is_regex=True):
        if re.match(regex_value.value, username):
            return False

    return True
Python
def validate_website(url):
    """Validate and return a properly formatted website url."""
    validate_url = URLValidator()
    if url and '://' not in url:
        url = u'http://%s' % url
    try:
        validate_url(url)
    except ValidationError:
        raise ValidationError(_lazy('Enter a valid URL.'))
    return url
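A quick, hedged illustration of how this validator behaves, assuming the snippet's imports (Django's URLValidator and ValidationError) are available in the calling module:

# Illustrative usage only; not part of the original code.
print(validate_website('example.com'))          # -> 'http://example.com'
print(validate_website('https://mozilla.org'))  # returned unchanged

try:
    validate_website('not a url')
except ValidationError:
    print('rejected')  # the scheme is prepended, but validation still fails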
Python
def encrypt(self, data):
    """Encrypt data and return the ciphertext and authentication tag.

    data: raw data
    """
    self._new()
    return self.cipher.encrypt_and_digest(data)
Python
def encrypt(self, data):
    """Encrypt data and return the ciphertext.

    data: raw data
    """
    self._new()
    return self.cipher.encrypt(data)
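The two encrypt variants above differ only in whether the underlying cipher object is an AEAD mode: encrypt_and_digest returns a (ciphertext, tag) pair, while plain encrypt returns ciphertext only. Below is a minimal sketch of what _new() could look like for the AEAD variant, assuming PyCryptodome and AES-GCM; the class name and the key/nonce handling are illustrative assumptions, not taken from the original code.

# Hedged sketch: one plausible _new() for the AEAD variant, using PyCryptodome.
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes


class GcmCipher:
    def __init__(self, key=None):
        self.key = key or get_random_bytes(32)  # AES-256 key (assumption)
        self.nonce = None
        self.cipher = None

    def _new(self):
        # A fresh nonce, and therefore a fresh cipher object, per message.
        self.nonce = get_random_bytes(12)
        self.cipher = AES.new(self.key, AES.MODE_GCM, nonce=self.nonce)

    def encrypt(self, data):
        self._new()
        return self.cipher.encrypt_and_digest(data)  # (ciphertext, tag)


c = GcmCipher()
ciphertext, tag = c.encrypt(b"raw data")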
Python
async def on_message(event: hikari.GuildMessageCreateEvent) -> None:
    """Listen for messages being created."""
    if not event.is_human or not event.content or not event.content.startswith("!"):
        # Do not respond to bots, webhooks, or messages without content or without a prefix.
        return

    args = event.content[1:].split()

    if args[0] == "image":
        if len(args) == 1:
            # No more args were provided
            what = ""
        else:
            what = args[1]

        # Since uploading can take some time, we give a visual indicator to the user by typing
        async with bot.rest.trigger_typing(event.channel_id):
            await inspect_image(event, what.lstrip())
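For context, a listener like this one relies on a module-level hikari GatewayBot and is attached via bot.listen; the token handling below is a hedged sketch and not part of the snippet above.

# Hedged sketch of how the listener above would typically be wired up.
import os

import hikari

bot = hikari.GatewayBot(token=os.environ["BOT_TOKEN"])

# Register the coroutine above as an event listener, then start the bot.
bot.listen(hikari.GuildMessageCreateEvent)(on_message)

if __name__ == "__main__":
    bot.run()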
Python
async def inspect_image(event: hikari.GuildMessageCreateEvent, what: str) -> None:
    """Inspect the image and respond to the user."""
    # Show the avatar for the given user ID:
    if user_match := re.match(r"<@!?(\d+)>", what):
        user_id = hikari.Snowflake(user_match.group(1))
        user = bot.cache.get_user(user_id) or await bot.rest.fetch_user(user_id)
        await event.message.respond("User avatar", attachment=user.avatar_url or user.default_avatar_url)

    # Show the guild icon:
    elif what.casefold() in ("guild", "server", "here", "this"):
        guild = event.get_guild()
        if guild is None:
            await event.message.respond("Guild is missing from the cache :(")
            return

        if (icon_url := guild.icon_url) is None:
            await event.message.respond("This guild doesn't have an icon")
        else:
            await event.message.respond("Guild icon", attachment=icon_url)

    # Show the image for the given emoji if there is some content present:
    elif what:
        emoji = hikari.Emoji.parse(what)
        await event.message.respond(emoji.name, attachment=emoji)

    # If nothing was given, we should just return the avatar of the person who ran the command:
    else:
        await event.message.respond(
            "Your avatar", attachment=event.author.avatar_url or event.author.default_avatar_url
        )
Python
async def message(event: hikari.GuildMessageCreateEvent) -> None:
    """Listen for messages being created."""
    if not event.is_human or not event.content:
        return

    # Command Framework 101 :D
    if event.content.startswith(PREFIX):
        if is_command("ping", event.content):
            await event.message.respond("Pong!")
        elif is_command("value", event.content):
            await event.message.respond(f"Current value: {dashboard.value}")
Python
def verify_types(session: nox.Session) -> None:
    """Verify the "type completeness" of types exported by the library using Pyright."""
    session.install("-r", "dev-requirements.txt")
    session.install(".")
    session.run("python", "-m", "pyright", "--verifytypes", config.MAIN_PACKAGE, "--ignoreexternal")
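In a typical noxfile, a function like this is exposed as a session via the nox.session decorator and then run from the command line with nox -s verify_types. The decorator arguments below are assumptions for illustration, not the project's actual configuration.

# Hedged sketch: registering the session above in a noxfile.
import nox


@nox.session(reuse_venv=True)  # reuse_venv is an assumption, not project config
def verify_types(session: nox.Session) -> None:
    ...  # body as shown above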
Python
async def register_commands(event: hikari.StartingEvent) -> None:
    """Register ping and info commands."""
    application = await bot.rest.fetch_application()

    commands = [
        bot.rest.slash_command_builder("ping", "Get the bot's latency."),
        bot.rest.slash_command_builder("info", "Learn something about the bot."),
        bot.rest.slash_command_builder("ephemeral", "Send a very secret message."),
    ]

    await bot.rest.set_application_commands(
        application=application.id,
        commands=commands,
        guild=COMMAND_GUILD_ID,
    )
Python
def build_response(self) -> special_endpoints.InteractionMessageBuilder:
    """Get a message response builder for use in the REST server flow.

    !!! note
        For interactions received over the gateway
        `ModalInteraction.create_initial_response` should be used to set
        the interaction response message.

    Examples
    --------
    ```py
    async def handle_modal_interaction(interaction: ModalInteraction) -> InteractionMessageBuilder:
        return (
            interaction
            .build_response()
            .add_embed(Embed(description="Hi there"))
            .set_content("Konnichiwa")
        )
    ```

    Returns
    -------
    hikari.api.special_endpoints.InteractionMessageBuilder
        Interaction message response builder object.
    """
    return self.app.rest.interaction_message_builder(base_interactions.ResponseType.MESSAGE_CREATE)
Python
def build_deferred_response(self) -> special_endpoints.InteractionDeferredBuilder:
    """Get a deferred message response builder for use in the REST server flow.

    !!! note
        For interactions received over the gateway
        `ModalInteraction.create_initial_response` should be used to set
        the interaction response message.

    !!! note
        Unlike `hikari.api.special_endpoints.InteractionMessageBuilder`,
        the result of this call can be returned as is without any
        modifications being made to it.

    Returns
    -------
    hikari.api.special_endpoints.InteractionDeferredBuilder
        Deferred interaction message response builder object.
    """
    return self.app.rest.interaction_deferred_builder(base_interactions.ResponseType.DEFERRED_MESSAGE_CREATE)
Python
def _train_one_epoch(model, train_dataset, batch_size, clip_norm,
                     log_interval, optimizer, cuda, num_workers):
    """Train the RNN for one epoch."""
    device = torch.device("cuda:0" if cuda else "cpu")
    model.train()

    # Create dataloader
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size,
                                  shuffle=True, num_workers=num_workers,
                                  pin_memory=cuda)
    criterion = nn.CrossEntropyLoss()

    log_interval_loss = 0
    num_examples = 0
    start_time = time.time()
    for batch_idx, data_tuple in enumerate(train_dataloader):
        data, targets = data_tuple
        data = data.to(device)
        targets = targets.to(device)

        optimizer.zero_grad()
        output_distribution = model(data)
        loss = criterion(output_distribution, targets)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip_norm)
        optimizer.step()

        # Multiply mean loss by batch size to get total sum loss.
        log_interval_loss += (loss.detach().item() * data.size(0))
        num_examples += len(targets)

        if batch_idx % log_interval == 0 and batch_idx > 0:
            cur_loss = log_interval_loss / num_examples
            elapsed = time.time() - start_time
            print('| {:5d}/{:5d} batches | lr {:02.4f} | '
                  'ms/batch {:5.2f} | loss {:5.2f} | ppl {:8.2f}'.format(
                      batch_idx, len(train_dataloader),
                      optimizer.param_groups[0]['lr'],
                      elapsed * 1000 / log_interval, cur_loss,
                      math.exp(cur_loss)))
            log_interval_loss = 0
            num_examples = 0
            start_time = time.time()
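A small, hedged driver showing how this helper might be called once per epoch; the hyperparameters and the SGD optimizer are placeholders, not values from the original project, and the model and dataset are assumed to follow the conventions above (inputs mapped to logits over the vocabulary).

# Hypothetical training driver for _train_one_epoch.
import torch


def train(model, train_dataset, num_epochs=5, lr=1.0, cuda=False):
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    for epoch in range(1, num_epochs + 1):
        print('starting epoch {}'.format(epoch))
        _train_one_epoch(model, train_dataset, batch_size=64, clip_norm=0.25,
                         log_interval=200, optimizer=optimizer, cuda=cuda,
                         num_workers=0)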
Python
def forward(self, inputs, lengths=None):
    """
    Parameters
    ----------
    inputs: LongTensor
        The input data. Shape is (seq_len, batch_size) if
        batch_first=False, or (batch_size, seq_len) if batch_first=True.

    lengths: LongTensor or List[int], optional
        List of integers with the sequence length for each element
        in the batch.

    Returns
    -------
    output_distribution: FloatTensor
        FloatTensor of shape (batch_size, vocab_size), which is a
        distribution over the vocabulary for each batch.
    """
    # Shape: (batch_size, seq_len, embedding_size) if batch_first=True
    # Shape: (seq_len, batch_size, embedding_size) if batch_first=False
    embedded_seq = self.embedding(inputs)
    embedded_seq = self.dropout(embedded_seq)

    if lengths is not None:
        # Pack the sequence
        embedded_seq = pack(embedded_seq, lengths,
                            batch_first=self.batch_first)

    # encoded_seq shape if batch_first=True: (batch_size, seq_len,
    # hidden_size)
    # encoded_seq shape if batch_first=False: (seq_len, batch_size,
    # hidden_size)
    self.rnn.flatten_parameters()
    encoded_seq, _ = self.rnn(embedded_seq)

    if lengths is not None:
        # Pad the packed sequence
        encoded_seq, _ = pad(encoded_seq, batch_first=self.batch_first)

    # Apply dropout.
    encoded_seq = self.dropout(encoded_seq)

    # Get the output after encoding the entire sequence
    if lengths is not None:
        # Shape: (batch_size, hidden_size)
        idx = (torch.tensor(
            lengths, dtype=torch.long,
            device=encoded_seq.device) - 1).view(-1, 1).expand(
                len(lengths), encoded_seq.size(2))
        time_dimension = 1 if self.batch_first else 0
        idx = idx.unsqueeze(time_dimension)
        last_output = encoded_seq.gather(
            time_dimension, idx).squeeze(time_dimension)
    else:
        last_output = encoded_seq[:, -1]

    # Run this reshaped RNN output through the decoder to get
    # output of shape (batch_size, vocab_size)
    output_distribution = self.decoder(last_output)

    # Return decoded, a distribution over the output vocabulary for
    # each sequence in the batch.
    # Shape: (batch_size, vocab_size)
    return output_distribution
Python
def sort_batch_by_length(tensor, sequence_lengths):
    """
    This function is mostly taken from AllenNLP, but updated for
    PyTorch 0.4.0.

    Sort a batch first tensor by some specified lengths.

    Parameters
    ----------
    tensor : torch.FloatTensor, required.
        A batch first Pytorch tensor.
    sequence_lengths : torch.LongTensor, required.
        A tensor representing the lengths of some dimension of the tensor
        which we want to sort by.

    Returns
    -------
    sorted_tensor : torch.FloatTensor
        The original tensor sorted along the batch dimension with respect
        to sequence_lengths.
    sorted_sequence_lengths : torch.LongTensor
        The original sequence_lengths sorted by decreasing size.
    restoration_indices : torch.LongTensor
        Indices into the sorted_tensor such that
        ``sorted_tensor.index_select(0, restoration_indices) == original_tensor``
    permutation_index : torch.LongTensor
        The indices used to sort the tensor. This is useful if you want
        to sort many tensors using the same ordering.
    """
    sorted_sequence_lengths, permutation_index = sequence_lengths.sort(
        0, descending=True)
    sorted_tensor = tensor.index_select(0, permutation_index)

    # This is ugly, but required - we are creating a new variable at runtime,
    # so we must ensure it has the correct CUDA vs non-CUDA type. We do this
    # by cloning and refilling one of the inputs to the function.
    index_range = sequence_lengths.data.clone().copy_(torch.arange(
        0, len(sequence_lengths)))
    # This is the equivalent of zipping with index, sorting by the original
    # sequence lengths and returning the now sorted indices.
    index_range = index_range.long()
    _, reverse_mapping = permutation_index.sort(0, descending=False)
    restoration_indices = index_range.index_select(0, reverse_mapping)
    return (sorted_tensor, sorted_sequence_lengths, restoration_indices,
            permutation_index)
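A short, hedged example of the intended workflow: sort, pack for the RNN, then undo the sort with restoration_indices. The tensor shapes and the LSTM here are made up for illustration.

# Illustration only; shapes and the RNN are placeholders.
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

batch = torch.randn(4, 7, 16)        # (batch, seq_len, features)
lengths = torch.tensor([7, 3, 5, 2])

sorted_batch, sorted_lengths, restore_idx, _ = sort_batch_by_length(batch, lengths)
packed = pack_padded_sequence(sorted_batch, sorted_lengths, batch_first=True)

rnn = torch.nn.LSTM(16, 32, batch_first=True)
packed_out, _ = rnn(packed)
out, _ = pad_packed_sequence(packed_out, batch_first=True)

# Restore the original batch order.
out = out.index_select(0, restore_idx)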
Python
def forward(self, scores):
    """
    Expect a score matrix where the scores of the positive pairs are
    on the diagonal.
    """
    caption_loss = self.hc_loss(scores)
    image_loss = self.hc_loss(scores.t())
    image_caption_loss = caption_loss + image_loss
    return image_caption_loss
Python
def forward(self, scores):
    """
    Expect a score matrix where the scores of the positive pairs are
    on the diagonal.
    """
    caption_rank = self.sorter(scores)
    image_rank = self.sorter(scores.t())
    image_caption_loss = torch.sum(caption_rank.diag()) + torch.sum(image_rank.diag())
    return image_caption_loss
Python
def create_user(first_name, last_name, username, password):
    '''
    Function to create a new user
    '''
    new_user = User(username, password)
    return new_user
Python
def check_existing_user(username):
    '''
    Function that checks if a user exists with that username and
    returns a Boolean
    '''
    return User.user_exists(username)
Python
def create_new_credential(account_site, account_username, account_password):
    '''
    function to create a new credential
    '''
    new_credential = Credential(account_site, account_username, account_password)
    return new_credential
Python
def generate_password(password_length):  # generate password
    """
    generate a random password for the user
    """
    return Credential.generate_password(password_length)
Python
def find_by_account_site(cls, account_site):
    '''
    method that takes in an account_site and returns credentials
    '''
    for credential in cls.user_credentials:
        if credential.account_site == account_site:
            return credential
    return False
Python
def is_expired(self):
    """
    Test if the token has expired (based on the 'expires_at' field)

    :rtype: boolean
    :returns: True if the token has expired, False otherwise
    """
    expired = datetime.datetime.today() >= self.token["expires_at"]
    return expired
Python
def dt_to_str_iso(dt):
    """
    Convert a datetime.datetime object to a string respecting the
    ISO-8601 format

    Will raise ValueError if type not appropriate

    :param dt: The datetime object to convert
    :type dt: datetime.datetime

    :returns: ISO 8601 formatted string
    :rtype: str
    """
    iso_format = "%Y-%m-%dT%H:%M:%SZ"
    if isinstance(dt, datetime.datetime):
        s = dt.strftime(iso_format)
        return s
    else:
        raise ValueError("Arg 'dt' should be of class 'datetime.datetime'")
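A quick usage check of the formatter above; the values are illustrative.

import datetime

print(dt_to_str_iso(datetime.datetime(2020, 1, 2, 3, 4, 5)))  # 2020-01-02T03:04:05Z

try:
    dt_to_str_iso("2020-01-02")  # wrong type
except ValueError as exc:
    print(exc)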
Python
def no_html_finder(url, max_width=None):
    """
    A finder which returns everything but HTML
    """
    embed = self.dummy_finder(url, max_width)
    embed['html'] = None
    return embed
Python
def check_panels_in_model(cls, context='model'):
    """Check panels configuration uses `panels` when `edit_handler` not in use."""
    from wagtail.core.models import Page
    from wagtail.admin.edit_handlers import InlinePanel

    errors = []

    if hasattr(cls, 'get_edit_handler'):
        # must check the InlinePanel related models
        edit_handler = cls.get_edit_handler()
        for tab in edit_handler.children:
            inline_panel_children = [
                panel for panel in tab.children if isinstance(panel, InlinePanel)]
            for inline_panel_child in inline_panel_children:
                errors.extend(check_panels_in_model(
                    inline_panel_child.db_field.related_model,
                    context='InlinePanel model',
                ))

    if issubclass(cls, Page) or hasattr(cls, 'edit_handler'):
        # Pages do not need to be checked for standalone tabbed_panel usage
        # if edit_handler is used on any model, assume config is correct
        return errors

    tabbed_panels = [
        'content_panels',
        'promote_panels',
        'settings_panels',
    ]

    for panel_name in tabbed_panels:
        class_name = cls.__name__
        if not hasattr(cls, panel_name):
            continue

        panel_name_short = panel_name.replace('_panels', '').title()
        error_title = "{}.{} will have no effect on {} editing".format(
            class_name, panel_name, context)

        if 'InlinePanel' in context:
            error_hint = """Ensure that {} uses `panels` instead of `{}`.
There are no tabs on non-Page model editing within InlinePanels.""".format(
                class_name, panel_name)
        else:
            error_hint = """Ensure that {} uses `panels` instead of `{}`\
or set up an `edit_handler` if you want a tabbed editing interface.
There are no default tabs on non-Page models so there will be no \
{} tab for the {} to render in.""".format(
                class_name, panel_name, panel_name_short, panel_name)

        error = Warning(
            error_title,
            hint=error_hint,
            obj=cls,
            id='wagtailadmin.W002'
        )

        errors.append(error)

    return errors
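For context, a check like this is normally wired into Django's system-check framework so it runs for every installed model; the registration below is a hedged sketch, not necessarily how the original project registers it.

# Hedged sketch: registering the check above with Django's check framework.
from django.apps import apps
from django.core import checks


@checks.register('panels')
def inline_panel_model_panels_check(app_configs, **kwargs):
    errors = []
    for model in apps.get_models():
        errors.extend(check_panels_in_model(model))
    return errors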
Python
def replace_custom_with_default(filename):
    """Replace the custom file with the saved default file.
    To be used when a module is not found or empty"""
    with open(path_to_jup/"custom"/filename, "w") as dst:
        with open(path_to_jup/"default"/filename, "r") as src:
            dst.write(src.read())
Python
def _prove_one(self, tree, task):
    """Searches for a proof via BFS.

    Args:
      tree: Search tree with a single goal node to be proved.
      task: ProverTask to be performed.

    Returns:
      None on success and error message on failure.
    """
    root = tree.nodes[0]
    nodes_explored = 0
    # Note that adding new node to the tree might re-enable previous nodes
    # for tactic applications, if they were marked to be ignored by
    # failing sibling nodes.
    tree.cur_index = 0
    while not self.timed_out() and not root.closed and not root.failed and (
            nodes_explored < self.options.max_explored_nodes):
        if tree.cur_index >= len(tree.nodes):
            return 'BFS: All nodes are failed or ignored.'
        node = tree.nodes[tree.cur_index]
        tree.cur_index += 1
        if node.ignore or node.failed or node.closed or node.processed:
            continue
        nodes_explored += 1
        # Note that the following function might change tree.cur_index
        # (if a node that was ignored suddenly becomes subgoal of a new
        # tactic application).
        prover_util.try_tactics(node, self.options.max_top_suggestions,
                                self.options.min_successful_branches,
                                self.options.max_successful_branches,
                                task.premise_set, self.action_gen,
                                self.prover_options.tactic_timeout_ms)
    root_status = ' '.join([
        p[0] for p in [('closed', root.closed), ('failed', root.failed)]
        if p[1]
    ])
    tf.logging.info('Timeout: %s root status: %s explored: %d',
                    str(self.timed_out()), root_status, nodes_explored)
    if self.timed_out():
        return 'BFS: Timeout.'
    elif root.failed:
        return 'BFS: Root Failed.'
    elif nodes_explored >= self.options.max_explored_nodes and not root.closed:
        return 'BFS: Node limit reached.'
Python
def rldecode(data):
    r"""
    RunLength decoder (Adobe version) implementation based on PDF Reference
    version 1.4 section 3.3.4:
        The RunLengthDecode filter decodes data that has been encoded in a
        simple byte-oriented format based on run length. The encoded data
        is a sequence of runs, where each run consists of a length byte
        followed by 1 to 128 bytes of data. If the length byte is in the
        range 0 to 127, the following length + 1 (1 to 128) bytes are
        copied literally during decompression. If length is in the range
        129 to 255, the following single byte is to be copied 257 - length
        (2 to 128) times during decompression. A length value of 128
        denotes EOD.
    >>> s = b'\x05123456\xfa7\x04abcde\x80junk'
    >>> rldecode(s)
    b'1234567777777abcde'
    """
    decoded = b''
    i = 0
    while i < len(data):
        # print('data[%d]=:%d:' % (i, ord(data[i])))
        length = data[i]
        if length == 128:
            break
        if length >= 0 and length < 128:
            run = data[i+1:(i+1)+(length+1)]
            # print('length=%d, run=%s' % (length+1, run))
            decoded += run
            i = (i+1) + (length+1)
        if length > 128:
            run = data[i+1:i+2]*(257-length)
            # print('length=%d, run=%s' % (257-length, run))
            decoded += run
            i = (i+1) + 1
    return decoded
Python
def seek(self, pos):
    """Seeks the parser to the given position.
    """
    if self.debug:
        logging.debug('seek: %r' % pos)
    self.fp.seek(pos)
    # reset the status for nextline()
    self.bufpos = pos
    self.buf = b''
    self.charpos = 0
    # reset the status for nexttoken()
    self._parse1 = self._parse_main
    self._curtoken = b''
    self._curtokenpos = 0
    self._tokens = []
    return
Python
def revreadlines(self):
    """Fetches lines backward, one at a time.

    This is used to locate the trailers at the end of a file.
    """
    self.fp.seek(0, 2)
    pos = self.fp.tell()
    buf = b''
    while 0 < pos:
        prevpos = pos
        pos = max(0, pos-self.BUFSIZ)
        self.fp.seek(pos)
        s = self.fp.read(prevpos-pos)
        if not s:
            break
        while 1:
            n = max(s.rfind(b'\r'), s.rfind(b'\n'))
            if n == -1:
                buf = s + buf
                break
            yield s[n:]+buf
            s = s[:n]
            buf = b''
    return
Python
def nextobject(self):
    """Yields a list of objects.

    Returns keywords, literals, strings, numbers, arrays and dictionaries.
    Arrays and dictionaries are represented as Python lists and
    dictionaries.
    """
    while not self.results:
        (pos, token) = self.nexttoken()
        # print((pos, token), (self.curtype, self.curstack))
        if isinstance(token, (int, float, bool, bytes, PSLiteral)):
            # normal token
            self.push((pos, token))
        elif token == KEYWORD_ARRAY_BEGIN:
            # begin array
            self.start_type(pos, 'a')
        elif token == KEYWORD_ARRAY_END:
            # end array
            try:
                self.push(self.end_type('a'))
            except PSTypeError:
                if STRICT:
                    raise
        elif token == KEYWORD_DICT_BEGIN:
            # begin dictionary
            self.start_type(pos, 'd')
        elif token == KEYWORD_DICT_END:
            # end dictionary
            try:
                (pos, objs) = self.end_type('d')
                if len(objs) % 2 != 0:
                    raise PSSyntaxError('Invalid dictionary construct: %r' % (objs,))
                # construct a Python dictionary.
                d = dict((literal_name(k), v) for (k, v) in choplist(2, objs)
                         if v is not None)
                self.push((pos, d))
            except PSTypeError:
                if STRICT:
                    raise
        elif token == KEYWORD_PROC_BEGIN:
            # begin proc
            self.start_type(pos, 'p')
        elif token == KEYWORD_PROC_END:
            # end proc
            try:
                self.push(self.end_type('p'))
            except PSTypeError:
                if STRICT:
                    raise
        else:
            if self.debug:
                logging.debug('do_keyword: pos=%r, token=%r, stack=%r' %
                              (pos, token, self.curstack))
            self.do_keyword(pos, token)
        if self.context:
            continue
        else:
            self.flush()
    obj = self.results.pop(0)
    if self.debug:
        logging.debug('nextobject: %r' % (obj,))
    return obj
Python
def _fetch_recipients(
    list_name, include_wafer_users, include_google_sheet, ticket_holders_only
):
    """returns an array of email addresses"""
    users = []
    if include_google_sheet:
        logging.info("fetching sheet users")
        users.extend([d[EMAIL] for d in fetch_sheet(url=include_google_sheet)])
    if include_wafer_users:
        logging.info("fetching wafer users")
        users.extend(wafer_utils.fetch_all_users_and_ticket_emails(ticket_holders_only))

    known_preferences = _fetch_email_preferences()
    final_users = []
    for user in users:
        if user in known_preferences:
            if list_name in known_preferences[user]:
                final_users.append(user)
            else:
                logging.info(f"removing user: {user}")
        else:
            final_users.append(user)
    return set(final_users)
Python
def send_emails(
    template_name,
    list_name,
    subject,
    from_email="[email protected]",
    dry_run_render_to_file_path=None,
    dry_run_send_to_email_address=None,
    dry_run_fetch_recipients=True,
    dry_run_dont_send=False,
    start_at_recipient_number=1,
    include_wafer_users=False,
    include_google_sheet=None,
    ticket_holders_only=False,
    **template_kwargs,
):
    """
    Example usage:

        python send_emails.py --template_name "base.html" --list_name="boo" --subject="yay" --message="this is a test" --dry_run_render_to_file_path="gitignore/rendered_base.html"

        python send_emails.py --template_name "base.html" --list_name="boo" --subject="yay" --message="this is a test" --dry_run_send_to_email_address="[email protected]" --from_email="[email protected]"

    RECIPIENTS

    - list_name: this allows us to let people be on multiple email lists,
      eg "Community and event news", "Sponsorship packages". The name passed
      in here should match something in the preferences sheet EXACTLY
    - preference_sheet_url: this can be a parameter or environmental
      variable. See settings.py
    - start_at_recipient_number: this allows us to pick up where we left
      off. Eg let's say we are sending 5 emails and the first 3 work, then
      the power dies. We want to start the process again from number 4

    DRY RUNS

    - dry_run_render_to_file_path: this doesn't send an email; it just
      renders the email and sticks it in a file. NOTE: the logo won't
      render properly except as an email
    - dry_run_send_to_email_address: if this is set then the whole script
      will run EXCEPT emails won't actually get sent to the recipients. A
      single email will get sent to the dry_run_email_address
    - dry_run_fetch_recipients: if we are doing a dry run then making this
      true will tell the script to fetch all the recipients
    """
    template_kwargs["title"] = template_kwargs.get("title", subject)

    if dry_run_fetch_recipients:
        logging.info("FETCHING RECIPIENTS...")
        recipients = _fetch_recipients(
            list_name=list_name,
            include_wafer_users=include_wafer_users,
            include_google_sheet=include_google_sheet,
            ticket_holders_only=ticket_holders_only,
        )
        logging.info(f"FETCHED {len(recipients)} recipients")
    else:
        logging.info("FETCHING RECIPIENTS SKIPPED")

    if dry_run_send_to_email_address:
        logging.info("...dry run: Overriding recipient list")
        recipients = [dry_run_send_to_email_address]
    elif dry_run_render_to_file_path:
        recipients = ["[email protected]"]

    total = len(recipients)
    for n, recipient in enumerate(recipients):
        number = n + 1
        if number < start_at_recipient_number:  # skip if we need to
            continue
        logging.info(f"sending to recipient {number}/{total}: {recipient}")
        html = utils.render_template(
            name=template_name, template_kwargs=template_kwargs
        )
        if dry_run_render_to_file_path:
            logging.info("...dry run: rendering to file")
            with open(Path(dry_run_render_to_file_path), "w") as f:
                f.write(html)
            return
        if dry_run_dont_send:
            logging.info("...dry run: Not sent")
        else:
            # actually send the email
            utils.send_html_email(
                to_email=recipient,
                from_email=from_email,
                html=html,
                subject=subject,
            )
            logging.info("...sent")
        time.sleep(1)
Python
def organize(url):
    '''
    Decide whether a page has a JSON-LD recipe
    '''
    out = flayer.tools.Output(__opts__)
    cur = __dbclient__.cursor()
    insert_sql = '''
        INSERT INTO jsonld_domains (domain) VALUES (%s) ON CONFLICT DO NOTHING
    '''
    try:
        req = requests.get(url)
        content = req.text
    except requests.exceptions.MissingSchema as exc:
        return []
    except requests.exceptions.ConnectionError:
        return []
    except requests.exceptions.SSLError:
        out.warn('SSL Error with {}, trying again without verification'.format(url))
        req = requests.get(url, verify=False)
        content = req.text
    soup = BeautifulSoup(content, 'html.parser')
    if 'jsonld_domains' not in __context__:
        __context__['jsonld_domains'] = []
    for tag in soup.find_all('script', attrs={'type': 'application/ld+json'}):
        for data in tag:
            try:
                script = json.loads(data)
                try:
                    script_type = script['@type'].lower()
                except (AttributeError, KeyError, TypeError):
                    return []
                if script_type == 'recipe':
                    url_comps = urllib.parse.urlparse(url)
                    netloc = url_comps[1].split(':')[0]
                    cur.execute(insert_sql, [netloc])
                    __dbclient__.commit()
                    if netloc not in __context__['jsonld_domains']:
                        __context__['jsonld_domains'].append(netloc)
                    flayer.tools.queue_urls(url, __dbclient__, __opts__)
                    return 'Queueing for download: {}'.format(url)
            except json.decoder.JSONDecodeError as exc:
                pass
    return []
Python
def process():
    '''
    Return the IDs of any running Web Flayer instances
    '''
    ret = {}
    run_dir = __opts__.get('flayer_run_dir', '/var/run/flayer')
    for agent in os.listdir(run_dir):
        meta_file = os.path.join(run_dir, agent, 'meta')
        if not os.path.exists(meta_file):
            continue
        with open(meta_file, 'r') as mfh:
            meta = json.load(mfh)
        try:
            if psutil.Process(meta['pid']).cmdline()[0]:
                ret[meta['id']] = meta
        except (psutil.NoSuchProcess, IndexError):
            # The meta file is stale; that instance is no longer running
            continue
    return {'flayer_agents': ret}
Python
def organize(url):
    '''
    Decide whether a page is using JSON-LD
    '''
    url_uuid, content = flayer.tools.get_url(
        url, dbclient=__dbclient__, opts=__opts__, context=__context__
    )
    types = set()
    soup = BeautifulSoup(content, 'html.parser')
    for tag in soup.find_all('script', attrs={'type': 'application/ld+json'}):
        for data in tag:
            try:
                script = json.loads(data)
                types.add(script['@type'])
            except json.decoder.JSONDecodeError as exc:
                # Record the parse error as text rather than the exception object
                types.add(str(exc))
    return list(types)
Python
def _query(decode=False, id_=None, **params): ''' Send a command to the API ''' agents = __grains__['flayer_agents'] if id_ is None: if 'unknown' in agents: id_ = 'unknown' else: if len(list(agents)) == 1: id_ = list(agents)[0] else: raise CommandExecutionError('A valid Web Flayer id_ was not specified') elif id_ not in agents: raise CommandExecutionError('{} is not running'.format(id_)) api_host = agents[id_].get('api_addr', '127.0.0.1') api_port = agents[id_].get('api_port', 42424) url = 'http://{0}:{1}'.format(api_host, api_port) return salt.utils.http.query( url, params=params, decode=decode, decode_type='json', )
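The wrapper functions that follow (queue, hard_stop, list_queue, show_opts) all funnel through _query, which looks up the agent's address in the flayer_agents grain and hits its HTTP API. Roughly what that amounts to on the wire, assuming an agent bound to the default address and port shown above:

import requests

# A sketch of the underlying request; 127.0.0.1:42424 are the defaults used
# by _query when the grain does not override them
response = requests.get('http://127.0.0.1:42424', params={'list_queue': True})
print(response.json())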
Python
def queue(urls, force=False, data=None): ''' Queue up a URL or URLs for download ''' if isinstance(urls, six.string_types): urls = [urls] _query(urls=urls, force=force, data=data)
Python
def hard_stop(id_=None): ''' Hard stop the Web Flayer daemon ''' _query(hard_stop=True, id_=id_)
Python
def list_queue(id_=None):
    '''
    List the contents of the queue
    '''
    return _query(list_queue=True, id_=id_)
Python
def show_opts(id_=None): ''' List the opts for the daemon ''' return _query(show_opts=True, id_=id_)
Python
def pop_dl_queue(dbclient, urls, opts):
    '''
    Check the database for any queued URLs, and add to the list
    '''
    cur = dbclient.cursor()
    # Unpause jobs past the time limit
    cur.execute('''
        UPDATE dl_queue
        SET paused_until = NULL
        WHERE paused_until IS NOT NULL
        AND paused_until <= NOW()
    ''')
    dbclient.commit()
    # Lock a URL for this instance
    cur.execute('''
        LOCK TABLE ONLY dl_queue;
        UPDATE dl_queue
        SET locked_by = %s
        WHERE uuid = (
            SELECT uuid
            FROM dl_queue
            WHERE paused = FALSE
            AND paused_until IS NULL
            ORDER BY dl_order, added
            LIMIT 1
        )
        RETURNING uuid
    ''', [opts['id']])
    if cur.rowcount > 0:
        data = cur.fetchone()
        url_uuid = data[0]
    else:
        return
    # Helps out with the lock
    time.sleep(.2)
    # Queue the URL and delete it from the queue
    cur.execute('SELECT url, refresh_interval FROM dl_queue WHERE uuid = %s', [url_uuid])
    url, refresh = cur.fetchone()
    urls.append(url)
    if refresh:
        next_refresh = datetime.datetime.now() + datetime.timedelta(**refresh)
        cur.execute('''
            UPDATE dl_queue
            SET locked_by = '',
                paused_until = %s
            WHERE uuid = %s
        ''', [next_refresh, url_uuid])
    else:
        cur.execute('DELETE FROM dl_queue WHERE uuid = %s', [url_uuid])
    dbclient.commit()
    opts['queue_id'] = url_uuid
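Note that refresh is expanded straight into datetime.timedelta(**refresh), so a row's refresh_interval is assumed to be a mapping of timedelta keyword arguments. A small illustration of that assumption (the values are made up):

import datetime

# Assumed shape of a refresh_interval value: keyword arguments for timedelta
refresh = {'hours': 6, 'minutes': 30}
next_refresh = datetime.datetime.now() + datetime.timedelta(**refresh)
print(next_refresh)  # the URL is re-queued (unpaused) after this moment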
Python
def list_queue(dbclient, opts):
    '''
    List all queued URLs in the database
    '''
    ret = []
    out = flayer.tools.Output(opts)
    cur = dbclient.cursor()
    cur.execute('SELECT url, paused FROM dl_queue')
    if cur.rowcount > 0:
        for row in cur.fetchall():
            if bool(row[1]) is True:
                line = '{} (paused)'.format(row[0])
            else:
                line = row[0]
            ret.append(line)
            out.info(line)
    out.info('{} URLs queued'.format(cur.rowcount))
    if not opts.get('already_running'):
        try:
            os.remove(opts['pid_file'])
        except FileNotFoundError:
            pass
    return {'urls': ret, 'number_queued': cur.rowcount}
Python
def pause(dbclient, opts, urls): ''' Pause URL(s) in the download queue ''' ret = {'urls': urls, 'number_paused': len(urls)} out = flayer.tools.Output(opts) cur = dbclient.cursor() spacer = ', '.join(['%s' for url in range(len(urls))]) sql = 'UPDATE dl_queue SET paused = true WHERE url IN ({})'.format(spacer) cur.execute(sql, urls) dbclient.commit() out.info(ret) return ret
Python
def unpause(dbclient, opts, urls): ''' Unpause URL(s) in the download queue ''' ret = {'urls': urls, 'number_unpaused': len(urls)} out = flayer.tools.Output(opts) cur = dbclient.cursor() spacer = ', '.join(['%s' for url in range(len(urls))]) sql = 'UPDATE dl_queue SET paused = false WHERE url IN ({})'.format(spacer) cur.execute(sql, urls) dbclient.commit() out.info(ret) return ret
Python
def pattern_wait(dbclient, url):
    '''
    Check the URL against the ``pattern_wait`` table, using a regular
    expression. If it matches, then all other URLs that match the pattern will
    have their ``paused_until`` values updated to ``now() + {wait} seconds``.

    Only the first match will be returned, so it's best to make patterns as
    specific as possible. Normally a pattern will only be a domain name, so
    this should not normally be a problem.

    This function should be run before and after any download, such as
    ``get_url()`` and ``status()``. Running before will help prevent other
    agents from hitting the domain again at the same time, and running after
    will keep all agents from hitting a domain again too fast.
    '''
    cur = dbclient.cursor()
    sql = 'SELECT wait, pattern FROM pattern_wait WHERE %s ~ pattern LIMIT 1'
    cur.execute(sql, [url])
    try:
        row = cur.fetchone()
        wait = row[0]
        pattern = row[1]
    except TypeError:
        # No matches
        return
    # Pause every queued URL that matches the pattern; the interval is built
    # explicitly so the placeholder is not buried inside a string literal
    sql = '''
        UPDATE dl_queue
        SET paused_until = now() + (%s * interval '1 second')
        WHERE url ~ %s
    '''
    cur.execute(sql, [wait, pattern])
    dbclient.commit()
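The SELECT above implies that pattern_wait rows carry just a regex pattern and a wait in seconds. A hedged example of seeding such a row (connection details and column types are assumptions; only the pattern and wait columns are visible from the code):

import psycopg2

# Connection parameters are placeholders; adjust to the Web Flayer database
conn = psycopg2.connect(dbname='flayer', user='flayer', host='localhost')
cur = conn.cursor()
# Wait 30 seconds between any two hits on example.com
cur.execute(
    'INSERT INTO pattern_wait (pattern, wait) VALUES (%s, %s)',
    [r'example\.com', 30],
)
conn.commit()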
Python
def store_url_metadata(dbclient, opts, url, metadata):
    '''
    This function stores metadata for a URL which may or may not have already
    been retrieved itself.
    '''
    cur = dbclient.cursor()
    sql = 'SELECT uuid FROM urls WHERE url ~ %s'
    cur.execute(sql, (url,))
    uuid = None
    data = cur.fetchone()
    if data:
        uuid = data[0]
    sql = '''
        INSERT INTO url_metadata (uuid, url, metadata) VALUES (%s, %s, %s)
        ON CONFLICT (url) DO UPDATE SET metadata = %s
    '''
    cur.execute(sql, (uuid, url, json.dumps(metadata), json.dumps(metadata)))
    dbclient.commit()
Python
def warn(self, msg, force=False): ''' Something is possibly wrong, but not enough to stop running ''' if not self.opts['daemon'] or force is True: print(colored(msg, self.opts.get('warn_color', 'yellow')))
Python
def error(self, msg, force=False): ''' Something is wrong enough to halt execution ''' if not self.opts['daemon'] or force is True: print(colored(msg, self.opts.get('error_color', 'red'), attrs=['bold']))
Python
def _save_path(url, url_uuid, req, wait, opts, context, dbclient): ''' Save the URL to a path ''' urlcomps = urllib.parse.urlparse(url) if opts['force_directories']: newpath = urlcomps[2].lstrip('/') file_name = os.path.join(opts['save_path'], urlcomps[1], newpath) else: file_name = os.path.join(opts['save_path'], urlcomps[2].split('/')[-1]) return status(req, url, url_uuid, file_name, wait, opts, context, dbclient)
Python
def dbsave_media(cur, media_url, url_uuid, file_name, dbclient): ''' Save a media item into the database, once it's been downloaded cur: Database cursor media_url: The URL of the image/video that was downloaded url_uuid: The UUID of the parent of the media_url file_name: The place where the media_url was downloaded to ''' try: cur.execute(''' INSERT INTO urls (url) values (%s) RETURNING uuid ''', [media_url]) dbclient.commit() new_id = cur.fetchone()[0] except psycopg2.IntegrityError: # This relationship already exists dbclient.rollback() cur.execute(''' SELECT uuid FROM urls WHERE url = %s ''', [media_url]) new_id = cur.fetchone()[0] try: cur.execute(''' INSERT INTO referers (url_uuid, referer_uuid) values (%s, %s) ''', [new_id, url_uuid]) dbclient.commit() except psycopg2.IntegrityError: # This relationship already exists dbclient.rollback() cur.execute(''' SELECT COUNT(*) FROM content WHERE url_uuid = %s ''', [new_id]) if cur.fetchone()[0] < 1: cur.execute(''' INSERT INTO content (url_uuid, cache_path) VALUES (%s, %s) ''', [new_id, file_name]) dbclient.commit()
Python
def queue_urls(links, dbclient, opts):
    '''
    Add URL(s) to the download queue, skipping any that have already been
    downloaded unless --force is used
    '''
    out = Output(opts)
    cur = dbclient.cursor()

    if isinstance(links, str):
        links = [links]

    for url in links:
        if opts.get('force') is not True and not opts.get('queue_id'):
            # Check for URL in DB
            cur.execute('''
                SELECT uuid FROM urls WHERE url = %s
            ''', [url])
            if cur.rowcount > 0:
                if url not in opts['warned']:
                    out.info('URL has already been downloaded; use --force if necessary')
                    # Only warn once per URL
                    opts['warned'].append(url)
                continue

        fields = ['url']
        args = [url]
        if opts.get('queue_id') is not None:
            fields.append('uuid')
            args.append(opts['queue_id'])
        if 'refresh_interval' in opts:
            fields.append('refresh_interval')
            args.append(opts['refresh_interval'])
        if 'overwrite' not in opts:
            opts['overwrite'] = False
        fields.append('overwrite')
        args.append(opts['overwrite'])

        query = 'INSERT INTO dl_queue ({}) VALUES ({})'.format(
            ', '.join(fields),
            ', '.join(['%s' for arg in range(len(args))])
        )

        try:
            cur.execute(query, args)
            dbclient.commit()
        except psycopg2.IntegrityError:
            # This URL is already queued
            dbclient.rollback()

    cur.execute('SELECT count(*) FROM dl_queue')
    return cur.fetchone()[0]
Python
def reprocess_urls(urls, patterns, dbclient=None):
    '''
    Reprocess the cached URLs which match the pattern(s)
    '''
    if not urls:
        urls = []
    if isinstance(patterns, str):
        patterns = [patterns]

    cur = dbclient.cursor()
    wheres = ['url~%s'] * len(patterns)
    query = 'SELECT url FROM urls WHERE {}'.format(' OR '.join(wheres))
    cur.execute(query, patterns)
    for row in cur.fetchall():
        urls.append(row[0])

    return urls
Python
def queue_regexp(urls, pattern, dbclient, opts): ''' Add the URLs matching the pattern to the download queue ''' expr = re.compile(pattern) links = [] for url in urls: if expr.search(url): links.append(url) queue_urls(links, dbclient, opts)
Python
def _rename(media_url, file_name, opts): ''' When files are downloaded using status, rename as per a template ''' out = Output(opts) template = opts.get('rename_template', '') if not template: return file_name urlcomps = urllib.parse.urlparse(media_url) replacements = { 'host': urlcomps[1].split(':')[0], 'path': '/'.join(urlcomps[2].split('/')[:-2]) } # File extensions if '.' in urlcomps[2].split('/')[-1]: replacements['ext'] = urlcomps[2].split('/')[-1].split('.')[-1] else: replacements['ext'] = '' if not opts.get('rename_count'): opts['rename_count'] = opts.get('rename_count_start', 0) if opts.get('rename_count_padding'): try: opts['rename_count_padding'] = int(opts['rename_count_padding']) except ValueError: out.warn('--rename-count-padding must be an integer, using 0') opts['rename_count_padding'] = 0 template = template.replace('{count}', '{count:0>{rename_count_padding}}') replacements['rename_count_padding'] = opts['rename_count_padding'] replacements['count'] = str(opts['rename_count']) opts['rename_count'] += 1 file_name = os.path.join(opts['save_path'], template.format(**replacements)) return file_name
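The replacements built above mean a rename template can reference {host}, {path}, {ext} and {count} (optionally zero-padded). A standalone illustration of the substitution, with assumed option values:

import urllib.parse

# Assumed template and URL; only the placeholder names come from the code above
template = '{host}_{count:0>4}.{ext}'
media_url = 'https://example.com/images/photo.jpg'

urlcomps = urllib.parse.urlparse(media_url)
name = urlcomps[2].split('/')[-1]
replacements = {
    'host': urlcomps[1].split(':')[0],
    'ext': name.split('.')[-1] if '.' in name else '',
    'count': '7',
}
print(template.format(**replacements))  # example.com_0007.jpg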
Python
def parse_links(url, content, level, opts): ''' Return the links from an HTML page ''' out = Output(opts) hrefs = [] try: # Get ready to do some html parsing soup = BeautifulSoup(content, 'html.parser') # Generate absolute URLs for every link on the page url_comps = urllib.parse.urlparse(url) tags = soup.find_all('a') if opts['search_src'] is True: tags = tags + soup.find_all(src=True) for link in tags: if level > int(opts['level']): continue href = urllib.parse.urljoin(url, link.get('href')) if opts['search_src'] is True and not link.get('href'): href = urllib.parse.urljoin(url, link.get('src')) link_comps = urllib.parse.urlparse(href) if link.text.startswith('javascript'): continue if int(opts.get('level', 0)) > 0 and int(opts.get('level', 0)) < 2: continue if opts['span_hosts'] is not True: if not link_comps[1].startswith(url_comps[1].split(':')[0]): continue hrefs.append(href.split('#')[0]) # Render the page, and print it along with the links if opts.get('render', False) is True: out.info(soup.get_text()) return hrefs except TypeError: # This URL probably isn't HTML return []
Python
def MakeFlayerHTTPRequestHandler(opts, context):  # pylint: disable=invalid-name
    '''
    Return an HTTP class which can handle opts being passed in
    '''
    class FlayerHTTPRequestHandler(BaseHTTPRequestHandler):
        '''
        Process arguments
        '''
        def __init__(self, *args, **kwargs):
            self.dbclient = flayer.db.client(opts)
            super(FlayerHTTPRequestHandler, self).__init__(*args, **kwargs)

        def do_GET(self):  # pylint: disable=invalid-name
            '''
            Only GET requests are supported at this time
            '''
            qstr = self.path.lstrip('/?')
            data = urllib.parse.parse_qs(qstr)

            if 'list_queue' in data:
                queue = flayer.db.list_queue(self.dbclient, opts)
                self.send(json.dumps(queue))
                return

            if 'show_opts' in data:
                tmp_opts = opts.copy()
                del tmp_opts['http_api']
                del tmp_opts['salt_event']
                for item in tmp_opts:
                    if isinstance(tmp_opts[item], set):
                        # Sets are not JSON serializable
                        tmp_opts[item] = list(tmp_opts[item])
                self.send(json.dumps(tmp_opts, indent=4), content_type='text/json')
                return

            if 'show_context' in data:
                self.send(json.dumps(context, indent=4), content_type='text/json')
                return

            for item in ('headers', 'parser_dir'):
                if item in data:
                    opts[item] = data[item]
                    del data[item]

            for item in data:
                if data[item][0] in ('True', 'False', 'None'):
                    # bool('False') would be True, so map the strings explicitly
                    opts[item] = {'True': True, 'False': False, 'None': None}[data[item][0]]
                elif item == 'user_agent':
                    opts['headers']['User-Agent'] = data[item][0]
                else:
                    opts[item] = data[item][0]
            self.send('True')

            # Stop the server if necessary
            if opts.get('stop') or opts.get('hard_stop') or opts.get('abort'):
                open(opts['stop_file'], 'a').close()

        def send(self, message, response=200, content_type='text/html'):
            '''
            Send a message to the client
            '''
            self.send_response(response)
            self.send_header('Content-type', content_type)
            self.end_headers()
            self.wfile.write(bytes(message, 'utf8'))

        def log_message(self, fmt, *args):  # pylint: disable=arguments-differ,unused-argument
            '''
            Don't log to the console
            '''
            return

    return FlayerHTTPRequestHandler
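MakeFlayerHTTPRequestHandler is a factory so that the handler class can close over opts and context. A sketch of wiring it into the standard library HTTP server; the bind address and port are assumptions, and flayer's daemon presumably does the equivalent internally:

from http.server import HTTPServer

# opts and context would come from flayer's own configuration loading;
# the address and port here are placeholders
handler_class = MakeFlayerHTTPRequestHandler(opts, context)
server = HTTPServer(('127.0.0.1', 42424), handler_class)
server.serve_forever()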
Python
def do_GET(self):  # pylint: disable=invalid-name
    '''
    Only GET requests are supported at this time
    '''
    qstr = self.path.lstrip('/?')
    data = urllib.parse.parse_qs(qstr)

    if 'list_queue' in data:
        queue = flayer.db.list_queue(self.dbclient, opts)
        self.send(json.dumps(queue))
        return

    if 'show_opts' in data:
        tmp_opts = opts.copy()
        del tmp_opts['http_api']
        del tmp_opts['salt_event']
        for item in tmp_opts:
            if isinstance(tmp_opts[item], set):
                # Sets are not JSON serializable
                tmp_opts[item] = list(tmp_opts[item])
        self.send(json.dumps(tmp_opts, indent=4), content_type='text/json')
        return

    if 'show_context' in data:
        self.send(json.dumps(context, indent=4), content_type='text/json')
        return

    for item in ('headers', 'parser_dir'):
        if item in data:
            opts[item] = data[item]
            del data[item]

    for item in data:
        if data[item][0] in ('True', 'False', 'None'):
            # bool('False') would be True, so map the strings explicitly
            opts[item] = {'True': True, 'False': False, 'None': None}[data[item][0]]
        elif item == 'user_agent':
            opts['headers']['User-Agent'] = data[item][0]
        else:
            opts[item] = data[item][0]
    self.send('True')

    # Stop the server if necessary
    if opts.get('stop') or opts.get('hard_stop') or opts.get('abort'):
        open(opts['stop_file'], 'a').close()
Python
def send(self, message, response=200, content_type='text/html'): ''' Send a message to the client ''' self.send_response(response) self.send_header('Content-type', content_type) self.end_headers() self.wfile.write(bytes(message, 'utf8'))
Python
def log_message(self, fmt, *args): # pylint: disable=arguments-differ,unused-argument ''' Don't log to the console ''' return
Python
def map_scatter_nav(preview): """ Interactive preview of the map to ease navigation through clickable ROIS """ #TODO:incorporate roi coordinates from data #currently scatter dots are randomly positioned # Create figure fig = go.Figure() x_dots = preview.width*np.random.rand(15)*0.8 y_dots = preview.height*np.random.rand(15)*0.8 # Add trace fig.add_trace( go.Scatter(x=x_dots, y=y_dots,mode='markers', opacity=0.6, hoverinfo='none', marker=dict(color=np.random.randn(15), colorscale='ylorbr', line_width=1, size=10)) ) # Add images fig.add_layout_image( dict( source=preview, xref="x", yref="y", x=0, y=preview.height, sizex=preview.width, sizey=preview.height, layer="below") ) # Set templates fig.update_layout( template="plotly_white", autosize=False, height=preview.height, width=preview.width, margin=dict(r=0, l=0, b=0, t=0)) fig.update_xaxes(showgrid=False,visible=False,range=[0, preview.width]) fig.update_yaxes(showgrid=False,visible=False,range=[0, preview.height]) return fig
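The preview argument only needs .width, .height and something Plotly accepts as an image source, so a PIL image works. A hedged usage sketch (the file path is a placeholder):

from PIL import Image

# 'map_preview.png' is a placeholder path to a rendered overview image
preview = Image.open('map_preview.png')
fig = map_scatter_nav(preview)
fig.show()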
Python
def available_datasets(base_url):
    """
    Lists the dataset ids that can be used to request different scans

    input: root url pointing to the scan tiles provider
    output: dataset ids
    """
    response = requests.get(base_url+"/keys")
    band_key = response.json()["keys"][-1]["key"]
    response = requests.get(base_url+"/datasets")
    datasets = response.json()["datasets"]
    datasets_df = pd.DataFrame.from_dict(datasets).drop(columns=band_key).drop_duplicates()
    datasets_ids = datasets_df.apply(lambda p: "/".join(p), axis=1)
    return datasets_ids
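The /keys and /datasets endpoints queried here follow a terracotta-style tile-server layout; the base URL below is only a placeholder for illustration:

# Placeholder base URL for the tile provider
base_url = "http://localhost:5000"
dataset_ids = available_datasets(base_url)
print(dataset_ids.tolist())
# e.g. ['scan_A/2021', 'scan_B/2021']  -- illustrative values only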
Python
def _recipe_seurat(adata, gene_num): """ Normalization and filtering as of Seurat [Satija15]_. This uses a particular preprocessing """ import scanpy as sc cell_mask = sc.pp.filter_cells(adata, min_genes=200, inplace=False)[0] adata = adata[cell_mask,:] gene_mask = sc.pp.filter_genes(adata, min_cells=3, inplace=False)[0] adata = adata[:,gene_mask] gene_names = adata.var_names.values sc.pp.normalize_total(adata, target_sum=1e4, key_added='counts_per_cell') filter_result = sc.pp.filter_genes_dispersion( adata.X, min_mean=0.0125, max_mean=3, min_disp=0.5, log=False, n_top_genes=gene_num) sc.pp.log1p(adata) expression = adata.X.copy() adata._inplace_subset_var(filter_result.gene_subset) # filter genes sc.pp.scale(adata, max_value=10) return adata, expression, gene_names, cell_mask, gene_mask, filter_result.gene_subset
Python
def preprocess_data(self, processed: bool = False, dimred: bool = False,
                    K: float = 1e4, gene_num: int = 2000, data_type: str = 'UMI', npc: int = 64):
    '''
    Data preprocessing - log-normalization, feature selection, and scaling.

    If the inputs are preprocessed by users, then `Gaussian` model will be used and PCA will be performed to reduce the input dimension.
    Otherwise, preprocessing will be performed on `X` following Seurat's routine.
    If `adata` is provided, the preprocessing will be done via `scanpy`.

    Parameters
    ----------
    processed : boolean, optional
        Whether adata has been processed. If `processed=True`, then `Gaussian` model will be used.
    dimred : boolean, optional
        Whether the processed adata is after dimension reduction.
    K : float, optional
        The constant that gene expression in each cell is scaled to sum up to.
    gene_num : int, optional
        The number of features to select.
    data_type : str, optional
        'UMI', 'non-UMI' and 'Gaussian', default is 'UMI'. If the input is a processed scanpy object, data type is set to Gaussian.
    npc : int, optional
        The number of PCs to retain.
    '''
    if data_type not in set(['UMI', 'non-UMI', 'Gaussian']):
        raise ValueError("Invalid data type, must be one of 'UMI', 'non-UMI', and 'Gaussian'.")

    if (self.adata is not None) and processed:
        self.data_type = 'Gaussian'
    else:
        self.data_type = data_type
    print('Using {} likelihood.'.format(self.data_type))

    raw_X = self.raw_X.copy() if self.raw_X is not None else None
    self.X_normalized, self.expression, self.X, self.c_score, \
    self.cell_names, self.gene_names, self.selected_gene_names, \
    self.scale_factor, self.labels, self.label_names, \
    self.le, self.gene_scalar = preprocess.preprocess(
        self.adata,
        processed,
        dimred,
        raw_X,
        self.c_score,
        self.raw_label_names,
        self.raw_cell_names,
        self.raw_gene_names,
        K, gene_num, self.data_type, npc)
    self.dim_origin = self.X.shape[1]
    self.selected_cell_subset = self.cell_names
    self.selected_cell_subset_id = np.arange(len(self.cell_names))
    self.adata = None
Python
def build_model(self, dimensions = [16], dim_latent: int = 8, ): ''' Initialize the Variational Auto Encoder model. Parameters ---------- dimensions : list, optional The list of dimensions of layers of autoencoder between latent space and original space. dim_latent : int, optional The dimension of latent space. ''' self.dimensions = dimensions self.dim_latent = dim_latent self.vae = model.VariationalAutoEncoder( self.dim_origin, self.dimensions, self.dim_latent, self.data_type, False if self.c_score is None else True ) if hasattr(self, 'inferer'): delattr(self, 'inferer')
Python
def save_model(self, path_to_file: str = 'model.checkpoint'): '''Saving model weights. Parameters ---------- path_to_file : str, optional The path to weight files of pre-trained or trained model ''' self.vae.save_weights(path_to_file) if hasattr(self, 'cluster_labels') and self.cluster_labels is not None: with open(path_to_file+'.label', 'wb') as f: np.save(f, self.cluster_labels) with open(path_to_file+'.config', 'wb') as f: np.save(f, np.array([ self.dim_origin, self.dimensions, self.dim_latent, self.data_type, False if self.c_score is None else True], dtype=object)) if hasattr(self, 'inferer') and hasattr(self.inferer, 'embed_mu'): with open(path_to_file+'.inference', 'wb') as f: np.save(f, np.array([ self.pi, self.mu, self.pc_x, self.w_tilde, self.var_w_tilde, self.z, self.embed_z, self.inferer.embed_mu], dtype=object))
Python
def load_model(self, path_to_file: str = 'model.checkpoint', load_labels: bool = False):
    '''Load model weights.

    Parameters
    ----------
    path_to_file : str, optional
        The path to weight files of the pre-trained or trained model.
    load_labels : boolean, optional
        Whether to load clustering labels or not.
        If load_labels is True, then the LatentSpace layer will be initialized based on the model.
        If load_labels is False, then the LatentSpace layer will not be initialized.
    '''
    if not os.path.exists(path_to_file+'.config'):
        raise AssertionError('Config file does not exist!')
    if load_labels and not os.path.exists(path_to_file+'.label'):
        raise AssertionError('Label file does not exist!')

    with open(path_to_file+'.config', 'rb') as f:
        [self.dim_origin, self.dimensions,
         self.dim_latent, self.data_type, has_c] = np.load(f, allow_pickle=True)
    self.vae = model.VariationalAutoEncoder(
        self.dim_origin, self.dimensions, self.dim_latent,
        self.data_type, has_c
    )

    if load_labels:
        with open(path_to_file+'.label', 'rb') as f:
            cluster_labels = np.load(f, allow_pickle=True)
        n_clusters = len(np.unique(cluster_labels))
        self.init_latent_space(n_clusters, cluster_labels)
        if os.path.exists(path_to_file+'.inference'):
            with open(path_to_file+'.inference', 'rb') as f:
                arr = np.load(f, allow_pickle=True)
                if len(arr) == 9:
                    [self.pi, self.mu, self.pc_x, self.w_tilde, self.var_w_tilde,
                     self.D_JS, self.z, self.embed_z, embed_mu] = arr
                else:
                    [self.pi, self.mu, self.pc_x, self.w_tilde, self.var_w_tilde,
                     self.z, self.embed_z, embed_mu] = arr
            self.inferer.mu = self.mu
            self.inferer.embed_z = self.embed_z
            self.inferer.embed_mu = embed_mu

    self.vae.load_weights(path_to_file)
Python
def pre_train(self, stratify = False, test_size = 0.1, random_state: int = 0,
        learning_rate: float = 1e-3, batch_size: int = 32, L: int = 1, alpha: float = 0.10,
        num_epoch: int = 300, num_step_per_epoch: Optional[int] = None,
        early_stopping_patience: int = 5, early_stopping_tolerance: float = 1.0,
        path_to_weights: Optional[str] = None):
    '''Pretrain the model with specified learning rate.

    Parameters
    ----------
    stratify : np.array, None, or False
        If an array is provided, or `stratify=None` and `self.labels` is available, then they will be used to perform stratified shuffle splitting. Otherwise, general shuffle splitting is used. Set to `False` if `self.labels` is not intended for stratified shuffle splitting.
    test_size : float or int, optional
        The proportion or size of the test set.
    random_state : int, optional
        The random state for data splitting.
    learning_rate : float, optional
        The initial learning rate for the Adam optimizer.
    batch_size : int, optional
        The batch size for pre-training.
    L : int, optional
        The number of MC samples.
    alpha : float, optional
        The value of alpha in [0,1] to encourage covariate adjustment. Not used if there are no covariates.
    num_epoch : int, optional
        The maximum number of epochs.
    num_step_per_epoch : int, optional
        The number of steps per epoch; it will be inferred from the number of cells and batch size if it is None.
    early_stopping_patience : int, optional
        The maximum number of epochs with no improvement.
    early_stopping_tolerance : float, optional
        The minimum change of loss to be considered as an improvement.
    path_to_weights : str, optional
        The path of the weight file to be saved; weights are not saved if None.
    '''
    if stratify is None:
        stratify = self.labels
    elif stratify is False:
        stratify = None
    id_train, id_test = train_test_split(
                            np.arange(self.X.shape[0]),
                            test_size=test_size,
                            stratify=stratify,
                            random_state=random_state)
    if num_step_per_epoch is None:
        num_step_per_epoch = len(id_train)//batch_size+1
    self.train_dataset = train.warp_dataset(self.X_normalized[id_train].astype(tf.keras.backend.floatx()),
                                            None if self.c_score is None else self.c_score[id_train].astype(tf.keras.backend.floatx()),
                                            batch_size,
                                            self.X[id_train].astype(tf.keras.backend.floatx()),
                                            self.scale_factor[id_train].astype(tf.keras.backend.floatx()))
    self.test_dataset = train.warp_dataset(self.X_normalized[id_test],
                                           None if self.c_score is None else self.c_score[id_test].astype(tf.keras.backend.floatx()),
                                           batch_size,
                                           self.X[id_test].astype(tf.keras.backend.floatx()),
                                           self.scale_factor[id_test].astype(tf.keras.backend.floatx()))
    self.vae = train.pre_train(
        self.train_dataset,
        self.test_dataset,
        self.vae,
        learning_rate,
        L, alpha,
        num_epoch,
        num_step_per_epoch,
        early_stopping_patience,
        early_stopping_tolerance,
        0)

    if path_to_weights is not None:
        self.save_model(path_to_weights)
Python
def train(self, stratify = False, test_size = 0.1, random_state: int = 0,
        learning_rate: float = 1e-3, batch_size: int = 32,
        L: int = 1, alpha: float = 0.10, beta: float = 1,
        num_epoch: int = 300, num_step_per_epoch: Optional[int] = None,
        early_stopping_patience: int = 5, early_stopping_tolerance: float = 1.0, early_stopping_warmup: int = 10,
        path_to_weights: Optional[str] = None,
        plot_every_num_epoch: Optional[int] = None, dimred: str = 'umap',
        **kwargs):
    '''Train the model.

    Parameters
    ----------
    stratify : np.array, None, or False
        If an array is provided, or `stratify=None` and `self.labels` is available, then they will be used to perform stratified shuffle splitting. Otherwise, general shuffle splitting is used. Set to `False` if `self.labels` is not intended for stratified shuffle splitting.
    test_size : float or int, optional
        The proportion or size of the test set.
    random_state : int, optional
        The random state for data splitting.
    learning_rate : float, optional
        The initial learning rate for the Adam optimizer.
    batch_size : int, optional
        The batch size for training.
    L : int, optional
        The number of MC samples.
    alpha : float, optional
        The value of alpha in [0,1] to encourage covariate adjustment. Not used if there are no covariates.
    beta : float, optional
        The value of beta in beta-VAE.
    num_epoch : int, optional
        The maximum number of epochs.
    num_step_per_epoch : int, optional
        The number of steps per epoch; it will be inferred from the number of cells and batch size if it is None.
    early_stopping_patience : int, optional
        The maximum number of epochs with no improvement.
    early_stopping_tolerance : float, optional
        The minimum change of loss to be considered as an improvement.
    early_stopping_warmup : int, optional
        The number of warmup epochs.
    path_to_weights : str, optional
        The path of the weight file to be saved; weights are not saved if None.
    plot_every_num_epoch : int, optional
        Plot the intermediate result every few epochs, or do not plot if it is None.
    dimred : str, optional
        The name of the dimension reduction algorithm; can be 'umap', 'pca' and 'tsne'. Only used if 'plot_every_num_epoch' is not None.
    **kwargs :
        Extra key-value arguments for dimension reduction algorithms.
    '''
    if stratify is None:
        stratify = self.labels[self.selected_cell_subset_id]
    elif stratify is False:
        stratify = None
    id_train, id_test = train_test_split(
                            np.arange(len(self.selected_cell_subset_id)),
                            test_size=test_size,
                            stratify=stratify,
                            random_state=random_state)
    if num_step_per_epoch is None:
        num_step_per_epoch = len(id_train)//batch_size+1

    c = None if self.c_score is None else self.c_score[self.selected_cell_subset_id,:].astype(tf.keras.backend.floatx())
    self.train_dataset = train.warp_dataset(self.X_normalized[self.selected_cell_subset_id,:][id_train].astype(tf.keras.backend.floatx()),
                                            None if c is None else c[id_train],
                                            batch_size,
                                            self.X[self.selected_cell_subset_id,:][id_train].astype(tf.keras.backend.floatx()),
                                            self.scale_factor[self.selected_cell_subset_id][id_train].astype(tf.keras.backend.floatx()))
    self.test_dataset = train.warp_dataset(self.X_normalized[self.selected_cell_subset_id,:][id_test].astype(tf.keras.backend.floatx()),
                                           None if c is None else c[id_test],
                                           batch_size,
                                           self.X[self.selected_cell_subset_id,:][id_test].astype(tf.keras.backend.floatx()),
                                           self.scale_factor[self.selected_cell_subset_id][id_test].astype(tf.keras.backend.floatx()))
    if plot_every_num_epoch is None:
        self.whole_dataset = None
    else:
        self.whole_dataset = train.warp_dataset(self.X_normalized[self.selected_cell_subset_id,:].astype(tf.keras.backend.floatx()),
                                                c,
                                                batch_size)
    self.vae = train.train(
        self.train_dataset,
        self.test_dataset,
        self.whole_dataset,
        self.vae,
        learning_rate,
        L,
        alpha,
        beta,
        num_epoch,
        num_step_per_epoch,
        early_stopping_patience,
        early_stopping_tolerance,
        early_stopping_warmup,
        self.labels[self.selected_cell_subset_id],
        plot_every_num_epoch,
        dimred,
        **kwargs
        )

    if path_to_weights is not None:
        self.save_model(path_to_weights)
Python
def init_inference(self, batch_size: int = 32, L: int = 5, dimred: str = 'umap', refit_dimred: bool = True, **kwargs):
    '''Initialize trajectory inference by computing the posterior estimations.

    Parameters
    ----------
    batch_size : int, optional
        The batch size when doing inference.
    L : int, optional
        The number of MC samples when doing inference.
    dimred : str, optional
        The name of dimension reduction algorithms, can be 'umap', 'pca' and 'tsne'.
    refit_dimred : boolean, optional
        Whether to refit the dimension reduction algorithm or not.
    **kwargs :
        Extra key-value arguments for dimension reduction algorithms.
    '''
    c = None if self.c_score is None else self.c_score.astype(tf.keras.backend.floatx())
    self.test_dataset = train.warp_dataset(self.X_normalized.astype(tf.keras.backend.floatx()),
                                           c,
                                           batch_size)
    self.pi, self.mu, self.pc_x, \
        self.w_tilde, self.var_w_tilde, self.z = self.vae.inference(self.test_dataset, L=L)

    if refit_dimred or not hasattr(self.inferer, 'embed_z'):
        print('Computing embeddings.')
        self.embed_z = self.inferer.init_embedding(self.z, self.mu, dimred, **kwargs)
    else:
        self.embed_z = self.inferer.embed_z
    return None
Python
def select_root(self, days, method: str = 'sum'):
    '''Select the root vertex based on days information.

    Parameters
    ----------
    days : np.array
        The day information for selected cells used to determine the root vertex.
        The dtype should be 'int' or 'float'.
    method : str, optional
        'sum' or 'mean'.
        For 'sum', the root is the one with the maximal number of cells from the earliest day.
        For 'mean', the root is the one with the earliest mean time among cells associated with it.

    Returns
    ----------
    root : int
        The root vertex in the inferred trajectory based on given day information.
    '''
    if days is not None and len(days) != len(self.selected_cell_subset_id):
        raise ValueError("The length of day information ({}) is not "
                         "consistent with the number of selected cells ({})!".format(
                             len(days), len(self.selected_cell_subset_id)))
    if not hasattr(self.inferer, 'embed_z'):
        raise ValueError("Need to call 'init_inference' first!")

    estimated_cell_types = np.argmax(self.w_tilde, axis=-1)
    if method == 'sum':
        root = np.argmax([np.sum(days[estimated_cell_types == i] == np.min(days))
                          for i in range(self.w_tilde.shape[-1])])
    elif method == 'mean':
        root = np.argmin([np.mean(days[estimated_cell_types == i])
                          for i in range(self.w_tilde.shape[-1])])
    else:
        raise ValueError("Method can be either 'sum' or 'mean'!")
    return root
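For example, with a per-cell day annotation aligned to the selected cells, the root could be picked as below (the `model` object and the array values are illustrative):

import numpy as np

# Illustrative day labels, one per selected cell; the length must equal
# len(model.selected_cell_subset_id).
days = np.array([0, 0, 2, 4, 4, 7], dtype=float)

root = model.select_root(days, method='sum')    # vertex with the most day-0 cells
# root = model.select_root(days, method='mean') # vertex with the earliest mean day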
Python
def infer_trajectory(self, init_node: int, cutoff: Optional[float] = None, is_plot: bool = True, path: Optional[str] = None):
    '''Infer the trajectory.

    Parameters
    ----------
    init_node : int
        The initial node for the inferred trajectory.
    cutoff : float, optional
        The threshold for filtering edges with scores less than cutoff.
    is_plot : boolean, optional
        Whether to plot or not.
    path : str, optional
        The path to save figure, or don't save if it is None.

    Returns
    ----------
    modified_G : nx.Graph
        The modified graph that indicates the inferred trajectory.
    modified_w_tilde : np.array \([N,k]\)
        The modified \(\\tilde{w}\).
    pseudotime : np.array \([N,]\)
        The pseudotime based on projected trajectory.
    '''
    self.modified_G, modified_w_tilde, pseudotime = self.inferer.infer_trajectory(
        init_node,
        self.label_names[self.selected_cell_subset_id],
        cutoff,
        path=path,
        is_plot=is_plot)

    if len(self.selected_cell_subset_id) < len(self.cell_names):
        self.modified_w_tilde = np.full(self.w_tilde.shape, np.nan)
        self.modified_w_tilde[self.selected_cell_subset_id,:] = modified_w_tilde
        self.pseudotime = np.full(len(self.cell_names), np.nan)
        self.pseudotime[self.selected_cell_subset_id] = pseudotime
    else:
        self.modified_w_tilde = modified_w_tilde
        self.pseudotime = pseudotime

    return self.modified_G, modified_w_tilde, pseudotime
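Putting the three steps together, a typical inference pass might look like the following sketch (the `model` object, the days array from the previous sketch, and the output path are assumptions):

# Hypothetical end-to-end inference sketch using the methods defined above.
model.init_inference(batch_size=32, L=5, dimred='umap')    # posterior estimates + embedding
root = model.select_root(days, method='sum')               # days as in the previous sketch
G, w_tilde, pseudotime = model.infer_trajectory(
    init_node=root,
    cutoff=None,                        # keep all edges
    is_plot=True,
    path='figures/trajectory.png'       # illustrative output path
)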
Python
def plot_uncertainty(self, refit_dimred: bool = False, dimred: str = 'umap', path: Optional[str] = None, **kwargs):
    '''Plot uncertainty of all selected cells.

    Parameters
    ----------
    refit_dimred : boolean, optional
        Whether to refit dimension reduction or use the existing embedding after inference.
    dimred : str, optional
        The name of dimension reduction algorithms, can be 'umap', 'pca' and 'tsne'.
    path : str, optional
        The path to save the figure, or not saving if it is None.
    **kwargs :
        Extra key-value arguments for dimension reduction algorithms.
    '''
    if not hasattr(self, 'modified_w_tilde'):
        raise ReferenceError("The object 'modified_w_tilde' does not exist! Please infer a trajectory before calling this function.")

    uncertainty = np.sum((self.modified_w_tilde - self.w_tilde)**2, axis=-1) \
        + np.sum(self.var_w_tilde, axis=-1)

    if not hasattr(self, 'embed_z') or refit_dimred:
        z = self.get_latent_z()
        embed_z = get_embedding(z, dimred, **kwargs)
    else:
        embed_z = self.embed_z
    return plot_uncertainty(uncertainty[self.selected_cell_subset_id],
                            embed_z[self.selected_cell_subset_id,:],
                            path)
Python
def plot_marker_gene(self, gene_name: str, refit_dimred: bool = False, dimred: str = 'umap', path: Optional[str] = None, **kwargs):
    '''Plot expression of the given marker gene.

    Parameters
    ----------
    gene_name : str
        The name of the marker gene.
    refit_dimred : boolean, optional
        Whether to refit dimension reduction or use the existing embedding after inference.
    dimred : str, optional
        The name of dimension reduction algorithms, can be 'umap', 'pca' and 'tsne'.
    path : str, optional
        The path to save the figure, or not saving if it is None.
    **kwargs :
        Extra key-value arguments for dimension reduction algorithms.
    '''
    if gene_name not in self.gene_names:
        raise ValueError("Gene '{}' does not exist!".format(gene_name))
    if self.expression is None:
        raise ReferenceError("The expression matrix does not exist!")
    expression = self.expression[self.selected_cell_subset_id,:][:, self.gene_names == gene_name].flatten()

    if not hasattr(self, 'embed_z') or refit_dimred:
        z = self.get_latent_z()
        embed_z = get_embedding(z, dimred, **kwargs)
    else:
        embed_z = self.embed_z
    return plot_marker_gene(expression,
                            gene_name,
                            embed_z[self.selected_cell_subset_id,:],
                            path)
Python
def pre_train(train_dataset, test_dataset, vae, learning_rate: float, L: int, alpha: float,
              num_epoch_pre: int, num_step_per_epoch: int, early_stopping_patience: int,
              early_stopping_tolerance: float, early_stopping_warmup: int):
    '''Pretraining.

    Parameters
    ----------
    train_dataset : tf.Dataset
        The Tensorflow Dataset object.
    test_dataset : tf.Dataset
        The Tensorflow Dataset object.
    vae : VariationalAutoEncoder
        The model.
    learning_rate : float
        The initial learning rate for the Adam optimizer.
    L : int
        The number of MC samples.
    alpha : float, optional
        The value of alpha in [0,1] to encourage covariate adjustment. Not used if there are no covariates.
    num_epoch_pre : int
        The maximum number of epochs.
    num_step_per_epoch : int
        The number of steps per epoch; it will be inferred from the number of cells and batch size if it is None.
    early_stopping_patience : int
        The maximum number of epochs if there is no improvement.
    early_stopping_tolerance : float
        The minimum change of loss to be considered as an improvement.
    early_stopping_warmup : int, optional
        The number of warmup epochs.

    Returns
    ----------
    vae : VariationalAutoEncoder
        The pretrained model.
    '''
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    loss_train = tf.keras.metrics.Mean()
    loss_test = tf.keras.metrics.Mean()
    early_stopping = Early_Stopping(patience=early_stopping_patience,
                                    tolerance=early_stopping_tolerance,
                                    warmup=early_stopping_warmup)
    for epoch in range(num_epoch_pre):
        progbar = Progbar(num_step_per_epoch)

        print('Pretrain - Start of epoch %d' % (epoch,))

        # Iterate over the batches of the dataset.
        for step, (x_batch, x_norm_batch, c_score, x_scale_factor) in enumerate(train_dataset):
            with tf.GradientTape() as tape:
                losses = vae(x_norm_batch, c_score, x_batch, x_scale_factor, pre_train=True, L=L, alpha=alpha)
                # Compute reconstruction loss
                loss = tf.reduce_sum(losses[0])
            grads = tape.gradient(loss, vae.trainable_weights,
                                  unconnected_gradients=tf.UnconnectedGradients.ZERO)
            optimizer.apply_gradients(zip(grads, vae.trainable_weights))

            loss_train(loss)

            if (step+1) % 10 == 0 or step+1 == num_step_per_epoch:
                progbar.update(step+1, [('Reconstructed Loss', float(loss))])

        for step, (x_batch, x_norm_batch, c_score, x_scale_factor) in enumerate(test_dataset):
            losses = vae(x_norm_batch, c_score, x_batch, x_scale_factor, pre_train=True, L=L, alpha=alpha)
            loss = tf.reduce_sum(losses[0])
            loss_test(loss)

        print(' Training loss over epoch: %.4f. Testing loss over epoch: %.4f'
              % (float(loss_train.result()), float(loss_test.result())))

        if early_stopping(float(loss_test.result())):
            print('Early stopping.')
            break
        loss_train.reset_states()
        loss_test.reset_states()

    print('Pretrain Done.')
    return vae
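pre_train depends on an Early_Stopping helper that is built with patience, tolerance, and warmup and is called once per epoch with the validation loss, returning True when training should stop. The snippet below is only a minimal sketch consistent with that call pattern, not the package's actual implementation:

class Early_Stopping:
    '''Minimal early-stopping helper matching the call pattern above (sketch only).'''
    def __init__(self, patience=5, tolerance=1.0, warmup=10):
        self.patience = patience      # epochs allowed without improvement
        self.tolerance = tolerance    # minimum decrease counted as an improvement
        self.warmup = warmup          # epochs before early stopping is considered
        self.best = float('inf')
        self.count = 0
        self.epoch = 0

    def __call__(self, loss):
        self.epoch += 1
        if self.best - loss > self.tolerance:
            self.best = loss
            self.count = 0
        else:
            self.count += 1
        # Only stop once the warmup period has passed and patience is exhausted.
        return self.epoch > self.warmup and self.count >= self.patience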
Python
def build_milestone_net(subgraph, init_node):
    '''
    Args:
        subgraph      - a connected component of the graph (a networkx (sub)graph with weighted edges)
        init_node     - root node
    Returns:
        milestone_net - np.array of edges [parent, child, distance from root]
    '''
    if len(subgraph) == 1:
        warnings.warn('Singular node.')
        return []
    else:
        # Dijkstra's Algorithm
        unvisited = {node: {'parent': None,
                            'score': np.inf,
                            'distance': np.inf} for node in subgraph.nodes}
        current = init_node
        currentScore = 0
        currentDistance = 0
        unvisited[current]['score'] = currentScore

        milestone_net = []
        while True:
            for neighbour in subgraph.neighbors(current):
                if neighbour not in unvisited:
                    continue
                newScore = currentScore + subgraph[current][neighbour]['weight']
                if unvisited[neighbour]['score'] > newScore:
                    unvisited[neighbour]['score'] = newScore
                    unvisited[neighbour]['parent'] = current
                    unvisited[neighbour]['distance'] = currentDistance + 1

            if len(unvisited) < len(subgraph):
                milestone_net.append([unvisited[current]['parent'],
                                      current,
                                      unvisited[current]['distance']])
            del unvisited[current]
            if not unvisited:
                break
            current, currentScore, currentDistance = \
                sorted([(i[0], i[1]['score'], i[1]['distance']) for i in unvisited.items()],
                       key=lambda x: x[1])[0]
        return np.array(milestone_net)
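Called on a small weighted networkx graph, the function returns [parent, child, hop-distance] rows in order of accumulated edge weight from the root; a quick illustration (the graph is made up, and build_milestone_net plus its numpy/warnings imports are assumed to be in scope):

import networkx as nx

# Illustrative weighted graph: 0-1 (w=1.0), 1-2 (w=2.0), 0-3 (w=0.5)
G = nx.Graph()
G.add_weighted_edges_from([(0, 1, 1.0), (1, 2, 2.0), (0, 3, 0.5)])

net = build_milestone_net(G, init_node=0)
print(net)
# Expected output (rows visited in order of accumulated weight):
# [[0 3 1]
#  [0 1 1]
#  [1 2 2]]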
Python
def load_summoner_list(self, guilds):
    """Create list variables for the summoner lists and guild regions

    Args:
        guilds (iterable of discord.Guild): guilds to load summoner lists for
    """
    guild_id_list = [g["_id"] for g in self.guild.find()]
    self.user_list = {}
    for guild in guilds:
        if guild.id not in guild_id_list:
            self.guild.insert_one(
                {"_id": guild.id, "guild_name": guild.name, "region": "kr"}
            )
            log("new db documents was inserted", guild)
        self.guild_region[guild.id] = self.guild.find_one({"_id": guild.id})[
            "region"
        ]
        self.user_list[guild.id] = [
            n["user_name"] for n in self.user.find({"guild_id": guild.id})
        ]
Python
def edit_summoner_list(self, guild, add, summonerName):
    """Add or remove summonerName in the summoner list

    Args:
        guild (discord.Guild): guild whose summoner list is edited
        add (bool): True is add, False is remove.
        summonerName (str): Summoner's name to add or remove.

    Returns:
        str: Operation result
    """
    region = self.guild_region[guild.id]
    locale = self.get_locale(region)
    if add:
        url = "https://{}.api.riotgames.com/lol/summoner/v4/summoners/by-name/{}".format(
            self.guild_region[guild.id], summonerName
        )
        response = requests.get(url, headers={"X-Riot-Token": self.riot_api_key})
        if response.status_code != 200:
            if response.status_code == 404:
                return locale['invalid_summoner_name']
            else:
                return locale['error']

        if summonerName in self.user_list[guild.id]:
            return locale['exists_summoner_name']
        else:
            try:
                self.user.insert_one(
                    {"guild_id": guild.id, "user_name": summonerName}
                )
            except Exception as err:
                logErr(err)
                return locale['db_error']
            self.user_list[guild.id].append(summonerName)
            log("New summoner was added : {}".format(summonerName), guild)
            return locale['success_added']
    elif not add:
        try:
            self.user_list[guild.id].remove(summonerName)
        except ValueError:
            return locale['summoner_not_in_list']
        self.user.delete_one({"guild_id": guild.id, "user_name": summonerName})
        log("Summoner was removed : {}".format(summonerName), guild)
        return locale['success_removed']
Python
async def load_live_match_data(self, guild, match, lt=True):
    """Call Riot API to receive live_match information.

    Args:
        guild (discord.Guild): guild the tracking runs for
        match: encrypted summoner id used to look up the active game;
            reassigned to the match JSON returned by the spectator API
        lt (bool): whether to register the game in the live-tracking list

    Returns:
        The rendered live-match image produced by wrapper.draw_image,
        or a locale error string on failure.
    """
    region = self.guild_region[guild.id]
    locale = self.get_locale(region)

    url = "https://{}.api.riotgames.com/lol/spectator/v4/active-games/by-summoner/{}".format(
        region, match
    )
    response = requests.get(url, headers={"X-Riot-Token": self.riot_api_key})
    if response.status_code != 200:
        return
    match = response.json()

    if lt:
        try:
            self.live_game_id[guild.id]
        except KeyError:
            self.live_game_id[guild.id] = []

        # When match was already in self.live_game_id
        if match["gameId"] in self.live_game_id[guild.id]:
            return
        else:
            response = self.is_match_ended(guild, match["gameId"])
            # If the match was ended
            if response.status_code != 404:
                return
            self.live_game_id[guild.id].append(match["gameId"])
            log("New live match was added : {}".format(match["gameId"]), guild)
            log("Current tracking match list : {}".format(str(self.live_game_id[guild.id])), guild)

    maps = self.locale_maps['na1'] if region not in self.locale_maps.keys() else self.locale_maps[region]
    queues = self.locale_queues['na1'] if region not in self.locale_queues.keys() else self.locale_queues[region]

    data = {}
    data['match_data'] = wrapper.get_match_data(match, queues, maps)
    data['participants'] = wrapper.get_participants(match, self.static_champ_list, self.static_spell_list)

    try:
        await self.get_participants_data(data, region)
    except AssertionError as ex:
        logErr("AssertionError : {}".format(ex))
        return locale['error']

    df = pd.DataFrame(data['participants'])
    print(df)

    return wrapper.draw_image(self.latest, data, locale, self.font_name)
Python
async def hangman(self, ctx, guess: str = None):
    """Play a game of hangman against the bot!"""
    if guess is None:
        if self.the_data[ctx.guild]["running"]:
            await ctx.send(
                "Game of hangman is already running!\nEnter your guess!"
            )
            await self._printgame(ctx.channel)
        else:
            await ctx.send("Starting a game of hangman!")
            self._startgame(ctx.guild)
            await self._printgame(ctx.channel)
    elif not self.the_data[ctx.guild]["running"]:
        await ctx.send(
            "Game of hangman is not yet running!\nStarting a game of hangman!"
        )
        self._startgame(ctx.guild)
        await self._printgame(ctx.channel)
    else:
        await ctx.send("Guess by reacting to the message")
Python
def _startgame(self, guild):
    """Starts a new game of hangman"""
    self.the_data[guild]["answer"] = self._getphrase().upper()
    self.the_data[guild]["hangman"] = 0
    self.the_data[guild]["guesses"] = []
    self.winbool[guild] = False
    self.the_data[guild]["running"] = True
    self.the_data[guild]["trackmessage"] = False
Python
def _getphrase(self):
    """Get a new phrase for the game and return it"""
    with open(self.answer_path, "r") as phrasefile:
        phrases = phrasefile.readlines()

    outphrase = ""
    while outphrase == "":
        outphrase = phrases[randint(0, len(phrases) - 1)].partition(" (")[0]
    return outphrase
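The partition(" (") call keeps only the part of each phrase line before an optional parenthetical hint; a quick illustration with made-up lines:

# Illustrative phrase-file lines; anything from " (" onward is treated as a hint and dropped.
line = "WINNIE THE POOH (fictional bear)\n"
print(line.partition(" (")[0])   # -> "WINNIE THE POOH"

line = "PYTHON\n"
print(line.partition(" (")[0])   # -> "PYTHON\n" (no hint found, so the trailing newline is kept)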
Python
async def _guessletter(self, guess, message):
    """Checks the guess on a letter and prints game if acceptable guess"""
    channel = message.channel
    if guess.upper() not in "ABCDEFGHIJKLMNOPQRSTUVWXYZ" or len(guess) != 1:
        await channel.send("Invalid guess. Only A-Z is accepted")
        return

    if guess.upper() in self.the_data[channel.guild]["guesses"]:
        await channel.send("Already guessed that! Try again")
        return

    if guess.upper() not in self.the_data[channel.guild]["answer"]:
        self.the_data[channel.guild]["hangman"] += 1

    self.the_data[channel.guild]["guesses"].append(guess.upper())

    await self._reprintgame(message)
Python
async def _printgame(self, channel):
    """Print the current state of game"""
    if channel.guild not in self.hanglist:
        await self._update_hanglist()

    c_say = await self._make_say(channel.guild)

    message = await channel.send(c_say)

    self.the_data[channel.guild]["trackmessage"] = message.id

    await self._reactmessage_menu(message)
    await self._checkdone(channel)
Python
async def covidnews(self, ctx, countrycode: str):
    """Covid News from a Country - Country must be a 2-letter ISO 3166-1 code.

    Check luci covidcountries for a list of all possible country codes supported."""
    self.params["country"] = countrycode
    async with ctx.typing():
        data = await self.get(self.newsapi, params=self.params)
        if data.get("failed") is not None:
            return await ctx.send(data.get("failed"))
        if data["totalResults"] == 0:
            await ctx.send("No results found, ensure you're looking up the correct country code.")
            await ctx.send("Check luci covidcountries for a list.")
            return
    await GenericMenu(source=ArticleFormat(data["articles"]), ctx=ctx).start(
        ctx=ctx,
        wait=False,
    )
Python
async def covid(self, ctx, *, country: typing.Optional[str]):
    """Stats about Covid-19 or countries if provided.

    Supports multiple countries separated by a comma.
    Example: luci covid Ireland, England
    """
    if not country:
        async with ctx.typing():
            data = await self.get(self.api + "/all")
            if isinstance(data, dict) and data.get("failed") is not None:
                return await ctx.send(data.get("failed"))
            if not data:
                return await ctx.send("No data available.")
            embed = discord.Embed(
                color=0xf34949,
                title="Covid-19 Global Statistics",
                timestamp=datetime.datetime.utcfromtimestamp(data["updated"] / 1000),
            )
            embed.add_field(name="Cases", value=self.humanize_number(data["cases"]))
            embed.add_field(name="Deaths", value=self.humanize_number(data["deaths"]))
            embed.add_field(name="Recovered", value=self.humanize_number(data["recovered"]))
            embed.add_field(name="Critical", value=self.humanize_number(data["critical"]))
            embed.add_field(name="Active", value=self.humanize_number(data["active"]))
            embed.add_field(
                name="Affected Countries",
                value=self.humanize_number(data["affectedCountries"]),
            )
            embed.add_field(name="Cases Today", value=self.humanize_number(data["todayCases"]))
            embed.add_field(name="Deaths Today", value=self.humanize_number(data["todayDeaths"]))
            embed.add_field(name="Recovered Today", value=self.humanize_number(data["todayRecovered"]))
            embed.add_field(name="Total Tests", value=self.humanize_number(data["tests"]))
        await ctx.send(embed=embed)
    else:
        async with ctx.typing():
            data = await self.get(self.api + "/countries/{}".format(country))
            if isinstance(data, dict):
                error = data.get("failed")
                if error is not None:
                    return await ctx.send(error)
                data = [data]
            if not data:
                return await ctx.send("No data available.")
        await GenericMenu(source=CovidMenu(data), ctx=ctx, type="Today").start(
            ctx=ctx,
            wait=False,
        )
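The commands in this cog repeatedly check data.get("failed"), which implies a small JSON-fetching helper that reports HTTP failures through a "failed" key. A minimal sketch of such a helper, assuming aiohttp (the cog's real method may differ), could be:

import aiohttp

async def get(self, url, params=None):
    """Hypothetical JSON fetcher matching the error convention used above (sketch only)."""
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url, params=params) as resp:
                if resp.status != 200:
                    return {"failed": "The API returned status code {}.".format(resp.status)}
                return await resp.json()
    except aiohttp.ClientError:
        return {"failed": "Failed to connect to the API."}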
Python
async def yesterday(self, ctx, *, country: str):
    """Show the statistics from yesterday for countries.

    Supports multiple countries separated by a comma.
    Example: luci covid yesterday Ireland, England
    """
    async with ctx.typing():
        data = await self.get(self.api + "/countries/{}?yesterday=1".format(country))
        if isinstance(data, dict):
            error = data.get("failed")
            if error is not None:
                return await ctx.send(error)
            data = [data]
        if not data:
            return await ctx.send("No data available.")
    await GenericMenu(source=CovidMenu(data), ctx=ctx, type="Yesterday").start(
        ctx=ctx,
        wait=False,
    )
Python
async def todaycases(self, ctx):
    """Show the highest cases from countries today."""
    async with ctx.typing():
        data = await self.get(self.api + "/countries?sort=todayCases")
        if isinstance(data, dict):
            error = data.get("failed")
            if error is not None:
                return await ctx.send(error)
        if not data:
            return await ctx.send("No data available.")
        embed = discord.Embed(
            color=0xf34949,
            title="Covid-19 | Highest Cases Today | {}".format(data[0]["country"]),
            timestamp=datetime.datetime.utcfromtimestamp(data[0]["updated"] / 1000),
        )
        embed.add_field(name="Cases", value=self.humanize_number(data[0]["cases"]))
        embed.add_field(name="Deaths", value=self.humanize_number(data[0]["deaths"]))
        embed.add_field(name="Recovered", value=self.humanize_number(data[0]["recovered"]))
        embed.add_field(name="Cases Today", value=self.humanize_number(data[0]["todayCases"]))
        embed.add_field(name="Deaths Today", value=self.humanize_number(data[0]["todayDeaths"]))
        embed.add_field(name="Critical Condition", value=self.humanize_number(data[0]["critical"]))
    await ctx.send(embed=embed)
Python
async def todaydeaths(self, ctx):
    """Show the highest deaths from countries today."""
    async with ctx.typing():
        data = await self.get(self.api + "/countries?sort=todayDeaths")
        if isinstance(data, dict):
            error = data.get("failed")
            if error is not None:
                return await ctx.send(error)
        if not data:
            return await ctx.send("No data available.")
        embed = discord.Embed(
            color=0xf34949,
            title="Covid-19 | Highest Deaths Today | {}".format(data[0]["country"]),
            timestamp=datetime.datetime.utcfromtimestamp(data[0]["updated"] / 1000),
        )
        embed.add_field(name="Cases", value=self.humanize_number(data[0]["cases"]))
        embed.add_field(name="Deaths", value=self.humanize_number(data[0]["deaths"]))
        embed.add_field(name="Recovered", value=self.humanize_number(data[0]["recovered"]))
        embed.add_field(name="Cases Today", value=self.humanize_number(data[0]["todayCases"]))
        embed.add_field(name="Deaths Today", value=self.humanize_number(data[0]["todayDeaths"]))
        embed.add_field(name="Critical Condition", value=self.humanize_number(data[0]["critical"]))
    await ctx.send(embed=embed)
Python
async def highestcases(self, ctx):
    """Show the highest cases from countries overall."""
    async with ctx.typing():
        data = await self.get(self.api + "/countries?sort=cases")
        if isinstance(data, dict):
            error = data.get("failed")
            if error is not None:
                return await ctx.send(error)
        if not data:
            return await ctx.send("No data available.")
        embed = discord.Embed(
            color=0xf34949,
            title="Covid-19 | Highest Cases Overall | {}".format(data[0]["country"]),
            timestamp=datetime.datetime.utcfromtimestamp(data[0]["updated"] / 1000),
        )
        embed.add_field(name="Cases", value=self.humanize_number(data[0]["cases"]))
        embed.add_field(name="Deaths", value=self.humanize_number(data[0]["deaths"]))
        embed.add_field(name="Recovered", value=self.humanize_number(data[0]["recovered"]))
        embed.add_field(name="Cases Today", value=self.humanize_number(data[0]["todayCases"]))
        embed.add_field(name="Deaths Today", value=self.humanize_number(data[0]["todayDeaths"]))
        embed.add_field(name="Critical Condition", value=self.humanize_number(data[0]["critical"]))
    await ctx.send(embed=embed)
Python
async def highestdeaths(self, ctx):
    """Show the highest deaths from countries overall."""
    async with ctx.typing():
        data = await self.get(self.api + "/countries?sort=deaths")
        if isinstance(data, dict):
            error = data.get("failed")
            if error is not None:
                return await ctx.send(error)
        if not data:
            return await ctx.send("No data available.")
        embed = discord.Embed(
            color=0xf34949,
            title="Covid-19 | Highest Deaths Overall | {}".format(data[0]["country"]),
            timestamp=datetime.datetime.utcfromtimestamp(data[0]["updated"] / 1000),
        )
        embed.add_field(name="Cases", value=self.humanize_number(data[0]["cases"]))
        embed.add_field(name="Deaths", value=self.humanize_number(data[0]["deaths"]))
        embed.add_field(name="Recovered", value=self.humanize_number(data[0]["recovered"]))
        embed.add_field(name="Cases Today", value=self.humanize_number(data[0]["todayCases"]))
        embed.add_field(name="Deaths Today", value=self.humanize_number(data[0]["todayDeaths"]))
        embed.add_field(name="Critical Condition", value=self.humanize_number(data[0]["critical"]))
    await ctx.send(embed=embed)
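todaycases, todaydeaths, highestcases, and highestdeaths differ only in the sort key and embed title, so they could share one helper. The sketch below is a refactoring suggestion, not part of the original cog; _send_sorted_stats is a hypothetical name:

import datetime
import discord

async def _send_sorted_stats(self, ctx, sort_key: str, title: str):
    """Hypothetical shared helper: fetch countries sorted by `sort_key` and post the top entry."""
    async with ctx.typing():
        data = await self.get(self.api + "/countries?sort={}".format(sort_key))
        if isinstance(data, dict):
            error = data.get("failed")
            if error is not None:
                return await ctx.send(error)
        if not data:
            return await ctx.send("No data available.")
        top = data[0]
        embed = discord.Embed(
            color=0xf34949,
            title="Covid-19 | {} | {}".format(title, top["country"]),
            timestamp=datetime.datetime.utcfromtimestamp(top["updated"] / 1000),
        )
        fields = (
            ("Cases", "cases"), ("Deaths", "deaths"), ("Recovered", "recovered"),
            ("Cases Today", "todayCases"), ("Deaths Today", "todayDeaths"),
            ("Critical Condition", "critical"),
        )
        for name, key in fields:
            embed.add_field(name=name, value=self.humanize_number(top[key]))
    await ctx.send(embed=embed)

# e.g. todaycases would then reduce to:
#   await self._send_sorted_stats(ctx, "todayCases", "Highest Cases Today")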
Python
async def topcases(self, ctx, amount: int = 6):
    """Show X countries with the top amount of cases. Defaults to 6."""
    if amount > 20 or amount < 1:
        return await ctx.send("Invalid amount. Please choose an amount between 1-20.")
    async with ctx.typing():
        data = await self.get(self.api + "/countries?sort=cases")
        if isinstance(data, dict):
            error = data.get("failed")
            if error is not None:
                return await ctx.send(error)
        if not data:
            return await ctx.send("No data available.")
        embed = discord.Embed(
            color=0xf34949,
            title="Covid-19 | Top {} Cases ".format(amount),
            timestamp=datetime.datetime.utcfromtimestamp(data[0]["updated"] / 1000),
        )
        for i in range(amount):
            cases = self.humanize_number(data[i]["cases"])
            deaths = self.humanize_number(data[i]["deaths"])
            recovered = self.humanize_number(data[i]["recovered"])
            todayCases = self.humanize_number(data[i]["todayCases"])
            todayDeaths = self.humanize_number(data[i]["todayDeaths"])
            critical = self.humanize_number(data[i]["critical"])
            msg = (
                "**Cases**: {}\n**Deaths**: {}\n**Recovered**: {}\n**Cases Today**: {}"
                "\n**Deaths Today**: {}\n**Critical**: {}"
            ).format(cases, deaths, recovered, todayCases, todayDeaths, critical)
            embed.add_field(name=data[i]["country"], value=msg)
    await ctx.send(embed=embed)
Python
async def topcasestoday(self, ctx, amount: int = 6):
    """Show X countries with the top amount of cases today. Defaults to 6."""
    if amount > 20 or amount < 1:
        return await ctx.send("Invalid amount. Please choose an amount between 1-20.")
    async with ctx.typing():
        data = await self.get(self.api + "/countries?sort=todayCases")
        if isinstance(data, dict):
            error = data.get("failed")
            if error is not None:
                return await ctx.send(error)
        if not data:
            return await ctx.send("No data available.")
        embed = discord.Embed(
            color=0xf34949,
            title="Covid-19 | Top {} Cases Today ".format(amount),
            timestamp=datetime.datetime.utcfromtimestamp(data[0]["updated"] / 1000),
        )
        for i in range(amount):
            cases = self.humanize_number(data[i]["cases"])
            deaths = self.humanize_number(data[i]["deaths"])
            recovered = self.humanize_number(data[i]["recovered"])
            todayCases = self.humanize_number(data[i]["todayCases"])
            todayDeaths = self.humanize_number(data[i]["todayDeaths"])
            critical = self.humanize_number(data[i]["critical"])
            msg = (
                "**Cases**: {}\n**Deaths**: {}\n**Recovered**: {}\n**Cases Today**: {}"
                "\n**Deaths Today**: {}\n**Critical**: {}"
            ).format(cases, deaths, recovered, todayCases, todayDeaths, critical)
            embed.add_field(name=data[i]["country"], value=msg)
    await ctx.send(embed=embed)
Python
async def topdeaths(self, ctx, amount: int = 6):
    """Show X countries with the top amount of deaths. Defaults to 6."""
    if amount > 20 or amount < 1:
        return await ctx.send("Invalid amount. Please choose an amount between 1-20.")
    async with ctx.typing():
        data = await self.get(self.api + "/countries?sort=deaths")
        if isinstance(data, dict):
            error = data.get("failed")
            if error is not None:
                return await ctx.send(error)
        if not data:
            return await ctx.send("No data available.")
        embed = discord.Embed(
            color=0xf34949,
            title="Covid-19 | Top {} Deaths ".format(amount),
            timestamp=datetime.datetime.utcfromtimestamp(data[0]["updated"] / 1000),
        )
        for i in range(amount):
            cases = self.humanize_number(data[i]["cases"])
            deaths = self.humanize_number(data[i]["deaths"])
            recovered = self.humanize_number(data[i]["recovered"])
            todayCases = self.humanize_number(data[i]["todayCases"])
            todayDeaths = self.humanize_number(data[i]["todayDeaths"])
            critical = self.humanize_number(data[i]["critical"])
            msg = (
                "**Cases**: {}\n**Deaths**: {}\n**Recovered**: {}\n**Cases Today**: {}"
                "\n**Deaths Today**: {}\n**Critical**: {}"
            ).format(cases, deaths, recovered, todayCases, todayDeaths, critical)
            embed.add_field(name=data[i]["country"], value=msg)
    await ctx.send(embed=embed)
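Similarly, topcases, topcasestoday, and topdeaths only vary in the sort key and title; the per-country field building could be factored out as sketched here (a suggestion with hypothetical names, not the original code):

def _country_summary(self, entry):
    """Hypothetical helper: format one country's stats as an embed field value."""
    return (
        "**Cases**: {}\n**Deaths**: {}\n**Recovered**: {}\n**Cases Today**: {}"
        "\n**Deaths Today**: {}\n**Critical**: {}"
    ).format(
        self.humanize_number(entry["cases"]),
        self.humanize_number(entry["deaths"]),
        self.humanize_number(entry["recovered"]),
        self.humanize_number(entry["todayCases"]),
        self.humanize_number(entry["todayDeaths"]),
        self.humanize_number(entry["critical"]),
    )

# Inside each top-N command the loop body would then become:
#   embed.add_field(name=data[i]["country"], value=self._country_summary(data[i]))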