Dataset columns: language (stringclasses, 6 values); original_string (stringlengths, 25 to 887k); text (stringlengths, 25 to 887k).
Python
from datetime import datetime, timedelta

# NOW is assumed to be a module-level datetime constant defined elsewhere.

def add_todo(delay_time: str, task: str, start_time: datetime = NOW) -> str:
    """Add a todo list item in the future with a delay time.

    Parse out the time unit from the passed in delay_time str:
    - 30d = 30 days
    - 1h 10m = 1 hour and 10 min
    - 5m 3s = 5 min and 3 seconds
    - 45 or 45s = 45 seconds

    Return the task and planned time which is calculated from the provided
    start_time (default = NOW):
    >>> add_todo("1h 10m", "Wash my car")
    "Wash my car @ 2019-02-06 23:10:00"
    """
    future_time = start_time
    for word in delay_time.split():
        if word[-1] == 'd':
            future_time += timedelta(days=int(word[:-1]))
        elif word[-1] == 'h':
            future_time += timedelta(hours=int(word[:-1]))
        elif word[-1] == 'm':
            future_time += timedelta(minutes=int(word[:-1]))
        elif word[-1] == 's':
            future_time += timedelta(seconds=int(word[:-1]))
        else:
            future_time += timedelta(seconds=int(word))
    return f'{task} @ {future_time}'
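A quick usage sketch for add_todo; the fixed start_time below is a made-up value so the output is deterministic, and the imports from the block above are assumed:

from datetime import datetime

start = datetime(2019, 2, 6, 22, 0, 0)  # hypothetical start time
print(add_todo("1h 10m", "Wash my car", start_time=start))
# -> "Wash my car @ 2019-02-06 23:10:00"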
Python
import itertools


def running_mean(sequence):
    """Calculate the running mean of the sequence passed in, returns a
    sequence of same length with the averages. You can assume all items
    in sequence are numeric."""
    for i, total in enumerate(itertools.accumulate(sequence)):
        yield round(total / (i + 1), 2)
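running_mean is a generator, so a caller materialises it with list(); a small check:

# Each element is the mean of the prefix ending at that position.
print(list(running_mean([1, 2, 3, 4])))  # [1.0, 1.5, 2.0, 2.5]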
Python
def hex2rgb(hexcolor):
    """Class method that converts a hex value into an rgb one"""
    if type(hexcolor) != str:
        raise ValueError
    if hexcolor[0] != "#":
        raise ValueError
    if len(hexcolor) != 7:
        raise ValueError
    allowable_chars = set('0123456789abcdef')
    # every hex digit must be valid, so check all() rather than any()
    if not all(c in allowable_chars for c in hexcolor[1:]):
        raise ValueError

    def convert_to_dec(string):
        string = string[1:]
        dec_mapping = dict(zip('a b c d e f'.split(), [10, 11, 12, 13, 14, 15]))
        interim_list = []
        for hex_digit in string:
            try:
                interim_list.append(int(hex_digit))
            except ValueError:
                interim_list.append(dec_mapping[hex_digit])
        split_string = [interim_list[i:i + 2] for i in range(0, 6, 2)]
        return_list = []
        for code in split_string:
            return_list.append(int(code[0]) * 16 + int(code[1]))
        return tuple(return_list)

    return convert_to_dec(hexcolor)
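The manual digit mapping above can also be expressed with Python's built-in base-16 parsing; a minimal alternative sketch that assumes the same '#rrggbb' input contract:

def hex2rgb_builtin(hexcolor: str) -> tuple:
    # int(..., 16) parses each two-character pair as a base-16 number.
    return tuple(int(hexcolor[i:i + 2], 16) for i in (1, 3, 5))

print(hex2rgb_builtin('#ff8000'))  # (255, 128, 0)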
Python
def rgb2hex(rgbcode):
    """Class method that converts an rgb value into a hex one"""
    if type(rgbcode) != tuple:
        raise ValueError
    if len(rgbcode) != 3:
        raise ValueError
    if not all(type(i) == int for i in rgbcode):
        raise ValueError
    if not all(0 <= i <= 255 for i in rgbcode):
        raise ValueError

    def convert_to_hex(number):
        first_digit = number // 16
        second_digit = number % 16
        hex_mapping = dict(zip(range(10, 16), 'a b c d e f'.split()))
        return_str = ''
        for digit in (first_digit, second_digit):
            if digit > 9:
                return_str += hex_mapping[digit]
            else:
                return_str += str(digit)
        return return_str

    final_string = '#'
    for number in rgbcode:
        final_string += convert_to_hex(number)
    return final_string
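A quick round-trip sanity check for the two converters, assuming both are defined in the same module:

# The hex and rgb helpers should invert each other.
assert rgb2hex((255, 128, 0)) == '#ff8000'
assert hex2rgb('#ff8000') == (255, 128, 0)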
Python
import pandas as pd

# gold_prices is assumed to be a module-level string of "YYYY-MM,price" lines.

def years_gold_value_decreased(gold_prices: str = gold_prices) -> (int, int):
    """Analyze gold_prices returning a tuple of the year the gold price
    decreased the most and the year the gold price increased the most.
    """
    split_prices = gold_prices.split()
    years, prices = [], []
    for line in split_prices:
        yr, prc = line.split(',')
        years.append(yr[:-3])
        prices.append(float(prc))
    data = pd.DataFrame({'Years': years, 'Prices': prices})
    data['Last Year Prices'] = data['Prices'].shift(1)
    data['Delta'] = data['Prices'] - data['Last Year Prices']
    max_year = int(data.loc[data['Delta'] == data['Delta'].max(), 'Years'].values[0])
    min_year = int(data.loc[data['Delta'] == data['Delta'].min(), 'Years'].values[0])
    return (min_year, max_year)
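A small illustration with made-up data; the real gold_prices string and its exact "YYYY-MM,price" format are assumptions here:

# Hypothetical three-year price series: -50 from 2017 to 2018, +300 from 2018 to 2019.
sample = "2017-12,1250.0 2018-12,1200.0 2019-12,1500.0"
print(years_gold_value_decreased(sample))  # (2018, 2019)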
Python
def username(self):
    """A username consists of the first char of the user's first_name and
    the first 7 chars of the user's last_name, both lowercased.

    If this is your first property, check out:
    https://pybit.es/property-decorator.html
    """
    # TODO 2: your code
    return self.first_name.lower()[0] + self.last_name.lower()[:7]
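The docstring hints this is meant to be a property; a minimal sketch of a hypothetical enclosing class (attribute names assumed) shows how it would be used:

class User:
    # Hypothetical enclosing class; only first_name/last_name are assumed.
    def __init__(self, first_name, last_name):
        self.first_name = first_name
        self.last_name = last_name

    @property
    def username(self):
        return self.first_name.lower()[0] + self.last_name.lower()[:7]

print(User("Ada", "Lovelace").username)  # 'alovelac'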
Python
def on_input(self, note, correct):
    '''
    calls the appropriate on hit or on miss function
    let notes that belong in a chord slide
    '''
    if self.status == "call":  # don't score during call status
        return
    if not correct:
        self.on_miss_input(note)
Python
def on_hit(self, noteVals):
    '''
    Called if the note detector finds a match with target gem's chord.
    This checks if the hit was within the slack window (if necessary),
    and updates the gem record and score appropriately
    '''
    if self.status == "call":
        return
    if self._temporal_hit():
        print('past temp hit')
        if self.scoreCard:
            self.scoreCard.on_chord_hit(
                self.barNum, self.targetGem.copy_gem(), noteVals)
        self.targetGem = None

    allHit = True
    for gem in self.ticker.active_gems:
        allHit = allHit and gem.hit

    if len(self.ticker.active_gems) == 1:
        # we just finished a round
        self.increment_bar()
Python
def on_miss_input(self, note):
    '''
    Called if note detector finds a note not currently in target gem.
    Updates score record appropriately
    '''
    if self.targetGem:
        self.scoreCard.on_chord_miss(self.barNum, self.targetGem.copy_gem(), note)
    else:
        self.scoreCard.on_misc_miss(self.barNum, note)
Python
def patternReader(fileInput):
    '''
    Input takes one call (4 bars) per line.
    Entries will be parsed as #chord, #beat
    return an Array of tuples
    '''
    output = []
    with open(fileInput, 'r') as inputFile:
        for line in inputFile.readlines():
            tokens = line.strip().split(',')
            tokens = list(map(int, tokens))
            pattern = list(zip(tokens[0::2], tokens[1::2]))
            output.append(pattern)
    return output
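For example, a pattern file whose single line is "1,1,4,2,5,3" (the chord/beat pairing is an assumption about the format) would be read back as one list of three tuples:

import tempfile

# Hypothetical pattern file with one call: chord/beat pairs (1,1), (4,2), (5,3).
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write("1,1,4,2,5,3\n")

print(patternReader(f.name))  # [[(1, 1), (4, 2), (5, 3)]]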
Python
import logging

from aiohttp import web


def handle_exceptions(
        generic_message='An error has occurred',
        status_code=500,
        error_handler=None):
    """
    Generate a middleware that logs unexpected exceptions and returns a
    JSON response.

    Exceptions of type HTTPException won't be handled as they should be
    expected exceptions.

    Args:
        generic_message: The message that will be sent as an error
        status_code: The HTTP status code (default = 500)
        error_handler: Callable(request, exception) to be executed when
            an exception occurs
    """
    @web.middleware
    async def middleware(request, handler):
        try:
            response = await handler(request)
            return response
        except web.HTTPException:
            raise
        except Exception as ex:
            message = str(ex)
            if error_handler:
                error_handler(request, ex)
            logging.exception('Error: %s', message)
            return web.json_response(
                {'error': generic_message},
                status=status_code
            )
    return middleware
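A usage sketch for the middleware factory; the aiohttp application and the /boom route are hypothetical:

from aiohttp import web

async def boom(request):
    raise RuntimeError('unexpected failure')  # converted into a JSON 500 by the middleware

app = web.Application(middlewares=[handle_exceptions()])
app.router.add_get('/boom', boom)
# web.run_app(app)  # GET /boom -> {"error": "An error has occurred"}, status 500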
Python
import logging
from json import JSONDecodeError

from aiohttp import web


def handle_invalid_json(
        generic_message='Invalid JSON data',
        status_code=400,
        methods=None,
        exclude=None):
    """
    Generate a middleware that validates JSON data from the request and
    returns a 400 error if it is invalid.

    Args:
        generic_message: The message that will be sent as an error
        status_code: The HTTP status code (default = 400)
        methods: a set of methods (default: POST, PATCH, PUT)
        exclude: a set of endpoints to be excluded
    """
    if not methods:
        methods = {'POST', 'PATCH', 'PUT'}
    if exclude is None:
        exclude = set()  # default to an empty set of excluded endpoint paths

    @web.middleware
    async def middleware(request, handler):
        if request.method in methods and request.path not in exclude:
            try:
                await request.json()
            except JSONDecodeError:
                logging.exception('Invalid JSON')
                return web.json_response(
                    {'error': generic_message},
                    status=status_code
                )
        response = await handler(request)
        return response
    return middleware
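The two factories compose naturally; a sketch of wiring both into one application (the /health endpoint is hypothetical):

from aiohttp import web

app = web.Application(middlewares=[
    handle_exceptions(),                       # outermost: catches anything unexpected
    handle_invalid_json(exclude={'/health'}),  # skips JSON validation for /health
])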
Python
def update_context(self, inputs):
    ''' append single transition to the current context '''
    o, a, r, no, d, info = inputs
    if self.sparse_rewards:
        r = info['sparse_reward']
    o = ptu.from_numpy(o[None, None, ...])
    a = ptu.from_numpy(a[None, None, ...])
    r = ptu.from_numpy(np.array([r])[None, None, ...])
    no = ptu.from_numpy(no[None, None, ...])
    if self.use_next_obs_in_context:
        data = torch.cat([o, a, r, no], dim=2)
    else:
        data = torch.cat([o, a, r], dim=2)
    if self.context is None:
        self.context = data
    else:
        self.context = torch.cat([self.context, data], dim=1)
Python
def infer_posterior(self, context):
    ''' compute q(z|c) as a function of input context and sample new z from it '''
    params = self.context_encoder(context)
    params = params.view(context.size(0), -1, self.context_encoder.output_size)
    # with probabilistic z, predict mean and variance of q(z | c)
    if self.use_ib:
        mu = params[..., :self.latent_dim]
        # encoder could give negative values, but sigma must be positive
        sigma_squared = F.softplus(params[..., self.latent_dim:])
        z_params = [_product_of_gaussians(m, s) for m, s in
                    zip(torch.unbind(mu), torch.unbind(sigma_squared))]
        self.z_means = torch.stack([p[0] for p in z_params])
        self.z_vars = torch.stack([p[1] for p in z_params])
    # sum rather than product of gaussians structure
    else:
        self.z_means = torch.mean(params, dim=1)
    self.sample_z()
Python
def forward(self, obs, context):
    ''' given context, get statistics under the current policy of a set of observations '''
    self.infer_posterior(context)
    self.sample_z()
    task_z = self.z

    t, b, _ = obs.size()
    obs = obs.view(t * b, -1)
    task_z = [z.repeat(b, 1) for z in task_z]
    task_z = torch.cat(task_z, dim=0)

    # run policy, get log probs and new actions
    in_ = torch.cat([obs, task_z.detach()], dim=1)
    policy_outputs = self.policy(in_, reparameterize=True, return_log_prob=True)

    return policy_outputs, task_z
Python
def rollout(env, agent, max_path_length=np.inf, accum_context=True, animated=False,
            save_frames=False, plotting=False, online=0, buffer_size=1):
    """
    The values for the following keys will be 2D arrays, with the first
    dimension corresponding to the time dimension.
     - observations
     - actions
     - rewards
     - next_observations
     - terminals

    The next two elements will be lists of dictionaries, with the index into
    the list being the index into the time
     - agent_infos
     - env_infos

    :param env:
    :param agent:
    :param max_path_length:
    :param accum_context: if True, accumulate the collected context
    :param animated:
    :param save_frames: if True, save video of rollout
    :return:
    """
    observations = []
    task_indicators = []
    actions = []
    rewards = []
    terminals = []
    agent_infos = []
    env_infos = []

    if plotting:
        zs = np.zeros((1, agent.latent_dim * 2))

    o = env.reset()
    next_o = None
    path_length = 0

    if animated:
        env.render()
    while path_length < max_path_length:
        out = agent.get_action(o)
        a, agent_info = out[0]
        task_indicator = out[1]
        next_o, r, d, env_info = env.step(a)

        # update the agent's current context
        if accum_context:
            agent.update_context([o, a, r, next_o, d, env_info])
        if online == 1:
            # Variant 1: context is last N elements
            cont = agent.get_last_n_context_elements(buffer_size)
            agent.infer_posterior(cont)
        elif online == 2:
            # Variant 2: task assignment check
            agent.do_task_assignment(buffer_size)
        else:
            pass

        if plotting:
            zs = np.append(zs,
                           np.concatenate((agent.z_means.detach().cpu().numpy(),
                                           agent.z_vars.detach().cpu().numpy()), axis=1),
                           axis=0)

        observations.append(o)
        task_indicators.append(task_indicator)
        rewards.append(r)
        terminals.append(d)
        actions.append(a)
        agent_infos.append(agent_info)
        path_length += 1
        if d:
            break
        o = next_o
        if animated:
            env.render()
        if save_frames:
            from PIL import Image
            image = Image.fromarray(np.flipud(env.get_image(width=512, height=512)))
            env_info['frame'] = image
        env_infos.append(env_info)

    next_task_indicator = agent.get_action(next_o)[1]

    actions = np.array(actions)
    if len(actions.shape) == 1:
        actions = np.expand_dims(actions, 1)
    observations = np.array(observations)
    task_indicators = np.array(task_indicators)
    if len(observations.shape) == 1:
        observations = np.expand_dims(observations, 1)
        task_indicators = np.expand_dims(task_indicators, 1)
    next_o = np.array([next_o])
    next_task_indicator = np.array([next_task_indicator])
    next_observations = np.vstack(
        (
            observations[1:, :],
            np.expand_dims(next_o, 0)
        )
    )
    next_task_indicators = np.vstack(
        (
            task_indicators[1:, :],
            np.expand_dims(next_task_indicator, 0)
        )
    )

    if plotting:
        import matplotlib.pyplot as plt
        plt.figure()
        zs = zs[1:-1, :]
        for i in range(zs.shape[1]):
            plt.plot(list(range(zs.shape[0])), zs[:, i], label="z" + str(i))
        plt.plot(list(range(len(rewards))), rewards, label="reward")
        plt.legend()
        # plt.savefig("a.png", dpi=300, format="png")
        plt.show()

        if online == 2:
            # other stuff
            plt.figure()
            plt.plot(list(range(len(agent.best_matching_tasks))),
                     agent.best_matching_tasks, label="best_matching_tasks")
            plt.plot(list(range(len(agent.task_numbers))), agent.task_numbers, label="#tasks")
            plt.legend()
            # plt.savefig("b.png", dpi=300, format="png")
            plt.show()

    return dict(
        observations=observations,
        task_indicators=task_indicators,
        actions=actions,
        rewards=np.array(rewards).reshape(-1, 1),
        next_observations=next_observations,
        next_task_indicators=next_task_indicators,
        terminals=np.array(terminals).reshape(-1, 1),
        agent_infos=agent_infos,
        env_infos=env_infos,
    )
Python
def prior_pz(self, y):
    '''
    As proposed in the CURL paper: use linear layer, that conditioned on y
    gives Gaussian parameters
    '''
    one_hot = ptu.zeros(self.batch_size, self.num_classes)
    one_hot[:, y] = 1
    mu_sigma = self.prior_pz_layer(one_hot).detach()  # we do not want to backprop into prior

    # for debug
    # NOTE: this early debug return short-circuits the branches below
    return torch.distributions.normal.Normal(ptu.ones(self.batch_size, 1) * y,
                                              ptu.ones(self.batch_size, 1) * 0.5)

    if self.encoding_neglect_z:
        mu_sigma = ptu.ones(self.batch_size, self.latent_dim * 2)
        mu_sigma[:, 0] = mu_sigma[:, 0] * y
        mu_sigma[:, 1] = mu_sigma[:, 1] * 0.01
        return generate_gaussian(mu_sigma, self.latent_dim, sigma_ops=None)
        # mu_sigma = ptu.ones(self.batch_size, self.latent_dim * 2)
        # mu_sigma[:, 0] = mu_sigma[:, 0] * y - 0.5
        # mu_sigma[:, 1] = mu_sigma[:, 1] * 0.1
    else:
        return generate_gaussian(mu_sigma, self.latent_dim)
Python
def obtain_samples(self, task, train_test, deterministic=False, max_samples=np.inf,
                   max_trajs=np.inf, animated=False, save_frames=False):
    """
    Obtains samples in the environment until we reach either max_samples
    transitions or max_trajs trajectories.
    """
    assert max_samples < np.inf or max_trajs < np.inf, "either max_samples or max_trajs must be finite"
    paths = []
    n_steps_total = 0
    n_trajs = 0
    while n_steps_total < max_samples and n_trajs < max_trajs:
        self.env.reset_task(task)
        self.env.set_meta_mode(train_test)
        path = self.rollout(
            deterministic=deterministic,
            max_path_length=self.max_path_length
            if max_samples - n_steps_total > self.max_path_length
            else max_samples - n_steps_total,
            animated=animated,
            save_frames=save_frames)
        paths.append(path)
        n_steps_total += len(path['observations'])
        n_trajs += 1
    return paths, n_steps_total
Python
def prior_pz(self, y):
    '''
    As proposed in the CURL paper: use linear layer, that conditioned on y
    gives Gaussian parameters OR Gaussian with N(y, 0.5)
    IF z not used: Just give back y with 0.01 variance
    '''
    if self.isIndividualY:
        if self.prior_mode == 'fixedOnY':
            return torch.distributions.normal.Normal(
                ptu.ones(self.batch_size, self.timesteps, 1) * y,
                ptu.ones(self.batch_size, self.timesteps, 1) * self.prior_sigma)
        elif self.prior_mode == 'network':
            one_hot = ptu.zeros(self.batch_size, self.timesteps, self.num_classes)
            one_hot[:, :, y] = 1
            mu_sigma = self.prior_pz_layer(one_hot)  # .detach()  # we do not want to backprop into prior
            return generate_gaussian(mu_sigma, self.latent_dim)
    else:
        if self.prior_mode == 'fixedOnY':
            return torch.distributions.normal.Normal(
                ptu.ones(self.batch_size, self.latent_dim) * y,
                ptu.ones(self.batch_size, self.latent_dim) * self.prior_sigma)
        elif self.prior_mode == 'network':
            one_hot = ptu.zeros(self.batch_size, self.num_classes)
            one_hot[:, y] = 1
            mu_sigma = self.prior_pz_layer(one_hot)  # .detach()  # we do not want to backprop into prior
            return generate_gaussian(mu_sigma, self.latent_dim)
Python
def add_experiment_to_database(name, algo, env, date, description, algo_version, env_version,
                               progress_name=progress_file, variant_name=variant_file):
    '''
    Add experiment data from different algorithms to database
    Following attributes are saved:
        name
        algo
        env
        date
        description
        param_log
        n_timesteps
        time
        trainAverageReturn
        testAverageReturn
    '''
    database = load_json(database_file)

    # read from progress file
    columns = []
    with open(progress_name, 'r', newline='') as f:  # newline='' is the mode the csv module expects
        reader = csv.reader(f)
        for row in reader:
            if columns:
                for i, value in enumerate(row):
                    columns[i].append(value)
            else:
                # first row
                columns = [[value] for value in row]
    # you now have a column-major 2D array of your file.
    progress_dict = {c[0]: c[1:] for c in columns}

    # find out next id
    if list(database.keys()) == []:
        new_id = "0"
    else:
        new_id = str(max([int(x) for x in list(database.keys())]) + 1)

    database[new_id] = {}
    database[new_id]["name"] = name
    database[new_id]["algo"] = algo
    database[new_id]["env"] = env
    database[new_id]["env_version"] = env_version
    database[new_id]["date"] = date
    database[new_id]["description"] = description

    if algo == "r2l":
        database[new_id]["algo_version"] = "0"
        database[new_id]["param_log"] = load_json("params.json")
        database[new_id]["n_timesteps"] = [float(i) for i in progress_dict["n_timesteps"]]
        database[new_id]["time"] = [float(i) for i in progress_dict["Time"]]
        database[new_id]["trainAverageReturn"] = [float(i) for i in progress_dict["train-AverageReturn"]]
        database[new_id]["testAverageReturn"] = [float(i) for i in progress_dict["train-AverageReturn"]]
    elif algo == "pearl":
        database[new_id]["algo_version"] = algo_version
        database[new_id]["param_log"] = load_json(variant_name)
        database[new_id]["n_timesteps"] = [float(i) for i in progress_dict["Number of env steps total"]]
        database[new_id]["time"] = [float(i) for i in progress_dict["Total Train Time (s)"]]
        database[new_id]["trainAverageReturn"] = [float(i) for i in progress_dict["AverageReturn_all_train_tasks"]]
        database[new_id]["testAverageReturn"] = [float(i) for i in progress_dict["AverageReturn_all_test_tasks"]]
    elif algo == "cemrl":
        database[new_id]["algo_version"] = algo_version
        database[new_id]["param_log"] = load_json(variant_name)
        database[new_id]["n_timesteps"] = [float(i) for i in progress_dict["n_env_steps_total"]]
        database[new_id]["time"] = [float(i) for i in progress_dict["time_total"]]
        database[new_id]["trainAverageReturn"] = [float(i) for i in progress_dict["train_eval_avg_reward_deterministic"]]
        database[new_id]["testAverageReturn"] = [float(i) for i in progress_dict["test_eval_avg_reward_deterministic"]]
        if "train_eval_success_rate" in progress_dict:
            database[new_id]["trainSuccessRate"] = [float(i) for i in progress_dict["train_eval_success_rate"]]
        if "test_eval_success_rate" in progress_dict:
            database[new_id]["testSuccessRate"] = [float(i) for i in progress_dict["test_eval_success_rate"]]

    dump_json(database_file, database)
    return new_id
Python
def _resolve_download_tag(self, _, node) -> str:
    '''
    Download file from link after ``!download``, save it into cachedir and
    replace tag with absolute path to this file.
    '''
    url = node.value
    file_ext = get_file_ext_from_url(url)
    url_hash = md5(url.encode()).hexdigest()
    save_to = os.path.join(self.downloadfile_cache_dir, url_hash + file_ext)
    return download_file(self.project_path, node.value, save_to=save_to)
Python
def _get_package_root(self):
    '''
    Get the path to the installation location (where libwarpx.so would be installed).
    '''
    cur = os.path.abspath(__file__)
    while True:
        name = os.path.basename(cur)
        if name == 'pywarpx':
            return cur
        elif not name:
            return ''
        cur = os.path.dirname(cur)
Python
def _array1d_from_pointer(pointer, dtype, size):
    '''
    Function for converting a ctypes pointer to a numpy array
    '''
    if not pointer:
        raise Exception('_array1d_from_pointer: pointer is a nullptr')
    if sys.version_info.major >= 3:
        # from where do I import these? this might only work for CPython...
        # PyBUF_READ = 0x100
        PyBUF_WRITE = 0x200
        buffer_from_memory = ctypes.pythonapi.PyMemoryView_FromMemory
        buffer_from_memory.argtypes = (ctypes.c_void_p, ctypes.c_int, ctypes.c_int)
        buffer_from_memory.restype = ctypes.py_object
        buf = buffer_from_memory(pointer, dtype.itemsize*size, PyBUF_WRITE)
    else:
        buffer_from_memory = ctypes.pythonapi.PyBuffer_FromReadWriteMemory
        buffer_from_memory.restype = ctypes.py_object
        buf = buffer_from_memory(pointer, dtype.itemsize*size)
    return np.frombuffer(buf, dtype=dtype, count=size)
Python
def evolve(self, num_steps=-1):
    '''
    Evolve the simulation for num_steps steps. If num_steps=-1,
    the simulation will be run until the end as specified in the inputs file.

    Parameters
    ----------

    num_steps: int, the number of steps to take
    '''
    self.libwarpx_so.warpx_evolve(num_steps)
Python
def add_particles(self, species_name, x=None, y=None, z=None, ux=None, uy=None,
                  uz=None, w=None, unique_particles=True, **kwargs):
    '''
    A function for adding particles to the WarpX simulation.

    Parameters
    ----------

    species_name     : the species to add the particle to
    x, y, z          : arrays or scalars of the particle positions (default = 0.)
    ux, uy, uz       : arrays or scalars of the particle momenta (default = 0.)
    w                : array or scalar of particle weights (default = 0.)
    unique_particles : whether the particles are unique or duplicated on
                       several processes. (default = True)
    kwargs           : dictionary containing an entry for all the extra particle
                       attribute arrays. If an attribute is not given it will be
                       set to 0.
    '''

    # --- Get length of arrays, set to one for scalars
    lenx = np.size(x)
    leny = np.size(y)
    lenz = np.size(z)
    lenux = np.size(ux)
    lenuy = np.size(uy)
    lenuz = np.size(uz)
    lenw = np.size(w)

    # --- Find the max length of the parameters supplied
    maxlen = 0
    if x is not None:
        maxlen = max(maxlen, lenx)
    if y is not None:
        maxlen = max(maxlen, leny)
    if z is not None:
        maxlen = max(maxlen, lenz)
    if ux is not None:
        maxlen = max(maxlen, lenux)
    if uy is not None:
        maxlen = max(maxlen, lenuy)
    if uz is not None:
        maxlen = max(maxlen, lenuz)
    if w is not None:
        maxlen = max(maxlen, lenw)

    # --- Make sure that the lengths of the input parameters are consistent
    assert x is None or lenx==maxlen or lenx==1, "Length of x doesn't match len of others"
    assert y is None or leny==maxlen or leny==1, "Length of y doesn't match len of others"
    assert z is None or lenz==maxlen or lenz==1, "Length of z doesn't match len of others"
    assert ux is None or lenux==maxlen or lenux==1, "Length of ux doesn't match len of others"
    assert uy is None or lenuy==maxlen or lenuy==1, "Length of uy doesn't match len of others"
    assert uz is None or lenuz==maxlen or lenuz==1, "Length of uz doesn't match len of others"
    assert w is None or lenw==maxlen or lenw==1, "Length of w doesn't match len of others"
    for key, val in kwargs.items():
        assert np.size(val)==1 or len(val)==maxlen, f"Length of {key} doesn't match len of others"

    # --- Broadcast scalars into appropriate length arrays
    # --- If the parameter was not supplied, use the default value
    if lenx == 1:
        x = np.full(maxlen, (x or 0.), float)
    if leny == 1:
        y = np.full(maxlen, (y or 0.), float)
    if lenz == 1:
        z = np.full(maxlen, (z or 0.), float)
    if lenux == 1:
        ux = np.full(maxlen, (ux or 0.), float)
    if lenuy == 1:
        uy = np.full(maxlen, (uy or 0.), float)
    if lenuz == 1:
        uz = np.full(maxlen, (uz or 0.), float)
    if lenw == 1:
        w = np.full(maxlen, (w or 0.), float)
    for key, val in kwargs.items():
        if np.size(val) == 1:
            kwargs[key] = np.full(maxlen, val, float)

    # --- The -3 is because the comps include the velocities
    nattr = self.get_nattr_species(species_name) - 3
    attr = np.zeros((maxlen, nattr))
    attr[:, 0] = w

    for key, vals in kwargs.items():
        # --- The -3 is because components 1 to 3 are velocities
        attr[:, self.get_particle_comp_index(species_name, key) - 3] = vals

    self.libwarpx_so.warpx_addNParticles(
        ctypes.c_char_p(species_name.encode('utf-8')), x.size,
        x, y, z, ux, uy, uz, nattr, attr, unique_particles
    )
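A usage sketch for the method above; the libwarpx wrapper instance and the 'electrons' species are assumptions borrowed from typical pywarpx scripts, not part of this snippet:

import numpy as np

# libwarpx below stands for the wrapper instance exposing add_particles (an assumption).
n = 10
libwarpx.add_particles(
    'electrons',
    x=np.random.uniform(-1e-6, 1e-6, n),   # per-particle positions
    y=0., z=0.,                             # scalars are broadcast to length n
    ux=0., uy=0., uz=1e7,
    w=1e5,                                  # one macro-particle weight for all
    unique_particles=True)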
Python
def add_real_comp(self, species_name, pid_name, comm=True):
    '''
    Add a real component to the particle data array.

    Parameters
    ----------

    species_name   : the species name for which the new component will be added
    pid_name       : string that is used to identify the new component
    comm           : should the component be communicated
    '''
    self.libwarpx_so.warpx_addRealComp(
        ctypes.c_char_p(species_name.encode('utf-8')),
        ctypes.c_char_p(pid_name.encode('utf-8')), comm
    )
Python
def clearParticleBoundaryBuffer(self):
    '''
    Clear the buffer that holds the particles lost at the boundaries.
    '''
    self.libwarpx_so.warpx_clearParticleBoundaryBuffer()
Python
def depositChargeDensity(self, species_name, level, clear_rho=True, sync_rho=True):
    '''
    Deposit the specified species' charge density in rho_fp in order to
    access that data via pywarpx.fields.RhoFPWrapper().

    Parameters
    ----------

    species_name   : the species name that will be deposited.
    level          : Which AMR level to retrieve scraped particle data from.
    clear_rho      : If True, zero out rho_fp before deposition.
    sync_rho       : If True, perform MPI exchange and properly set boundary
                     cells for rho_fp.
    '''
    if clear_rho:
        from . import fields
        fields.RhoFPWrapper(level, True)[...] = 0.0
    self.libwarpx_so.warpx_depositChargeDensity(
        ctypes.c_char_p(species_name.encode('utf-8')), level
    )
    if sync_rho:
        self.libwarpx_so.warpx_SyncRho()
Python
def _get_mesh_field_list(self, warpx_func, level, direction, include_ghosts):
    """
    Generic routine to fetch the list of field data arrays.
    """
    shapes = _LP_c_int()
    size = ctypes.c_int(0)
    ncomps = ctypes.c_int(0)
    ngrowvect = _LP_c_int()
    if direction is None:
        data = warpx_func(level,
                          ctypes.byref(size), ctypes.byref(ncomps),
                          ctypes.byref(ngrowvect), ctypes.byref(shapes))
    else:
        data = warpx_func(level, direction,
                          ctypes.byref(size), ctypes.byref(ncomps),
                          ctypes.byref(ngrowvect), ctypes.byref(shapes))
    if not data:
        raise Exception('object was not initialized')

    ngvect = [ngrowvect[i] for i in range(self.dim)]
    grid_data = []
    shapesize = self.dim
    if ncomps.value > 1:
        shapesize += 1
    for i in range(size.value):
        shape = tuple([shapes[shapesize*i + d] for d in range(shapesize)])
        # --- The data is stored in Fortran order, hence shape is reversed and a transpose is taken.
        if shape[::-1] == 0:
            continue
        if not data[i]:
            raise Exception(f'get_particle_arrays: data[i] for i={i} was not initialized')
        arr = np.ctypeslib.as_array(data[i], shape[::-1]).T
        try:
            # This fails on some versions of numpy
            arr.setflags(write=1)
        except ValueError:
            pass
        if include_ghosts:
            grid_data.append(arr)
        else:
            grid_data.append(arr[tuple([slice(ngvect[d], -ngvect[d]) for d in range(self.dim)])])

    _libc.free(shapes)
    _libc.free(data)
    return grid_data
Python
def clearlist(self):
    """Unregister/clear out all registered C callbacks"""
    self.funcs = []
    libwarpx.libwarpx_so.warpx_clear_callback_py(
        ctypes.c_char_p(self.name.encode('utf-8'))
    )
Python
def installfuncinlist(self, f):
    """Install the specified function in this callback list"""
    if len(self.funcs) == 0:
        # If this is the first function installed, set the callback in the C++
        # to call this class instance.
        # Note that the _c_func must be saved.
        _CALLBACK_FUNC_0 = ctypes.CFUNCTYPE(None)
        self._c_func = _CALLBACK_FUNC_0(self)
        libwarpx.libwarpx_so.warpx_set_callback_py(
            ctypes.c_char_p(self.name.encode('utf-8')), self._c_func
        )
    if isinstance(f, types.MethodType):
        # --- If the function is a method of a class instance, then save a full
        # --- reference to that instance and the method name.
        finstance = f.__self__
        fname = f.__name__
        self.funcs.append([finstance, fname])
    elif callable(f):
        # --- If a function had already been installed by name, then skip the install.
        # --- This is problematic, since no warning message is given, but it is unlikely
        # --- to arise under normal circumstances.
        # --- The purpose of this check is to avoid redundant installation of functions
        # --- during a restore from a dump file. Without the check, functions that had been
        # --- installed via a decorator would be installed an extra time since the source
        # --- of the function contains the decoration (which is activated when the source
        # --- is exec'd).
        if f.__name__ not in self.funcs:
            self.funcs.append(f)
    else:
        self.funcs.append(f)
Python
def isinstalledfuncinlist(self, f):
    """Checks if the specified function is installed"""
    # --- An element by element search is needed
    funclistcopy = copy.copy(self.funcs)
    for func in funclistcopy:
        if f == func:
            return 1
        elif isinstance(func, list) and isinstance(f, types.MethodType):
            object = self._getmethodobject(func)
            if f.__self__ is object and f.__name__ == func[1]:
                return 1
        elif isinstance(func, str):
            if f.__name__ == func:
                return 1
    return 0
Python
def printcallbacktimers(tmin=1., lminmax=False, ff=None):
    """Prints timings of installed functions.
    - tmin=1.: only functions with time greater than tmin will be printed
    - lminmax=False: If True, prints the min and max times over all processors
    - ff=None: If given, timings will be written to the file object instead of stdout
    """
    if ff is None:
        ff = sys.stdout
    for c in [_afterinit, _beforeEsolve, _poissonsolver, _afterEsolve,
              _beforedeposition, _afterdeposition,
              _particlescraper, _particleloader,
              _beforestep, _afterstep, _afterrestart,
              _particleinjection, _appliedfields]:
        for fname, time in c.timers.items():
            #vlist = numpy.array(gather(time))
            vlist = numpy.array([time])
            #if me > 0: continue
            vsum = numpy.sum(vlist)
            if vsum <= tmin:
                continue
            vrms = numpy.sqrt(max(0., numpy.sum(vlist**2)/len(vlist) - (numpy.sum(vlist)/len(vlist))**2))
            npes = 1.  # Only works for one processor
            ff.write('%20s %s %10.4f %10.4f %10.4f' % (c.name, fname, vsum, vsum/npes, vrms))
            if lminmax:
                vmin = numpy.min(vlist)
                vmax = numpy.max(vlist)
                ff.write(' %10.4f %10.4f' % (vmin, vmax))
            it = libwarpx.libwarpx_so.warpx_getistep(0)
            if it > 0:
                ff.write(' %10.4f' % (vsum/npes/(it)))
            ff.write('\n')
Python
def installpoissonsolver(f):
    """Installs an external function to solve Poisson's equation"""
    if _poissonsolver.hasfuncsinstalled():
        raise RuntimeError("Only one external Poisson solver can be installed.")
    _poissonsolver.installfuncinlist(f)
Python
def preprocess_edges(self, nodes, node_neighbours, edges):
    """
    Edge preprocessing step, to be implemented in all `EdgeMPNN` subclasses.

    Args:
      nodes (torch.Tensor) : Batch of size {total number of nodes in batch,
        number of node features}.
      node_neighbours (torch.Tensor) : Batch of size {total number of nodes in
        batch, max node degree, number of node features}.
      edges (torch.Tensor) : Batch of size {total number of nodes in batch,
        max node degree, number of edge features}.
    """
    raise NotImplementedError
Python
def propagate_edges(self, edges, ingoing_edge_memories, ingoing_edges_mask):
    """
    Edge propagation rule, to be implemented in all `EdgeMPNN` subclasses.

    Args:
      edges (torch.Tensor) : Batch of size {N, number of nodes, number of
        nodes, total number of edge features}, where N is the total number of
        subgraphs in the batch.
      ingoing_edge_memories (torch.Tensor) : Batch of size {total number of
        edges in batch, total number of edge features}.
      ingoing_edges_mask (torch.Tensor) : Batch of size {total number of edges
        in batch, max node degree, total number of edge features}.
    """
    raise NotImplementedError
Python
def readout(self, hidden_nodes, input_nodes, node_mask):
    """
    Local readout function, to be implemented in all `EdgeMPNN` subclasses.

    Args:
      hidden_nodes (torch.Tensor) : Batch of size {total number of nodes in
        batch, number of node features}.
      input_nodes (torch.Tensor) : Batch of size {total number of nodes in
        batch, number of node features}.
      node_mask (torch.Tensor) : Batch of size {total number of nodes in
        batch, number of node features}, where elements are 1 if the
        corresponding element exists and 0 otherwise.
    """
    raise NotImplementedError
Python
def initialize_model():
    """
    Initializes the model to be trained. Possible models: "MNN", "S2V",
    "AttS2V", "GGNN", "AttGGNN", or "EMN".

    Returns:
      model (modules.SummationMPNN or modules.AggregationMPNN or
        modules.EdgeMPNN) : Neural net model.
    """
    try:
        hidden_node_features = C.hidden_node_features
    except AttributeError:  # raised for EMN model only
        hidden_node_features = None
        edge_emb_hidden_dim = C.edge_emb_hidden_dim

    if C.model == "MNN":
        net = gnn.mpnn.MNN(
            f_add_elems=C.dim_f_add_p1,
            edge_features=C.dim_edges[2],
            mlp1_depth=C.mlp1_depth,
            mlp1_dropout_p=C.mlp1_dropout_p,
            mlp1_hidden_dim=C.mlp1_hidden_dim,
            mlp2_depth=C.mlp2_depth,
            mlp2_dropout_p=C.mlp2_dropout_p,
            mlp2_hidden_dim=C.mlp2_hidden_dim,
            hidden_node_features=hidden_node_features,
            initialization=C.weights_initialization,
            message_passes=C.message_passes,
            message_size=C.message_size,
            n_nodes_largest_graph=C.max_n_nodes,
            node_features=C.dim_nodes[1],
        )
    elif C.model == "S2V":
        net = gnn.mpnn.S2V(
            f_add_elems=C.dim_f_add_p1,
            edge_features=C.dim_edges[2],
            enn_depth=C.enn_depth,
            enn_dropout_p=C.enn_dropout_p,
            enn_hidden_dim=C.enn_hidden_dim,
            mlp1_depth=C.mlp1_depth,
            mlp1_dropout_p=C.mlp1_dropout_p,
            mlp1_hidden_dim=C.mlp1_hidden_dim,
            mlp2_dropout_p=C.mlp2_dropout_p,
            mlp2_depth=C.mlp2_depth,
            mlp2_hidden_dim=C.mlp2_hidden_dim,
            hidden_node_features=hidden_node_features,
            initialization=C.weights_initialization,
            message_passes=C.message_passes,
            message_size=C.message_size,
            n_nodes_largest_graph=C.max_n_nodes,
            node_features=C.dim_nodes[1],
            s2v_lstm_computations=C.s2v_lstm_computations,
            s2v_memory_size=C.s2v_memory_size,
        )
    elif C.model == "AttS2V":
        net = gnn.mpnn.AttentionS2V(
            f_add_elems=C.dim_f_add_p1,
            att_depth=C.att_depth,
            att_dropout_p=C.att_dropout_p,
            att_hidden_dim=C.att_hidden_dim,
            edge_features=C.dim_edges[2],
            enn_depth=C.enn_depth,
            enn_dropout_p=C.enn_dropout_p,
            enn_hidden_dim=C.enn_hidden_dim,
            mlp1_depth=C.mlp1_depth,
            mlp1_dropout_p=C.mlp1_dropout_p,
            mlp1_hidden_dim=C.mlp1_hidden_dim,
            mlp2_depth=C.mlp2_depth,
            mlp2_dropout_p=C.mlp2_dropout_p,
            mlp2_hidden_dim=C.mlp2_hidden_dim,
            hidden_node_features=hidden_node_features,
            initialization=C.weights_initialization,
            message_passes=C.message_passes,
            message_size=C.message_size,
            n_nodes_largest_graph=C.max_n_nodes,
            node_features=C.dim_nodes[1],
            s2v_lstm_computations=C.s2v_lstm_computations,
            s2v_memory_size=C.s2v_memory_size,
        )
    elif C.model == "GGNN":
        net = gnn.mpnn.GGNN(
            f_add_elems=C.dim_f_add_p1,
            edge_features=C.dim_edges[2],
            enn_depth=C.enn_depth,
            enn_dropout_p=C.enn_dropout_p,
            enn_hidden_dim=C.enn_hidden_dim,
            mlp1_depth=C.mlp1_depth,
            mlp1_dropout_p=C.mlp1_dropout_p,
            mlp1_hidden_dim=C.mlp1_hidden_dim,
            mlp2_depth=C.mlp2_depth,
            mlp2_dropout_p=C.mlp2_dropout_p,
            mlp2_hidden_dim=C.mlp2_hidden_dim,
            gather_att_depth=C.gather_att_depth,
            gather_att_dropout_p=C.gather_att_dropout_p,
            gather_att_hidden_dim=C.gather_att_hidden_dim,
            gather_width=C.gather_width,
            gather_emb_depth=C.gather_emb_depth,
            gather_emb_dropout_p=C.gather_emb_dropout_p,
            gather_emb_hidden_dim=C.gather_emb_hidden_dim,
            hidden_node_features=hidden_node_features,
            initialization=C.weights_initialization,
            message_passes=C.message_passes,
            message_size=C.message_size,
            n_nodes_largest_graph=C.max_n_nodes,
            node_features=C.dim_nodes[1],
        )
    elif C.model == "AttGGNN":
        net = gnn.mpnn.AttentionGGNN(
            f_add_elems=C.dim_f_add_p1,
            att_depth=C.att_depth,
            att_dropout_p=C.att_dropout_p,
            att_hidden_dim=C.att_hidden_dim,
            edge_features=C.dim_edges[2],
            mlp1_depth=C.mlp1_depth,
            mlp1_dropout_p=C.mlp1_dropout_p,
            mlp1_hidden_dim=C.mlp1_hidden_dim,
            mlp2_depth=C.mlp2_depth,
            mlp2_dropout_p=C.mlp2_dropout_p,
            mlp2_hidden_dim=C.mlp2_hidden_dim,
            gather_att_depth=C.gather_att_depth,
            gather_att_dropout_p=C.gather_att_dropout_p,
            gather_att_hidden_dim=C.gather_att_hidden_dim,
            gather_emb_depth=C.gather_emb_depth,
            gather_emb_dropout_p=C.gather_emb_dropout_p,
            gather_emb_hidden_dim=C.gather_emb_hidden_dim,
            gather_width=C.gather_width,
            hidden_node_features=hidden_node_features,
            initialization=C.weights_initialization,
            message_passes=C.message_passes,
            message_size=C.message_size,
            msg_depth=C.msg_depth,
            msg_dropout_p=C.msg_dropout_p,
            msg_hidden_dim=C.msg_hidden_dim,
            n_nodes_largest_graph=C.max_n_nodes,
            node_features=C.dim_nodes[1],
        )
    elif C.model == "EMN":
        net = gnn.mpnn.EMN(
            f_add_elems=C.dim_f_add_p1,
            att_depth=C.att_depth,
            att_dropout_p=C.att_dropout_p,
            att_hidden_dim=C.att_hidden_dim,
            edge_emb_depth=C.edge_emb_depth,
            edge_emb_dropout_p=C.edge_emb_dropout_p,
            edge_emb_hidden_dim=edge_emb_hidden_dim,
            edge_emb_size=C.edge_emb_size,
            edge_features=C.dim_edges[2],
            mlp1_depth=C.mlp1_depth,
            mlp1_dropout_p=C.mlp1_dropout_p,
            mlp1_hidden_dim=C.mlp1_hidden_dim,
            mlp2_depth=C.mlp2_depth,
            mlp2_dropout_p=C.mlp2_dropout_p,
            mlp2_hidden_dim=C.mlp2_hidden_dim,
            gather_att_depth=C.gather_att_depth,
            gather_att_dropout_p=C.gather_att_dropout_p,
            gather_att_hidden_dim=C.gather_att_hidden_dim,
            gather_emb_depth=C.gather_emb_depth,
            gather_emb_dropout_p=C.gather_emb_dropout_p,
            gather_emb_hidden_dim=C.gather_emb_hidden_dim,
            gather_width=C.gather_width,
            initialization=C.weights_initialization,
            message_passes=C.message_passes,
            msg_depth=C.msg_depth,
            msg_dropout_p=C.msg_dropout_p,
            msg_hidden_dim=C.msg_hidden_dim,
            n_nodes_largest_graph=C.max_n_nodes,
            node_features=C.dim_nodes[1],
        )
    elif C.model == "RNN":
        raise NotImplementedError
    else:
        raise NotImplementedError("Model is not defined.")

    net = net.to("cuda", non_blocking=True)

    return net
Python
def graph_generation_loss(output, target_output):
    """
    Calculates the loss using the KL divergence.

    Args:
      output (torch.Tensor) : Predicted APD tensor.
      target_output (torch.Tensor) : Target APD tensor.

    Returns:
      loss (float) : Average loss for this output.
    """
    # define activation function; note that one must use the softmax in the
    # KLDiv, never the sigmoid, as the distribution must sum to 1
    LogSoftmax = torch.nn.LogSoftmax(dim=1)

    output = LogSoftmax(output)

    # normalize the target output (as it can contain information on > 1 graph)
    target_output = target_output / torch.sum(target_output, dim=1, keepdim=True)

    # define loss function and calculate the loss
    criterion = torch.nn.KLDivLoss(reduction="batchmean")
    loss = criterion(target=target_output, input=output)

    return loss
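A minimal, self-contained sketch of the same loss computation on random tensors; the shapes and values are made up for illustration and only `torch` is assumed.

import torch

batch_size, apd_size = 4, 10
logits = torch.randn(batch_size, apd_size)         # stand-in for the network output
target = torch.rand(batch_size, apd_size)          # stand-in for summed target APDs

log_probs = torch.nn.LogSoftmax(dim=1)(logits)     # KLDivLoss expects log-probabilities as input
target = target / target.sum(dim=1, keepdim=True)  # targets must be a proper distribution

criterion = torch.nn.KLDivLoss(reduction="batchmean")
loss = criterion(input=log_probs, target=target)
print(loss.item())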
Python
def message_terms(self, nodes, node_neighbours, edges):
    """
    Message passing function, to be implemented in all `SummationMPNN` subclasses.

    Args:
      nodes (torch.Tensor) : Batch of size {total number of nodes in batch,
        number of node features}.
      node_neighbours (torch.Tensor) : Batch of size {total number of nodes in
        batch, max node degree, number of node features}.
      edges (torch.Tensor) : Batch of size {total number of nodes in batch,
        max node degree, number of edge features}.
    """
    raise NotImplementedError
Python
def update(self, nodes, messages):
    """
    Message update function, to be implemented in all `SummationMPNN` subclasses.

    Args:
      nodes (torch.Tensor) : Batch of size {total number of nodes in batch,
        number of node features}.
      messages (torch.Tensor) : Batch of size {total number of nodes in batch,
        number of node features}.
    """
    raise NotImplementedError
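For illustration only, a hedged sketch of how a concrete subclass might implement `update` with a GRU cell; the class name, layer sizes, and the choice of `GRUCell` are assumptions and are not taken from the models in this codebase. It only demonstrates the shape contract described in the docstring.

import torch

class ExampleUpdate(torch.nn.Module):
    """Toy module: both `nodes` and `messages` have shape
    {total number of nodes in batch, number of node features}."""

    def __init__(self, node_features):
        super().__init__()
        self.gru = torch.nn.GRUCell(input_size=node_features, hidden_size=node_features)

    def update(self, nodes, messages):
        # treat the aggregated messages as the GRU input and the current
        # node states as the hidden state
        return self.gru(messages, nodes)

# usage on random data
m = ExampleUpdate(node_features=8)
nodes = torch.randn(5, 8)
messages = torch.randn(5, 8)
print(m.update(nodes, messages).shape)  # torch.Size([5, 8])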
Python
def main():
    """
    Defines the type of job (preprocessing, training, generation, or testing),
    runs it, and writes the job parameters used.
    """
    # fix date/time
    _ = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    workflow = Workflow(constants=C)

    job_type = C.job_type
    print(f"* Run mode: '{job_type}'", flush=True)

    if job_type == "preprocess":
        # write preprocessing parameters
        util.write_preprocessing_parameters(params=C)

        # preprocess all datasets
        workflow.preprocess_phase()

    elif job_type == "train":
        # write training parameters
        util.write_job_parameters(params=C)

        # train model and generate graphs
        workflow.training_phase()

    elif job_type == "generate":
        # write generation parameters
        util.write_job_parameters(params=C)

        # generate molecules only
        workflow.generation_phase()

    elif job_type == "benchmark":
        # TODO not integrated with MOSES, at the moment benchmarking is done by
        # generating N structures, copying the generated SMILES to the MOSES
        # dir, and running the benchmarking job according to MOSES instructions
        raise NotImplementedError

    elif job_type == "test":
        # write testing parameters
        util.write_job_parameters(params=C)

        # evaluate best model using the test set data
        workflow.testing_phase()

    else:
        raise NotImplementedError("Not a valid `job_type`.")
Python
def aggregate_message(self, nodes, node_neighbours, edges, mask):
    """
    Message aggregation function, to be implemented in all `AggregationMPNN`
    subclasses.

    Args:
      nodes (torch.Tensor) : Batch of size {total number of nodes in batch,
        number of node features}.
      node_neighbours (torch.Tensor) : Batch of size {total number of nodes in
        batch, max node degree, number of node features}.
      edges (torch.Tensor) : Batch of size {total number of nodes in batch,
        max node degree, number of edge features}.
      mask (torch.Tensor) : Batch of size {total number of nodes in batch,
        max node degree}, where elements are 1 if the corresponding neighbour
        exists and 0 otherwise.
    """
    raise NotImplementedError
Python
def submit():
    """
    Creates and submits submission script. Uses global variables defined at
    the top of this file.
    """
    check_paths()

    # create an output directory
    data_path_minus_data = data_path[:-5]
    dataset_output_path = f"{data_path_minus_data}output_{dataset}"
    tensorboard_path = os.path.join(dataset_output_path, "tensorboard")
    if jobname != "":
        dataset_output_path = os.path.join(dataset_output_path, jobname)
        tensorboard_path = os.path.join(tensorboard_path, jobname)

    os.makedirs(dataset_output_path, exist_ok=True)
    os.makedirs(tensorboard_path, exist_ok=True)
    print(f"* Creating dataset directory {dataset_output_path}/", flush=True)

    # submit `n_jobs` separate jobs
    jobdir_end_idx = jobdir_start_idx + n_jobs
    for job_idx in range(jobdir_start_idx, jobdir_end_idx):

        # specify and create the job subdirectory if it does not exist
        params["job_dir"] = f"{dataset_output_path}/job_{job_idx}/"
        params["tensorboard_dir"] = f"{tensorboard_path}/job_{job_idx}/"

        # create the directory if it does not exist already, otherwise raise an
        # error, which is good because we might *not* want to overwrite data in
        # existing directories!
        os.makedirs(params["tensorboard_dir"], exist_ok=True)
        try:
            os.makedirs(params["job_dir"],
                        exist_ok=bool(job_type in ["generate", "test"] or force_overwrite))
            print(
                f"* Creating model subdirectory {dataset_output_path}/job_{job_idx}/",
                flush=True,
            )
        except FileExistsError:
            print(
                f"-- Model subdirectory {dataset_output_path}/job_{job_idx}/ already exists.",
                flush=True,
            )
            if not restart:
                continue

        # write the `input.csv` file
        write_input_csv(params_dict=params, filename="input.csv")

        # write `submit.sh` and submit
        if use_slurm:
            print("* Writing submission script.", flush=True)
            write_submission_script(job_dir=params["job_dir"],
                                    job_idx=job_idx,
                                    job_type=params["job_type"],
                                    max_n_nodes=params["max_n_nodes"],
                                    runtime=run_time,
                                    mem=mem_GB,
                                    ptn=partition,
                                    cpu_per_task=cpus_per_task,
                                    python_bin_path=python_path)

            print("-- Submitting job to SLURM.", flush=True)
            subprocess.run(["sbatch", params["job_dir"] + "submit.sh"])
        else:
            print("* Running job as a normal process.", flush=True)
            subprocess.run(["ls", f"{python_path}"])
            subprocess.run([f"{python_path}",
                            f"{graphinvent_path}main.py",
                            "--job-dir", params["job_dir"]])

        # sleep a few secs before submitting next job
        print(f"-- Sleeping 2 seconds.")
        time.sleep(2)
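For orientation, a purely illustrative sketch of the kind of SLURM script a helper such as `write_submission_script` might emit; none of the directives, defaults, or paths below are taken from this repository.

def write_example_submission_script(job_dir, runtime="1-00:00:00", mem=16, ptn="gpu", cpus=4):
    """Hypothetical helper: writes a minimal sbatch script into `job_dir`."""
    script = "\n".join([
        "#!/bin/bash",
        f"#SBATCH --time={runtime}",
        f"#SBATCH --mem={mem}G",
        f"#SBATCH --partition={ptn}",
        f"#SBATCH --cpus-per-task={cpus}",
        f"python main.py --job-dir {job_dir}",
        "",
    ])
    with open(job_dir + "submit.sh", "w") as f:
        f.write(script)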
Python
def group_subgraphs(init_idx, molecule_set, dataset_dict, is_training_set,
                    ts_properties_old=None):
    """
    Collects the subgraphs along the decoding route for molecules in the
    training dataset by checking if they are equivalent (*not* isomorphic).
    Then, sums APDs for equivalent graphs and saves to HDF.

    Args:
      init_idx (int) : Index at which to start analyzing molecules in
        `molecule_set`; this is needed as analysis is done in blocks/slices,
        so `init_idx` is the start index for the next block/slice.
      molecule_set (list) : Contains `rdkit.Chem.Mol` objects.
      dataset_dict (dict) : Contains `h5py.Dataset`s.
      is_training_set (bool) : Indicates if data belongs to the training set
        (as opposed to the validation or testing set), in which case the
        `PreprocessingGraph` objects will be saved.
      ts_properties_old (dict or None) : If provided, it contains the
        properties of the previously processed group of structures. This is
        only done for the training set.

    Return:
      molecules_processed (int) : Total number of molecules processed, used
        for slicing `molecule_set` in the next block (i.e. even though each
        group contains `C.group_size` subgraphs, a variable number of
        molecules may have generated those `C.group_size` subgraphs, so this
        needs to be kept track of).
      dataset_dict (dict) : Contains `h5py.Dataset`s.
      group_size (int) : Either `C.group_size` or the size of the last
        processed group.
      ts_properties (dict or None) : If provided, it contains the properties
        of the current group of structures. This is only done for the
        training set.
    """
    data_subgraphs = []        # initialize
    data_APDs = []             # initialize
    molecular_graph_list = []  # initialize

    # convert all molecules in `molecule_set` to `MolecularGraphs` to loop over
    molecular_graph_generator = map(get_graph, molecule_set)

    molecules_processed = 0  # start counting # of molecules processed

    for graph in molecular_graph_generator:
        molecules_processed += 1

        # store `PreprocessingGraph` object
        molecular_graph_list.append(graph)

        # get the number of decoding graphs
        n_SGs = apd.get_decoding_route_length(molecular_graph=graph)

        for new_SG_idx in range(n_SGs):  # **note: "idx" == "index"

            # `get_decoding_route_state()` returns a list of [`SG`, `APD`],
            # where `SG` := "subgraph"; `APD` := "action probability distribution"
            SG, APD = apd.get_decoding_route_state(molecular_graph=graph,
                                                   subgraph_idx=new_SG_idx)

            # "collect" all APDs corresponding to pre-existing subgraphs,
            # otherwise append both new subgraph and new APD
            count = 0
            for idx, existing_subgraph in enumerate(data_subgraphs):

                count += 1

                # check if subgraph `SG` is "already" in `data_subgraphs` as
                # `existing_subgraph`, and if so, add the "new" APD to the "old"
                try:  # first compare the node feature matrices
                    nodes_equal = (SG[0] == existing_subgraph[0]).all()
                except AttributeError:
                    nodes_equal = False
                try:  # then compare the edge feature tensors
                    edges_equal = (SG[1] == existing_subgraph[1]).all()
                except AttributeError:
                    edges_equal = False

                # if both matrices have a match, then the subgraphs are the same
                if nodes_equal and edges_equal:
                    existing_APD = data_APDs[idx]

                    # add APDs
                    existing_APD += APD
                    break

            # if subgraph is not already in `data_subgraphs`, append it
            if count == len(data_subgraphs) or count == 0:
                data_subgraphs.append(SG)
                data_APDs.append(APD)

            # if `C.group_size` unique subgraphs have been processed, save
            # group to the HDF dataset
            len_data_subgraphs = len(data_subgraphs)
            if len_data_subgraphs == C.group_size:
                dataset_dict = save_group(dataset_dict=dataset_dict,
                                          group_size=C.group_size,
                                          data_subgraphs=data_subgraphs,
                                          data_APDs=data_APDs,
                                          init_idx=init_idx)

                # get molecular properties for group iff it's the training set
                ts_properties = get_ts_properties(
                    is_training_set=is_training_set,
                    molecular_graphs=molecular_graph_list,
                    group_size=C.group_size,
                    ts_properties_old=ts_properties_old)

                # return the datasets, now updated with an additional group
                return molecules_processed, dataset_dict, C.group_size, ts_properties

    # save group with < `C.group_size` subgraphs (e.g. the last block)
    dataset_dict = save_group(dataset_dict=dataset_dict,
                              group_size=len_data_subgraphs,
                              data_subgraphs=data_subgraphs,
                              data_APDs=data_APDs,
                              init_idx=init_idx)

    # get molecular properties for this group iff it's the training set
    ts_properties = get_ts_properties(is_training_set=is_training_set,
                                      molecular_graphs=molecular_graph_list,
                                      group_size=len_data_subgraphs,
                                      ts_properties_old=ts_properties_old)

    # return the datasets, now updated with an additional group
    return molecules_processed, dataset_dict, len_data_subgraphs, ts_properties
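The equivalence test above is a plain element-wise comparison of the padded node and edge arrays. A tiny sketch on made-up arrays (the shapes are arbitrary and only `numpy` is assumed):

import numpy as np

# two padded subgraph representations: (node feature matrix, edge feature tensor)
sg_a = (np.zeros((4, 5), dtype=np.int8), np.zeros((4, 4, 3), dtype=np.int8))
sg_b = (np.zeros((4, 5), dtype=np.int8), np.zeros((4, 4, 3), dtype=np.int8))

nodes_equal = (sg_a[0] == sg_b[0]).all()
edges_equal = (sg_a[1] == sg_b[1]).all()
print(nodes_equal and edges_equal)  # True -> their APDs would be summed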
Python
def create_datasets(hdf_file, max_length, dataset_name_list, dims):
    """
    Creates a dictionary of HDF5 datasets.

    Args:
      hdf_file (h5py._hl.files.File) : HDF5 file to contain datasets.
      max_length (int) : Max dataset length (to be resized later; can only be
        resized if `chunks`==`True`).
      dataset_name_list (list) : Contains names of datasets.
      dims (dict) : Contains the dimensions to use for each dataset.
    """
    ds = {}  # initialize

    # use the name of the dataset as keys in the dictionary of datasets
    for ds_name in dataset_name_list:
        ds[ds_name] = hdf_file.create_dataset(ds_name,
                                              (max_length, *dims[ds_name]),
                                              chunks=True,
                                              dtype=np.dtype("int8"))
    return ds
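For context, a small standalone sketch of creating, slicing, and shrinking a chunked HDF5 dataset with `h5py`; the file name, shapes, and the explicit `maxshape` used here are illustrative assumptions rather than code from this module.

import h5py
import numpy as np

with h5py.File("example.h5", "w") as f:
    # chunked datasets can be resized; maxshape=None along an axis means "unlimited"
    dset = f.create_dataset("nodes",
                            shape=(100, 13, 5),
                            maxshape=(None, 13, 5),
                            chunks=True,
                            dtype=np.dtype("int8"))

    # write a "group" into a slice of the dataset
    dset[0:10] = np.ones((10, 13, 5), dtype=np.int8)

    # shrink the dataset once the true number of rows is known
    dset.resize((10, 13, 5))
    print(dset.shape)  # (10, 13, 5)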
Python
def create_HDF_file(path, is_training_set=False):
    """
    Preprocesses training data specified in `path` and writes it to HDF.

    Args:
      path (str) : Full path/filename to SMILES file containing molecules.
      is_training_set (bool) : Indicates if this is the training set.
    """
    # load the molecules
    molecule_set = load.molecules(path)

    # calculate the total number of molecules and the total number of subgraphs
    n_molecules = len(molecule_set)
    total_n_subgraphs = get_n_subgraphs(molecule_set=molecule_set)
    print(f"-- {n_molecules} molecules in set.", flush=True)
    print(f"-- {total_n_subgraphs} total subgraphs in set.", flush=True)

    # create special datatype for each set of arrays
    dataset_names = ["nodes", "edges", "APDs"]
    dims = get_dataset_dims()

    # prepare HDF5 file to save the datasets to it
    with h5py.File(f"{path[:-3]}h5.chunked", "a") as hdf_file:

        # if a restart file exists and job is set to restart, then restart the
        # preprocessing where it left off, otherwise process as normal
        restart_index_file = C.dataset_dir + "index.restart"
        if C.restart and os.path.exists(restart_index_file):
            last_molecule_idx = util.read_last_molecule_idx(restart_file_path=C.dataset_dir)
            skip_collection = bool(last_molecule_idx == n_molecules and is_training_set)

            # load dictionary of previously created datasets (`ds` below)
            ds = load_datasets(hdf_file=hdf_file, dataset_name_list=dataset_names)
        else:
            last_molecule_idx = 0
            skip_collection = False

            # create a dictionary of HDF datasets (`ds` below)
            ds = create_datasets(hdf_file=hdf_file,
                                 max_length=total_n_subgraphs,
                                 dataset_name_list=dataset_names,
                                 dims=dims)

        dataset_size = 0  # keep track of size to resize dataset later
        ts_properties = None

        # loop over subgraphs in blocks of size `C.group_size`
        for init_idx in range(0, total_n_subgraphs, C.group_size):

            # if `skip_collection` == True, skip directly to resizing/shuffling
            # of HDF datasets (i.e. skip the block below)
            if not skip_collection:

                # get a slice of molecules based on the molecules that have
                # already been processed, indicated by `last_molecule_idx`
                molecule_subset = get_molecule_subset(molecule_set=molecule_set,
                                                      init_idx=last_molecule_idx,
                                                      n_molecules=n_molecules,
                                                      subset_size=C.group_size)

                # collect equivalent subgraphs
                (final_molecule_idx, ds,
                 group_size, ts_properties) = group_subgraphs(init_idx=init_idx,
                                                              molecule_set=molecule_subset,
                                                              dataset_dict=ds,
                                                              is_training_set=is_training_set,
                                                              ts_properties_old=ts_properties)

                # keep track of the last molecule to be processed
                last_molecule_idx += final_molecule_idx
                util.write_last_molecule_idx(last_molecule_idx=last_molecule_idx,
                                             restart_file_path=C.dataset_dir)
                dataset_size += group_size

            # grouping of graphs' APDs means that the number of groups will be
            # less than (`total_n_subgraphs` % `C.group_size`), so the line
            # below breaks the loop once the last molecule in the group is the
            # last in the dataset
            if last_molecule_idx == n_molecules:

                # resize HDF datasets by removing extra padding from initialization
                resize_datasets(dataset_dict=ds,
                                dataset_names=dataset_names,
                                dataset_size=dataset_size,
                                dataset_dims=dims)
                print("Datasets resized.", flush=True)

                if is_training_set:
                    print("Writing training set properties.", flush=True)
                    util.write_ts_properties(ts_properties_dict=ts_properties)

                    print("Shuffling training dataset.", flush=True)
                    for _ in range(int(np.sqrt(dataset_size))):
                        random1 = random.randrange(0, dataset_size, 5)
                        random2 = random.randrange(0, dataset_size, 5)
                        ds = shuffle_datasets(dataset_dict=ds,
                                              dataset_names=dataset_names,
                                              idx1=random1,
                                              idx2=random2)
                break

    print(f"* Resaving datasets in unchunked format.")
    with h5py.File(f"{path[:-3]}h5.chunked", "r", swmr=True) as chunked_file:
        keys = list(chunked_file.keys())
        data = [chunked_file.get(key)[:] for key in keys]
        data_zipped = tuple(zip(data, keys))

    with h5py.File(f"{path[:-3]}h5", "w") as unchunked_file:
        for d, k in tqdm(data_zipped):
            unchunked_file.create_dataset(k, chunks=None, data=d, dtype=np.dtype("int8"))

    # remove the restart file and chunked file if all steps are done
    os.remove(restart_index_file)
    os.remove(f"{path[:-3]}h5.chunked")

    return None
Python
def resize_datasets(dataset_dict, dataset_names, dataset_size, dataset_dims):
    """
    Resizes the input HDF datasets in `dataset_dict`. Originally a much longer
    dataset is created when creating the HDF dataset, because it is impossible
    to precisely predict how many graphs will be equivalent beforehand, so the
    datasets are all made as long as the upper bound.
    """
    for dataset_name in dataset_names:
        try:
            dataset_dict[dataset_name].resize(
                (dataset_size, *dataset_dims[dataset_name]))
        except KeyError:  # `f_term` has no extra dims
            dataset_dict[dataset_name].resize((dataset_size,))

    return dataset_dict
Python
def load_datasets(hdf_file, dataset_name_list):
    """
    Creates a dictionary of HDF datasets which have been previously created
    (for restart jobs only).

    Args:
      hdf_file (h5py._hl.files.File) : HDF5 file containing all the datasets.
      dataset_name_list (list) : Contains names (strings) of datasets.

    Returns:
      ds (dict) : Dictionary of datasets.
    """
    ds = {}  # initialize

    # use the name of the dataset as keys in the dictionary of datasets
    for ds_name in dataset_name_list:
        ds[ds_name] = hdf_file.get(ds_name)

    return ds
Python
def save_group(dataset_dict, data_subgraphs, data_APDs, group_size, init_idx):
    """
    Saves a group of padded subgraphs and their corresponding APDs to the
    existing HDF5 file as `numpy.ndarray`s.

    Args:
      dataset_dict (dict) : Contains HDF5 datasets.
      data_subgraphs (list) : Contains molecular subgraphs.
      data_APDs (list) : Contains APDs.
      group_size (int) : Size of HDF5 "slice".
      init_idx (int) : Index to begin slicing.
    """
    # convert to `np.ndarray`s
    nodes = np.array([graph_tuple[0] for graph_tuple in data_subgraphs])
    edges = np.array([graph_tuple[1] for graph_tuple in data_subgraphs])
    APDs = np.array(data_APDs)

    end_idx = init_idx + group_size  # idx to end slicing

    # once data is padded, save it to dataset slice
    dataset_dict["nodes"][init_idx:end_idx] = nodes
    dataset_dict["edges"][init_idx:end_idx] = edges
    dataset_dict["APDs"][init_idx:end_idx] = APDs

    return dataset_dict
Python
def trade(self, price):
    """
    This method opens new positions or closes already opened positions
    depending on intrinsic events:
      directionalChangeToUp: directional change to up mode
      directionalChangeToDown: directional change to down mode
      downOvershoot: overshoot given a downwards direction
      upOvershoot: overshoot given an upwards direction
    :param price:
    :return:
    """
    self.liquidity_indicator.run(price)
    event = self.events_recorder.record_event(price)

    assert event in ('NOevent', 'upOvershoot', 'downOvershoot', 'directionalChangeToUp',
                     'directionalChangeToDown'), "{} this is not a valid event".format(event)
    assert self.agent_mode in ('long', 'short'), "{} not a valid long_short value".format(self.agent_mode)

    if event != 'NOevent':
        if self.agent_mode == 'long':
            if event == 'downOvershoot' or event == 'directionalChangeToUp':
                self.open_new_order(price, event)
            elif event == 'upOvershoot':
                self.sell_opened_positions(price, event)
        else:
            if event == 'upOvershoot' or event == 'directionalChangeToDown':
                self.open_new_order(price, event)
            elif event == 'downOvershoot':
                self.sell_opened_positions(price, event)
    return 0
Python
def record_event(self, price):
    """
    Records an event given a price.
    :param price:
    :return: NOevent, directionalChangeToUp, directionalChangeToDown,
             downOvershoot, upOvershoot
    """
    assert self.market_mode in ('up', 'down'), '{} is not a valid market mode'.format(self.market_mode)

    if not self.initialized:
        self.initialized = True
        self.reference = self.extreme = price.get_mid()
        self.compute_expected_directional_change()
        self.compute_expected_overshoot()
        return 'NOevent'

    if self.market_mode == 'up':
        if price.get_bid() > self.extreme:
            self.extreme = price.get_bid()
            self.compute_expected_directional_change()
            if price.get_bid() > self.expected_overshoot_price:
                self.reference = self.extreme
                self.compute_expected_overshoot()
                return 'upOvershoot'
        elif price.get_ask() <= self.expected_directional_change_price:
            self.reference = self.extreme = price.get_ask()
            self.market_mode = 'down'
            self.compute_expected_directional_change()
            self.compute_expected_overshoot()
            return 'directionalChangeToDown'
    else:
        if price.get_ask() < self.extreme:
            self.extreme = price.get_ask()
            self.compute_expected_directional_change()
            if price.get_ask() < self.expected_overshoot_price:
                self.reference = self.extreme
                self.compute_expected_overshoot()
                return 'downOvershoot'
        elif price.get_bid() >= self.expected_directional_change_price:
            self.reference = self.extreme = price.get_bid()
            self.market_mode = 'up'
            self.compute_expected_directional_change()
            self.compute_expected_overshoot()
            return 'directionalChangeToUp'
    return 'NOevent'
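The helpers `compute_expected_directional_change` and `compute_expected_overshoot` are not shown in this snippet; a common choice in the intrinsic-event literature is a fixed relative threshold around the current extreme and reference prices. Below is a standalone sketch under that assumption, working on mid prices only; the function name, the 1% thresholds, and the sample prices are all hypothetical.

def detect_events(mid_prices, delta_dc=0.01, delta_os=0.01):
    """Yield (index, event) pairs for a simple directional-change/overshoot scheme."""
    mode = 'up'
    extreme = reference = mid_prices[0]
    for i, p in enumerate(mid_prices[1:], start=1):
        if mode == 'up':
            if p > extreme:
                extreme = p
                if p > reference * (1 + delta_os):
                    reference = extreme
                    yield i, 'upOvershoot'
            elif p <= extreme * (1 - delta_dc):
                reference = extreme = p
                mode = 'down'
                yield i, 'directionalChangeToDown'
        else:
            if p < extreme:
                extreme = p
                if p < reference * (1 - delta_os):
                    reference = extreme
                    yield i, 'downOvershoot'
            elif p >= extreme * (1 + delta_dc):
                reference = extreme = p
                mode = 'up'
                yield i, 'directionalChangeToUp'

prices = [100.0, 100.5, 101.2, 100.1, 99.0, 99.8, 100.2, 101.5]
print(list(detect_events(prices)))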
Python
def execute_sentence(sentence, cursor):
    """
    By calling this method, and using the PyMySQL library, you can execute a
    SQL statement (a "sentence") against the connected database.
    :param sentence: the sentence you want to execute
    :return: the results of the sentence
    """
    try:
        if cursor.connection:
            cursor.execute(sentence)
            return cursor.fetchall()
        else:
            print("impossible to connect to the database")
    except Exception as e:
        return str(e)
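For context, a minimal hedged example of how such a cursor is typically obtained and used with PyMySQL; the host, credentials, database name, and query are placeholders and are not taken from this codebase.

import pymysql

# placeholder connection parameters
connection = pymysql.connect(host="localhost",
                             user="user",
                             password="secret",
                             database="prices")
cursor = connection.cursor()

rows = execute_sentence("SELECT 1", cursor)  # reuses the helper defined above
print(rows)

cursor.close()
connection.close()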
Python
def datetime_to_float(date, time):
    """
    Auxiliary method used to convert a date and a time into a floating time
    value (a time value that isn't tied to a specific time zone).
    :param date: date of a row of the dataset
    :param time: time of a row of the dataset
    :return: the datetime in floating format
    """
    year, month, day = date.split("-")
    hour, minute, second = time.split(":")
    date = dt.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))
    return date.timestamp()
Python
def float_to_datetime(float):
    """
    Auxiliary method used to convert a floating datetime into a Python
    datetime value.
    :param float: floating datetime value
    :return: python datetime value
    """
    return str(dt.datetime.fromtimestamp(float))
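A small usage sketch of the two helpers above; the `dt` alias for `datetime` follows the source, while the sample date is made up. Note that `timestamp()` and `fromtimestamp()` interpret naive datetimes in the local time zone, so the round trip reproduces the original wall-clock values.

import datetime as dt

ts = datetime_to_float("2020-03-01", "14:30:00")
print(ts)                     # a float such as 1583069400.0, depending on the local time zone
print(float_to_datetime(ts))  # "2020-03-01 14:30:00"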
Python
def trade(self, last_price):
    """
    This method opens new positions or closes already opened positions
    depending on intrinsic events:
      directionalChangeToUp: directional change to up mode
      directionalChangeToDown: directional change to down mode
      downOvershoot: overshoot given a downwards direction
      upOvershoot: overshoot given an upwards direction
    :return:
    """
    print(f'connection status: {self.broker.is_connected()}')
    print(f'last price is : {last_price}')

    # call liquidity here
    self.liquidity_indicator.run(last_price)
    event = self.events_recorder.record_event(last_price)
    print(f'event: {event}')

    assert event in ('NOevent', 'upOvershoot', 'downOvershoot', 'directionalChangeToUp',
                     'directionalChangeToDown'), "{} this is not a valid event".format(event)
    assert self.agent_mode in ('long', 'short'), "{} not a valid long_short value".format(self.agent_mode)

    if event != 'NOevent':
        if self.agent_mode == 'long':
            if event == 'downOvershoot' or event == 'directionalChangeToUp':
                self.open_new_order()
            elif event == 'upOvershoot':
                self.sell_opened_positions(last_price)
            else:
                pass
        else:
            if event == 'upOvershoot' or event == 'directionalChangeToDown':
                self.open_new_order()
            elif event == 'downOvershoot':
                self.sell_opened_positions(last_price)
            else:
                pass
    return 0
Python
def record_event(self, last_price):
    """
    Records an event given a price
    :param last_price: last tick, exposing Bid and Ask attributes
    :return: NOevent, directionalChangeToUp, directionalChangeToDown, downOvershoot, upOvershoot
    """
    assert self.market_mode in ('up', 'down'), '{} is not a valid market mode'.format(self.market_mode)

    if not self.initialized:
        self.initialized = True
        self.reference = self.extreme = (last_price.Bid + last_price.Ask) / 2
        self.compute_expected_directional_change()
        self.compute_expected_overshoot()
        return 'NOevent'

    if self.market_mode == 'up':
        if last_price.Bid > self.extreme:
            self.extreme = last_price.Bid
            self.compute_expected_directional_change()

            if last_price.Bid > self.expected_overshoot_price:
                self.reference = self.extreme
                self.compute_expected_overshoot()
                return 'upOvershoot'

        elif last_price.Ask <= self.expected_directional_change_price:
            self.reference = self.extreme = last_price.Ask
            self.market_mode = 'down'
            self.compute_expected_directional_change()
            self.compute_expected_overshoot()
            return 'directionalChangeToDown'
    else:
        if last_price.Ask < self.extreme:
            self.extreme = last_price.Ask
            self.compute_expected_directional_change()

            if last_price.Ask < self.expected_overshoot_price:
                self.reference = self.extreme
                self.compute_expected_overshoot()
                return 'downOvershoot'

        elif last_price.Bid >= self.expected_directional_change_price:
            self.reference = self.extreme = last_price.Bid
            self.market_mode = 'up'
            self.compute_expected_directional_change()
            self.compute_expected_overshoot()
            return 'directionalChangeToUp'

    return 'NOevent'
Python
def _update_cvssv3_version_from_schema(self, root_element):
    """ Tries to update CVSS 3.x version from schema."""
    cvss_3_regex = r'.*cvss-v(3\.[01]).*'

    potential_cvss3 = None
    # iterate over namespaces
    for name_space in root_element.nsmap.values():
        match = re.match(cvss_3_regex, name_space)
        if match:
            cvss_version_matched = match.groups()[0]
            # no potential cvss version found yet -> store it
            if not potential_cvss3:
                potential_cvss3 = cvss_version_matched
            # already have some version, but it's the same as currently matched -> ok, continue
            elif potential_cvss3 == cvss_version_matched:
                continue
            # else we have two different potential cvss versions -> skip this step completely
            else:
                return

    if potential_cvss3:
        logging.info('Default CVSS v3.x version set to %s based on document XML schemas.',
                     potential_cvss3)
        self.vulnerability.default_cvss_version = potential_cvss3
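For illustration, this is the kind of match the namespace loop above performs; the URI below is a made-up example, not an official schema namespace.

# Illustrative only: any namespace URI containing 'cvss-v3.0' or 'cvss-v3.1' matches.
import re

match = re.match(r'.*cvss-v(3\.[01]).*', 'urn:example:schemas:cvss-v3.1:extension')
print(match.groups()[0])   # '3.1'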
Python
def convert_file(self, path) -> dict: """Wrapper to read/parse CVRF and parse it to CSAF JSON structure""" root = DocumentHandler._open_and_validate_file(path) self._parse(root) return self._compose_final_csaf()
Python
def validate_output_against_schema(self, final_csaf) -> bool: """ Validates the CSAF output against the CSAF JSON schema return: True if valid, False if invalid """ with open(self.CSAF_SCHEMA_FILE, encoding='utf-8') as f: csaf_schema_content = json.loads(f.read()) try: Draft202012Validator.check_schema(csaf_schema_content) validator = Draft202012Validator(csaf_schema_content, format_checker=draft202012_format_checker) validator.validate(final_csaf) except SchemaError as e: logging.error( 'CSAF schema validation error. Provided CSAF schema is invalid. Message: %s', e.message) return False except ValidationError as e: logging.error('CSAF schema validation error. Path: %s. Message: %s.', e.json_path, e.message) return False else: logging.info('CSAF schema validation OK.') return True
Python
def critical_exit(msg, status_code=1): """ A critical error encountered, converter is not able to proceed and exits with a status code (default 1) """ logging.critical(msg) sys.exit(status_code)
Python
def handle_boolean_config_values(key, val): """ Converts string representation of boolean value to boolean. """ try: if isinstance(val, bool): return val if val.strip().lower() in {'true', 'yes', '1', 'y'}: return True if val.strip().lower() in {'false', 'no', '0', 'n'}: return False raise ValueError("unexpected value") except (AttributeError, ValueError) as e: critical_exit(f"Reading config.yaml failed. " f"Invalid value for config key {key}: {val} {e}.")
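A few example calls for the helper above; the key name is a placeholder.

# Example conversions; an unrecognised value falls through to critical_exit.
handle_boolean_config_values('example_flag', 'yes')   # True
handle_boolean_config_values('example_flag', '0')     # False
handle_boolean_config_values('example_flag', True)    # True (already a bool, returned as-is)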
Python
def store_json(json_dict, fpath): """ Saves json to file, creates directory if needed.""" try: path = Path(fpath) base_dir = path.parent.absolute() if not os.path.exists(base_dir): os.mkdir(base_dir) print(f"Created output folder {base_dir}.") if os.path.exists(fpath): logging.warning("Output %s already exists. Overwriting it.", fpath) if not fpath.lower().endswith('.json'): logging.warning("Given output file %s does not contain valid .json suffix.", fpath) with open(fpath, 'w', encoding='utf-8') as f: json.dump(json_dict, f, ensure_ascii=False, indent=2) logging.info("Successfully wrote %s.", fpath) # pylint: disable=broad-except except Exception as e: critical_exit(f"Writing output file {fpath} failed. {e}")
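A minimal call sketch for store_json; the payload and output path are placeholders.

# Writes the dict as pretty-printed UTF-8 JSON, creating the parent folder if it is missing.
store_json({'document': {'title': 'Example advisory'}}, 'output/example-advisory.json')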
Python
def check_for_version_t(revision_history): """ Checks whether all version numbers in /document/tracking/revision_history match semantic versioning. Semantic version is defined in version_t definition. see: https://docs.oasis-open.org/csaf/csaf/v2.0/csd01/csaf-v2.0-csd01.html#3111-version-type and section 9.1.5 Conformance Clause 5: CVRF CSAF converter """ pattern = ( r'^((0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)' r'(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)' r'(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))' r'?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?)$' ) return all(re.match(pattern, revision['number']) for revision in revision_history)
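Two illustrative revision histories for the check above:

# '1.1.0-rc.1' and '1.0.0+build.5' are valid semantic versions; '1.1' is not.
ok_history = [{'number': '1.0.0'}, {'number': '1.1.0-rc.1'}, {'number': '1.0.0+build.5'}]
bad_history = [{'number': '1.0.0'}, {'number': '1.1'}]

print(check_for_version_t(ok_history))    # True
print(check_for_version_t(bad_history))   # False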
Python
def _as_int_tuple(text: str) -> Tuple[int, ...]:
    """ Converts string of dotted numbers into tuple of ints """
    try:
        return tuple(int(part) for part in text.split('.'))
    except ValueError:
        return (sys.maxsize,)
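The helper above makes dotted version strings comparable as tuples; a quick sketch:

# Numeric comparison works element-wise; non-numeric input sorts last via sys.maxsize.
print(_as_int_tuple('1.2.10') > _as_int_tuple('1.2.9'))   # True (10 > 9 numerically)
print(_as_int_tuple('2.0'))                                # (2, 0)
print(_as_int_tuple('latest'))                             # (9223372036854775807,) on 64-bit builds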
Python
def create_csaf(self, root_element): """ Parses XML element and stores in JSON structure (self.csaf variable). """ try: self._process_mandatory_elements(root_element) # pylint: disable=broad-except except Exception as e: logging.error( 'Something went wrong when processing mandatory elements for %s. Reason: %s', root_element.tag, e) try: self._process_optional_elements(root_element) # pylint: disable=broad-except except Exception as e: logging.error( 'Something went wrong when processing optional elements for %s. Reason: %s', root_element.tag, e)
Python
def _handle_branches_recursive(self, root_element):
    """ Recursive method for handling the branches; a branch can contain either
    a list of further branches or a single FullProductName. """

    if not hasattr(root_element, 'Branch') and not hasattr(root_element, 'FullProductName'):
        # The ProductTree section doesn't contain Branches at all
        return None

    if 'Branch' in root_element.tag and hasattr(root_element, 'FullProductName'):
        # Make sure we are inside a Branch (and not in the top ProductTree element,
        # where FullProductName can occur) then root_element is the leaf branch
        leaf_branch = {
            'name': root_element.attrib['Name'],
            'category': self._get_branch_type(root_element.attrib['Type']),
            'product': self._get_full_product_name(root_element.FullProductName)
        }
        return leaf_branch

    if hasattr(root_element, 'Branch'):
        branches = []
        for branch_elem in root_element.Branch:
            if hasattr(branch_elem, 'FullProductName'):
                branches.append(self._handle_branches_recursive(branch_elem))
            else:
                branches.append({
                    'name': branch_elem.attrib['Name'],
                    'category': self._get_branch_type(branch_elem.attrib['Type']),
                    'branches': self._handle_branches_recursive(branch_elem)
                })
        return branches

    return None
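As a rough illustration of the shape produced by the recursion above: a vendor branch containing one product-name leaf might convert to something like the following. The field values are invented, and the exact keys of the 'product' dict depend on _get_full_product_name, which is not shown here.

# Invented example of the resulting structure (not actual converter output).
example_branches = [
    {
        'name': 'Example Vendor',
        'category': 'vendor',
        'branches': [
            {
                'name': 'Example Product 1.0',
                'category': 'product_name',
                'product': {'product_id': 'CSAFPID-0001', 'name': 'Example Product 1.0'},
            }
        ],
    }
]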
Python
def register_run_on_failure_keyword(self, keyword: str) -> None: """ This keyword lets you change the keyword that runs on failure during test execution. The default is `Take Screenshot`, which is set on library import. You can set ``None`` to this keyword, if you do not want to run any keyword on failure. Example: | Register Run On Failure Keyword | None | # no keyword is run on failure | | Register Run On Failure Keyword | Custom Keyword | # Custom Keyword is run on failure | """ if keyword.lower() == "none": self.run_on_failure_keyword = None else: self.run_on_failure_keyword = keyword
Python
def change_timeout(self, seconds: int) -> None: """ Change the timeout for connection in seconds. Example: | Change Timeout | 3 | """ self.timeout = seconds
Python
def open_connection(
    self,
    host: str,
    LU: Optional[str] = None,
    port: int = 23,
    extra_args: Optional[Union[List[str], os.PathLike]] = None,
):
    """Create a connection to IBM3270 mainframe with the default port 23.
    To make a connection with the mainframe you only need to provide the Host.
    The Logical Unit Name and the Port can be passed as optional arguments.

    If you wish, you can provide further configuration data to ``extra_args``.
    ``extra_args`` takes in a list, or a path to a file, containing
    [https://x3270.miraheze.org/wiki/Category:Command-line_options|x3270 command line options].

    Entries in the argfile can be on one line or multiple lines. Lines starting with "#" are considered comments.

    | # example_argfile_oneline.txt
    | --accepthostname myhost.com

    | # example_argfile_multiline.txt
    | --accepthostname myhost.com
    | # this is a comment
    | --charset french

    Please make sure the arguments you are providing are available for your specific x3270 application and version.

    Example:
        | Open Connection | Hostname |
        | Open Connection | Hostname | LU=LUname |
        | Open Connection | Hostname | port=992 |
        | ${extra_args} | Create List | --accepthostname | myhost.com | --cafile | ${CURDIR}/cafile.crt |
        | Open Connection | Hostname | extra_args=${extra_args} |
        | Open Connection | Hostname | extra_args=${CURDIR}/argfile.txt |
    """
    self.host = host
    self.lu = LU
    self.port = port
    if self.lu:
        self.credential = "%s@%s:%s" % (self.lu, self.host, self.port)
    else:
        self.credential = "%s:%s" % (self.host, self.port)
    if self.mf:
        self.close_connection()
    self.mf = Emulator(self.visible, self.timeout, extra_args)
    self.mf.connect(self.credential)
Python
def change_wait_time(self, wait_time: float) -> None: """To give time for the mainframe screen to be "drawn" and receive the next commands, a "wait time" has been created, which by default is set to 0.5 seconds. This is a sleep applied AFTER the following keywords: - `Execute Command` - `Send Enter` - `Send PF` - `Write` - `Write in position` If you want to change this value, just use this keyword passing the time in seconds. Example: | Change Wait Time | 0.1 | | Change Wait Time | 2 | """ self.wait = wait_time
Python
def change_wait_time_after_write(self, wait_time_after_write: float) -> None:
    """To give the user time to see what is happening inside the mainframe, a "wait time after write"
    has been created, which by default is set to 0 seconds. This is a sleep applied AFTER sending
    a string in these keywords:

    - `Write`
    - `Write Bare`
    - `Write in position`
    - `Write Bare in position`

    If you want to change this value, just use this keyword passing the time in seconds.

    Note: This keyword is useful for debugging purposes

    Example:
        | Change Wait Time After Write | 0.5 |
        | Change Wait Time After Write | 2 |
    """
    self.wait_write = wait_time_after_write
Python
def read_all_screen(self) -> str: """Read the current screen and returns all content in one string. This is useful if your automation scripts should take different routes depending on a message shown on the screen. Example: | ${screen} | Read All Screen | | IF | 'certain text' in '''${screen}''' | | | Do Something | | ELSE | | | | Do Something Else | | END | | """ return self._read_all_screen()
Python
def take_screenshot(self, height: int = 410, width: int = 670) -> None:
    """Generate a screenshot of the IBM 3270 Mainframe in html format. The default folder
    is the log folder of RobotFramework; if you want to change it, see `Set Screenshot Folder`.

    The screenshot is embedded in the log as an iframe with the default values height=410
    and width=670; you can change these values by passing them to the keyword.

    Example:
        | Take Screenshot |
        | Take Screenshot | height=500 | width=700 |
    """
    filename_prefix = "screenshot"
    extension = "html"
    filename_suffix = round(time.time() * 1000)
    filepath = os.path.join(
        self.imgfolder, "%s_%s.%s" % (filename_prefix, filename_suffix, extension)
    )
    self.mf.save_screen(os.path.join(self.output_folder, filepath))
    logger.write(
        '<iframe src="%s" height="%s" width="%s"></iframe>'
        % (filepath.replace("\\", "/"), height, width),
        level="INFO",
        html=True,
    )
Python
def wait_field_detected(self) -> None:
    """Wait until the screen is ready, the cursor has been positioned
    on a modifiable field, and the keyboard is unlocked.

    Sometimes the server will "unlock" the keyboard but the screen
    will not yet be ready. In that case, an attempt to read or write to the screen
    will result in an 'E' keyboard status because we tried to read from a screen
    that is not yet ready.

    Using this method tells the client to wait until a field is
    detected and the cursor has been positioned on it.
    """
    self.mf.wait_for_field()
Python
def delete_char( self, ypos: Optional[int] = None, xpos: Optional[int] = None ) -> None: """Delete the character under the cursor. If you want to delete a character that is in another position, simply pass the coordinates ``ypos`` / ``xpos``. Co-ordinates are 1 based, as listed in the status area of the terminal. Example: | Delete Char | | Delete Char | ypos=9 | xpos=25 | """ if ypos is not None and xpos is not None: self.mf.move_to(ypos, xpos) self.mf.exec_command(b"Delete")
Python
def delete_field(
    self, ypos: Optional[int] = None, xpos: Optional[int] = None
) -> None:
    """Delete the entire content of a field at the current cursor location and position
    the cursor at the beginning of the field.

    If you want to delete a field that is in another position, simply pass the coordinates
    ``ypos`` / ``xpos`` of any part of the field.

    Co-ordinates are 1 based, as listed in the status area of the terminal.

    Example:
        | Delete Field |
        | Delete Field | ypos=12 | xpos=6 |
    """
    if ypos is not None and xpos is not None:
        self.mf.move_to(ypos, xpos)
    self.mf.delete_field()
Python
def send_PF(self, PF: str) -> None: """Send a Program Function to the screen. Example: | Send PF | 3 | """ self.mf.exec_command(("PF(" + PF + ")").encode("utf-8")) time.sleep(self.wait)
Python
def write(self, txt: str) -> None: """Send a string *and Enter* to the screen at the current cursor location. Example: | Write | something | """ self._write(txt, enter=1)
Python
def write_bare(self, txt: str) -> None: """Send only the string to the screen at the current cursor location. Example: | Write Bare | something | """ self._write(txt)
Python
def wait_until_string(self, txt: str, timeout: int = 5) -> str:
    """Wait until a string exists on the mainframe screen to perform the next step. If the string
    does not appear within 5 seconds, the keyword will raise an exception. You can define a
    different timeout.

    Example:
        | Wait Until String | something |
        | Wait Until String | something | timeout=10 |
    """
    # deadline as seconds since the epoch (avoids comparing formatted time strings)
    max_time = time.time() + timeout
    while time.time() < max_time:
        result = self._search_string(str(txt))
        if result:
            return txt
    raise Exception(
        'String "' + txt + '" not found in ' + str(timeout) + " seconds"
    )
Python
def _search_string(self, string: str, ignore_case: bool = False) -> bool: """Search if a string exists on the mainframe screen and return True or False.""" def __read_screen(string: str, ignore_case: bool) -> bool: for ypos in range(24): line = self.mf.string_get(ypos + 1, 1, 80) if ignore_case: line = line.lower() if string in line: return True return False status = __read_screen(string, ignore_case) return status
Python
def page_should_contain_string( self, txt: str, ignore_case: bool = False, error_message: Optional[str] = None ) -> None: """Assert that a given string exists on the mainframe screen. The assertion is case sensitive. If you want it to be case insensitive, you can pass the argument ignore_case=True. You can change the exception message by setting a custom string to error_message. Example: | Page Should Contain String | something | | Page Should Contain String | someTHING | ignore_case=True | | Page Should Contain String | something | error_message=New error message | """ message = 'The string "' + txt + '" was not found' if error_message: message = error_message if ignore_case: txt = txt.lower() result = self._search_string(txt, ignore_case) if not result: raise Exception(message) logger.info('The string "' + txt + '" was found')
Python
def page_should_not_contain_string(
    self, txt: str, ignore_case: bool = False, error_message: Optional[str] = None
) -> None:
    """Assert that a given string does NOT exist on the mainframe screen.

    The assertion is case sensitive. If you want it to be case insensitive, you can pass the argument
    ignore_case=True.

    You can change the exception message by setting a custom string to error_message.

    Example:
        | Page Should Not Contain String | something |
        | Page Should Not Contain String | someTHING | ignore_case=True |
        | Page Should Not Contain String | something | error_message=New error message |
    """
    message = 'The string "' + txt + '" was found'
    if error_message:
        message = error_message
    if ignore_case:
        txt = txt.lower()
    result = self._search_string(txt, ignore_case)
    if result:
        raise Exception(message)
Python
def page_should_contain_any_string( self, list_string: List[str], ignore_case: bool = False, error_message: Optional[str] = None, ) -> None: """Assert that one of the strings in a given list exists on the mainframe screen. The assertion is case sensitive. If you want it to be case insensitive, you can pass the argument ignore_case=True. You can change the exception message by setting a custom string to error_message. Example: | Page Should Contain Any String | ${list_of_string} | | Page Should Contain Any String | ${list_of_string} | ignore_case=True | | Page Should Contain Any String | ${list_of_string} | error_message=New error message | """ message = 'The strings "' + str(list_string) + '" were not found' if error_message: message = error_message if ignore_case: list_string = [item.lower() for item in list_string] for string in list_string: result = self._search_string(string, ignore_case) if result: break if not result: raise Exception(message)
Python
def page_should_not_contain_any_string(
    self,
    list_string: List[str],
    ignore_case: bool = False,
    error_message: Optional[str] = None,
) -> None:
    """Assert that none of the strings in a given list exists on the mainframe screen. If one or
    more of the strings are found, the keyword will raise an exception.

    The assertion is case sensitive. If you want it to be case insensitive, you can pass the argument
    ignore_case=True.

    You can change the exception message by setting a custom string to error_message.

    Example:
        | Page Should Not Contain Any Strings | ${list_of_string} |
        | Page Should Not Contain Any Strings | ${list_of_string} | ignore_case=True |
        | Page Should Not Contain Any Strings | ${list_of_string} | error_message=New error message |
    """
    self._compare_all_list_with_screen_text(
        list_string, ignore_case, error_message, should_match=False
    )
Python
def page_should_contain_all_strings( self, list_string: List[str], ignore_case: bool = False, error_message: Optional[str] = None, ) -> None: """Assert that all of the strings in a given list exist on the mainframe screen. The assertion is case sensitive. If you want it to be case insensitive, you can pass the argument ignore_case=True. You can change the exception message by setting a custom string to error_message. Example: | Page Should Contain All Strings | ${list_of_string} | | Page Should Contain All Strings | ${list_of_string} | ignore_case=True | | Page Should Contain All Strings | ${list_of_string} | error_message=New error message | """ self._compare_all_list_with_screen_text( list_string, ignore_case, error_message, should_match=True )
Python
def page_should_not_contain_all_strings(
    self,
    list_string: List[str],
    ignore_case: bool = False,
    error_message: Optional[str] = None,
) -> None:
    """Fails if one of the strings in a given list exists on the mainframe screen. If one of the
    strings is found, the keyword will raise an exception.

    The assertion is case sensitive. If you want it to be case insensitive, you can pass the argument
    ignore_case=True.

    You can change the exception message by setting a custom string to error_message.

    Example:
        | Page Should Not Contain All Strings | ${list_of_string} |
        | Page Should Not Contain All Strings | ${list_of_string} | ignore_case=True |
        | Page Should Not Contain All Strings | ${list_of_string} | error_message=New error message |
    """
    message = error_message
    if ignore_case:
        list_string = [item.lower() for item in list_string]
    for string in list_string:
        result = self._search_string(string, ignore_case)
        if result:
            if message is None:
                message = 'The string "' + string + '" was found'
            raise Exception(message)
Python
def page_should_contain_string_x_times(
    self,
    txt: str,
    number: int,
    ignore_case: bool = False,
    error_message: Optional[str] = None,
) -> None:
    """Asserts that the entered string appears the desired number of times on the mainframe screen.

    The assertion is case sensitive. If you want it to be case insensitive, you can pass the argument
    ignore_case=True.

    You can change the exception message by setting a custom string to error_message.

    Example:
        | Page Should Contain String X Times | something | 3 |
        | Page Should Contain String X Times | someTHING | 3 | ignore_case=True |
        | Page Should Contain String X Times | something | 3 | error_message=New error message |
    """
    message = error_message
    all_screen = self._read_all_screen()
    if ignore_case:
        txt = txt.lower()
        all_screen = all_screen.lower()
    number_of_times = all_screen.count(txt)
    if number_of_times != number:
        if message is None:
            message = f'The string "{txt}" was not found "{number}" times, it appears "{number_of_times}" times'
        raise Exception(message)
    logger.info('The string "' + txt + '" was found "' + str(number) + '" times')
Python
def page_should_match_regex(self, regex_pattern: str) -> None: r"""Fails if string does not match pattern as a regular expression. Regular expression check is implemented using the Python [https://docs.python.org/2/library/re.html|re module]. Python's regular expression syntax is derived from Perl, and it is thus also very similar to the syntax used, for example, in Java, Ruby and .NET. Backslash is an escape character in the test data, and possible backslashes in the pattern must thus be escaped with another backslash (e.g. \\d\\w+). """ page_text = self._read_all_screen() if not re.findall(regex_pattern, page_text, re.MULTILINE): raise Exception('No matches found for "' + regex_pattern + '" pattern')
Python
def page_should_not_match_regex(self, regex_pattern: str) -> None: r"""Fails if string does match pattern as a regular expression. Regular expression check is implemented using the Python [https://docs.python.org/2/library/re.html|re module]. Python's regular expression syntax is derived from Perl, and it is thus also very similar to the syntax used, for example, in Java, Ruby and .NET. Backslash is an escape character in the test data, and possible backslashes in the pattern must thus be escaped with another backslash (e.g. \\d\\w+). """ page_text = self._read_all_screen() if re.findall(regex_pattern, page_text, re.MULTILINE): raise Exception( 'There are matches found for "' + regex_pattern + '" pattern' )
Python
def page_should_contain_match( self, txt: str, ignore_case: bool = False, error_message: Optional[str] = None ) -> None: """Assert that the text displayed on the mainframe screen matches the given pattern. Pattern matching is similar to matching files in a shell, and it is always case sensitive. In the pattern, * matches anything and ? matches any single character. Note that for this keyword the entire screen is considered a string. So if you want to search for the string "something" and it is somewhere other than at the beginning or end of the screen, it should be reported as follows: **something** The assertion is case sensitive. If you want it to be case insensitive, you can pass the argument ignore_case=True. You can change the exception message by setting a custom string to error_message. Example: | Page Should Contain Match | **something** | | Page Should Contain Match | **so???hing** | | Page Should Contain Match | **someTHING** | ignore_case=True | | Page Should Contain Match | **something** | error_message=New error message | """ message = error_message all_screen = self._read_all_screen() if ignore_case: txt = txt.lower() all_screen = all_screen.lower() matcher = Matcher(txt, caseless=False, spaceless=False) result = matcher.match(all_screen) if not result: if message is None: message = 'No matches found for "' + txt + '" pattern' raise Exception(message)
Python
def page_should_not_contain_match( self, txt: str, ignore_case: bool = False, error_message: Optional[str] = None ) -> None: """Assert that the text displayed on the mainframe screen does NOT match the given pattern. Pattern matching is similar to matching files in a shell, and it is always case sensitive. In the pattern, * matches anything and ? matches any single character. Note that for this keyword the entire screen is considered a string. So if you want to search for the string "something" and it is somewhere other than at the beginning or end of the screen, it should be reported as follows: **something** The assertion is case sensitive. If you want it to be case insensitive, you can pass the argument ignore_case=True. You can change the exception message by setting a custom string to error_message. Example: | Page Should Not Contain Match | **something** | | Page Should Not Contain Match | **so???hing** | | Page Should Not Contain Match | **someTHING** | ignore_case=True | | Page Should Not Contain Match | **something** | error_message=New error message | """ message = error_message all_screen = self._read_all_screen() if ignore_case: txt = txt.lower() all_screen = all_screen.lower() matcher = Matcher(txt, caseless=False, spaceless=False) result = matcher.match(all_screen) if result: if message is None: message = 'There are matches found for "' + txt + '" pattern' raise Exception(message)
Python
def _read_all_screen(self) -> str: """Read all the mainframe screen and return in a single string.""" full_text = "" for ypos in range(24): full_text += self.mf.string_get(ypos + 1, 1, 80) return full_text
Python
def _check_limits(ypos: int, xpos: int): """Checks if the user has passed some coordinate y / x greater than that existing in the mainframe""" if ypos > 24: raise Exception( "You have exceeded the y-axis limit of the mainframe screen" ) if xpos > 80: raise Exception( "You have exceeded the x-axis limit of the mainframe screen" )
Python
def lint_python(c):
    """Perform python code formatting and checks with black, isort, flake8 and mypy"""
    c.run("black ./setup.py ./tasks.py Mainframe3270/ utest/")
    c.run("isort ./setup.py ./tasks.py Mainframe3270/ utest/")
    c.run("flake8 ./setup.py ./tasks.py Mainframe3270/ utest/")
    c.run("mypy ./setup.py ./tasks.py Mainframe3270/")
Python
def lint(c): """ Perform code formatting for both robot and python code. Short option for `inv lint-python && inv lint-robot`. """ pass
Python
def kw_docs(c):
    """Generates the keyword documentation with libdoc.

    Creates an html and an xml file and places them under doc/.
    """
    c.run("python -m robot.libdoc Mainframe3270/ doc/Mainframe3270.html")
    c.run("python -m robot.libdoc Mainframe3270/ doc/Mainframe3270.xml")
Python
def f_init_login(param_acc, param_pass, param_exe):
    """
    Initialize connection and login into a MetaTrader 5 account on the local computer where
    this code is executed, using the MetaTrader5 python package.

    Parameters
    ----------
    param_acc: int
        account number used to login into MetaTrader5 Web/Desktop App
        (normally an 8-9 digit integer number)
        param_acc = 41668916

    param_pass: str
        account trader's password (or just password) to login into MetaTrader5 Web/Desktop App
        (normally alphanumeric, including uppercase letters and sometimes symbols). If the
        investor's password is provided instead, some actions, such as opening trades, do not work.
        param_pass = "n2eunlnt"

    param_exe: str
        Route on disk to the executable file of the MetaTrader5 desktop app which will be used
        param_exe = 'C:\\Program Files\\MetaTrader 5\\terminal64.exe'

    Return
    ------
    if connection is successful then returns the connected client object and prints a message,
    if connection is not successful then prints an error message and attempts a shutdown of
    the connection.

    References
    ----------
    https://www.mql5.com/en/docs/integration/python_metatrader5/mt5login_py

    """

    # server name (as it is specified in the terminal)
    mt5_ser = "MetaQuotes-Demo"
    # timeout (in milliseconds)
    mt5_tmo = 10000

    # Perform initialization handshake
    ini_message = mt5.initialize(param_exe, login=param_acc, password=param_pass,
                                 server=mt5_ser, timeout=mt5_tmo, portable=False)

    # resulting message
    if not ini_message:
        print(" **** init_login failed, error code =", mt5.last_error())
        mt5.shutdown()
    else:
        print(" ++++ init_login succeeded, message = ", ini_message)

    # returns an instance of a connection object (or client)
    return mt5
Python
def f_acc_info(param_ct):
    """
    Get the info of the account associated with the initialized client param_ct

    Params
    ------
    param_ct: MetaTrader5 initialized client object
        this is an already successfully initialized connection object to the MetaTrader5
        Desktop App

    Returns
    -------
    df_acc_info: pd.DataFrame
        Pandas DataFrame with the account info

    References
    ----------
    https://www.mql5.com/en/docs/integration/python_metatrader5/mt5login_py

    """

    # get the account info and transform it into a dataframe format
    acc_info = param_ct.account_info()._asdict()

    # select specific info to display
    df_acc_info = pd.DataFrame(list(acc_info.items()), columns=['property', 'value'])

    # return dataframe with the account info
    return df_acc_info
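A short end-to-end sketch tying the two MetaTrader 5 helpers together; the account number, password and terminal path are the illustrative values from the docstrings above, not working credentials.

# Illustrative values only; replace with real credentials and a real terminal path.
mt5_client = f_init_login(param_acc=41668916,
                          param_pass="n2eunlnt",
                          param_exe='C:\\Program Files\\MetaTrader 5\\terminal64.exe')

df_info = f_acc_info(mt5_client)
print(df_info)   # DataFrame with one row per account property (balance, leverage, ...)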