Python
def convert_item(self, item):
    """Convert the item that the MulticomponentMatcher deals with into
    the item that your component plugin is responsible for.

    The multicomponent matcher will pass the item that it has received,
    and it is up to your matcher plugin to get the object that it works
    with from that object.

    By default, returns the item itself.
    """
    return item
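For example, a component plugin whose matcher only cares about one part of a composite item could override convert_item to unwrap it. A minimal hypothetical sketch (the base class and attribute names are illustrative, not from the source):

class AddressMatcher(ComponentMatcherBase):  # hypothetical base class
    def convert_item(self, item):
        # The multicomponent matcher passes the whole record;
        # this plugin only works with its address part.
        return item.address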
Python
def project_updater(name, tag_prefix, project_path, start_after, silent):
    """ help for step by step updating a (django) project. """
    if not tag_prefix:
        tag_prefix = name
    click.echo("-----------------------------------")
    click.echo("#### Fetching tags with prefix \"%s\"" % tag_prefix)
    try:
        tags = subprocess.check_output(
            'cd %s && git tag | grep %s' % (project_path, tag_prefix),
            shell=True)
    except subprocess.CalledProcessError as e:
        if e.output:
            click.echo(e.output)
        else:
            click.echo("No Tags found, aborting!")
        exit()
    tags = tags.splitlines()

    # check if current tag is in taglist, if yes, start from there onwards
    current_tag = None
    if start_after:
        current_tag = start_after
    else:
        try:
            current_tag = subprocess.check_output(
                'cd %s && git describe --tags' % project_path, shell=True)
        except subprocess.CalledProcessError as e:
            current_tag = ''
    current_tag = current_tag.strip()
    if current_tag in tags:
        while not tags[0] == current_tag.strip():
            tags.pop(0)
        tags.pop(0)
        click.echo("-----------------------------------")
        click.echo("#### Starting right after %s" % current_tag)

    for tag in tags:
        click.echo("-----------------------------------")
        click.echo("#### To step {}".format(tag))
        click.echo("-----------------------------------")
        if not silent:
            if not click.confirm('Do you want to continue?', default=True):
                exit()
        else:
            time.sleep(2)
        try:
            git_out = subprocess.check_output(
                'cd %s && git checkout %s' % (project_path, tag), shell=True)
        except subprocess.CalledProcessError as e:
            click.echo('Tag not found, aborting: %s' % tag)
            exit()
        try:
            command = os.path.join(project_path, name, tag + ".sh")
            if os.path.isfile(command):
                click.echo("-----------------------------------")
                click.echo("running: %s" % command)
                file = open(command, 'r')
                click.echo(file.read())
                click.echo("-----------------------------------")
                file.close()
                subprocess.check_call(('chmod', 'u+x', command, ))
                subprocess.check_call(command)
                # undo chmod, if needed!
                subprocess.check_call(('git', 'checkout', '.'))
            else:
                click.echo("No script for %s. Going ahead." % tag)
                # click.echo(command)
        except subprocess.CalledProcessError as e:
            if e.output:
                click.echo(e.output)
            else:
                click.echo("Unknown error running %s!" % command)
            exit()
        has_break = os.path.join(project_path, name, tag + ".break")
        if os.path.isfile(has_break):
            click.echo("-----------------------------------")
            click.echo("#### Hasta la vista. Break after %s" % tag)
            click.echo("-----------------------------------")
            exit(0)
        click.echo("-----------------------------------")
        click.echo("#### Finished {}".format(tag))
        click.echo("-----------------------------------")
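As a usage sketch, the function pairs naturally with a click command-line entry point. Everything below is illustrative (the command name and option wiring are assumptions; only project_updater itself comes from the source):

import click

@click.command()
@click.argument('name')
@click.option('--tag-prefix', default='', help='Tag prefix to filter on (defaults to NAME).')
@click.option('--project-path', default='.', help='Path to the git checkout to update.')
@click.option('--start-after', default='', help='Skip all tags up to and including this one.')
@click.option('--silent', is_flag=True, help='Do not ask for confirmation between steps.')
def update(name, tag_prefix, project_path, start_after, silent):
    """Step through a project's migration tags one by one."""
    project_updater(name, tag_prefix, project_path, start_after, silent)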
Python
def south2django(project_path):
    """ recursively rename migrations to south_migrations,
    add migrations folder with __init__.py """
    # works. thx.
    # http://stackoverflow.com/questions/14798220/how-can-i-search-sub-folders-using-glob-glob-module-in-python
    migration_folders = [os.path.join(dirpath, dir)
                         for dirpath, dirnames, files in os.walk(project_path)
                         for dir in fnmatch.filter(dirnames, 'migrations')]
    for folder in migration_folders:
        rename = folder.replace('migrations', 'south_migrations')
        init_py = os.path.join(folder, '__init__.py')
        if os.path.isdir(rename):
            click.echo("skipping (south_migrations exists) '%s'" % folder)
            continue
        try:
            shutil.move(folder, rename)
        except shutil.Error:
            # was a Python 2 print statement; click.echo keeps output consistent
            click.echo("error renaming '%s'" % folder)
            continue
        os.mkdir(folder)
        open(init_py, 'a').close()
        click.echo("moved '%s'" % folder)
Python
def read_rating(filepth):
    '''
    read rating csv
    file format is as follows.
        userId, movieId, rating, timestamp
        1, 307, 3.5, 1256677221
        1, 481, 3.5, 1256677456
        1, 1091, 1.5, 1256677471
    '''
    #print('\nread_rating start')
    rating_df = pd.read_csv(filepth)
    rating_df = rating_df.sort_values(['userId', 'timestamp'])
    rating_df = rating_df.reset_index(drop=True)
    #print('read_rating end')
    return rating_df
Python
def make_output_dir(base_dir_name=os.path.join('.', 'result'), dir_name='result',
                    with_datetime=True):
    '''
    make output directory
    return output directory path
    '''
    dir_path = dir_name
    if with_datetime:
        dir_path = dir_path + '_' + datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    dir_path = os.path.join(base_dir_name, dir_path)
    # os.makedirs(dir_path, exist_ok=True)
    return dir_path
Python
def model_summary(model, save_filename=None, print_console=True):
    '''
    save model summary to *.txt.
    print model summary to console.
    '''
    # save model summary to txt
    if save_filename is not None:
        with open(save_filename, "w") as fp:
            model.summary(print_fn=lambda x: fp.write(x + "\n"))
    #
    if print_console:
        model.summary()
    return
Python
def make_model_mf(self, user_bias=True, item_bias=True, cross_term=True,
                  latent_num=10, cross_term_l2=0):
    '''
    make normal matrix factorization model with keras.
    rating = all_mean + user_bias + item_bias + cross_term
    '''
    input_user_id = Input(shape=(1,), name='user_id')
    input_item_id = Input(shape=(1,), name='item_id')

    # user bias
    u_bias = None
    if user_bias:
        u_bias = self.__bias_term(input_id=input_user_id,
                                  unique_id_num=self.unique_user_num,
                                  l2=0, latent_layer_name='user_bias')
    # item bias
    i_bias = None
    if item_bias:
        i_bias = self.__bias_term(input_id=input_item_id,
                                  unique_id_num=self.unique_item_num,
                                  l2=0, latent_layer_name='item_bias')
    # cross term
    crs_trm = None
    if cross_term:
        crs_u = self.__single_term(input_id=input_user_id,
                                   unique_id_num=self.unique_user_num,
                                   output_dim=latent_num, l2=cross_term_l2,
                                   latent_layer_name='user_latent')
        crs_i = self.__single_term(input_id=input_item_id,
                                   unique_id_num=self.unique_item_num,
                                   output_dim=latent_num, l2=cross_term_l2,
                                   latent_layer_name='item_latent')
        crs_trm = self.__cross_term(crs_u, crs_i, merge='sum')

    # concatenate
    def append_isNotNone(lst, v):
        tls = copy.copy(lst)
        if v is not None:
            tls.append(v)
        return tls
    concats = []
    concats = append_isNotNone(concats, u_bias)
    concats = append_isNotNone(concats, i_bias)
    concats = append_isNotNone(concats, crs_trm)

    if len(concats) > 1:
        y = Add(name='add_bias_crossTerm')(concats)
    else:
        y = concats[0]

    # add mean
    y = Lambda(lambda x: x * self.rating_scale + self.all_rating_mean,
               name='scaling')(y)

    self.model = Model(inputs=[input_user_id, input_item_id], outputs=y)
    return
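A minimal training sketch for the model built above, assuming the surrounding class (called MatrixFactorization here, a hypothetical name) has been fed the unique user/item counts and rating statistics it references:

mf = MatrixFactorization()        # hypothetical wrapper class
mf.make_model_mf(latent_num=10)
mf.model.compile(optimizer='adam', loss='mse')
# userId/movieId are assumed integer-encoded to 0..n-1 for the Embedding layers
mf.model.fit(x=[train_df['userId'].values, train_df['movieId'].values],
             y=train_df['rating'].values,
             batch_size=256, epochs=10, validation_split=0.1)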
Python
def make_model_dmf_deepLatent(self, user_bias=True, item_bias=True, cross_term=True,
                              latent_num=10, cross_term_l2=0,
                              hidden_nodes_latent=[10], hidden_l2=[0],
                              hidden_dropout_rates=[]):
    '''
    make deep matrix factorization model with keras,
    passing the user/item latent vectors through hidden layers.
    rating = all_mean + user_bias + item_bias + cross_term
    '''
    input_user_id = Input(shape=(1,), name='user_id')
    input_item_id = Input(shape=(1,), name='item_id')

    # user bias
    u_bias = None
    if user_bias:
        u_bias = self.__bias_term(input_id=input_user_id,
                                  unique_id_num=self.unique_user_num,
                                  l2=0, latent_layer_name='user_bias')
    # item bias
    i_bias = None
    if item_bias:
        i_bias = self.__bias_term(input_id=input_item_id,
                                  unique_id_num=self.unique_item_num,
                                  l2=0, latent_layer_name='item_bias')
    # cross term
    crs_trm = None
    if cross_term:
        crs_u = self.__single_term(input_id=input_user_id,
                                   unique_id_num=self.unique_user_num,
                                   output_dim=latent_num, l2=cross_term_l2,
                                   hidden_nodes=hidden_nodes_latent,
                                   hidden_l2s=hidden_l2,
                                   hidden_dropout_rates=hidden_dropout_rates,
                                   latent_layer_name='user_latent')
        crs_i = self.__single_term(input_id=input_item_id,
                                   unique_id_num=self.unique_item_num,
                                   output_dim=latent_num, l2=cross_term_l2,
                                   hidden_nodes=hidden_nodes_latent,
                                   hidden_l2s=hidden_l2,
                                   hidden_dropout_rates=hidden_dropout_rates,
                                   latent_layer_name='item_latent')
        crs_trm = self.__cross_term(crs_u, crs_i, merge='sum')

    # concatenate
    def append_isNotNone(lst, v):
        tls = copy.copy(lst)
        if v is not None:
            tls.append(v)
        return tls
    concats = []
    concats = append_isNotNone(concats, u_bias)
    concats = append_isNotNone(concats, i_bias)
    concats = append_isNotNone(concats, crs_trm)

    if len(concats) > 1:
        y = Add(name='add_bias_crossTerm')(concats)
    else:
        y = concats[0]

    # add mean
    y = Lambda(lambda x: x * self.rating_scale + self.all_rating_mean,
               name='scaling')(y)

    self.model = Model(inputs=[input_user_id, input_item_id], outputs=y)
    return
Python
def make_model_dmf_deepCrossterm(self, user_bias=True, item_bias=True, cross_term=True,
                                 latent_num=10, cross_term_l2=0,
                                 hidden_nodes_crossterm=[10], hidden_l2=[0],
                                 hidden_dropout_rates=[]):
    '''
    make deep matrix factorization model with keras,
    passing the cross term through hidden layers.
    rating = all_mean + user_bias + item_bias + cross_term
    '''
    input_user_id = Input(shape=(1,), name='user_id')
    input_item_id = Input(shape=(1,), name='item_id')

    # user bias
    u_bias = None
    if user_bias:
        u_bias = self.__bias_term(input_id=input_user_id,
                                  unique_id_num=self.unique_user_num,
                                  l2=0, latent_layer_name='user_bias')
    # item bias
    i_bias = None
    if item_bias:
        i_bias = self.__bias_term(input_id=input_item_id,
                                  unique_id_num=self.unique_item_num,
                                  l2=0, latent_layer_name='item_bias')
    # cross term
    crs_trm = None
    if cross_term:
        crs_u = self.__single_term(input_id=input_user_id,
                                   unique_id_num=self.unique_user_num,
                                   output_dim=latent_num, l2=cross_term_l2,
                                   latent_layer_name='user_latent')
        crs_i = self.__single_term(input_id=input_item_id,
                                   unique_id_num=self.unique_item_num,
                                   output_dim=latent_num, l2=cross_term_l2,
                                   latent_layer_name='item_latent')
        crs_trm = self.__cross_term(crs_u, crs_i, merge='sum',
                                    hidden_nodes=hidden_nodes_crossterm,
                                    hidden_l2s=hidden_l2,
                                    hidden_dropout_rates=hidden_dropout_rates)

    # concatenate
    def append_isNotNone(lst, v):
        tls = copy.copy(lst)
        if v is not None:
            tls.append(v)
        return tls
    concats = []
    concats = append_isNotNone(concats, u_bias)
    concats = append_isNotNone(concats, i_bias)
    concats = append_isNotNone(concats, crs_trm)

    if len(concats) > 1:
        y = Add(name='add_bias_crossTerm')(concats)
    else:
        y = concats[0]

    # add mean
    y = Lambda(lambda x: x * self.rating_scale + self.all_rating_mean,
               name='scaling')(y)

    self.model = Model(inputs=[input_user_id, input_item_id], outputs=y)
    return
Python
def make_model_dmf_deepLatent_deepCrossterm(self, user_bias=True, item_bias=True,
                                            cross_term=True, latent_num=10,
                                            cross_term_l2=0,
                                            hidden_nodes_latent=[10],
                                            hidden_nodes_crossterm=[10],
                                            hidden_l2=[0],
                                            hidden_dropout_rates=[]):
    '''
    make deep matrix factorization model with keras,
    passing both the latent vectors and the cross term through hidden layers.
    rating = all_mean + user_bias + item_bias + cross_term
    '''
    input_user_id = Input(shape=(1,), name='user_id')
    input_item_id = Input(shape=(1,), name='item_id')

    # user bias
    u_bias = None
    if user_bias:
        u_bias = self.__bias_term(input_id=input_user_id,
                                  unique_id_num=self.unique_user_num,
                                  l2=0, latent_layer_name='user_bias')
    # item bias
    i_bias = None
    if item_bias:
        i_bias = self.__bias_term(input_id=input_item_id,
                                  unique_id_num=self.unique_item_num,
                                  l2=0, latent_layer_name='item_bias')
    # cross term
    crs_trm = None
    if cross_term:
        # note: unlike make_model_dmf_deepLatent, hidden_l2s is not forwarded here
        crs_u = self.__single_term(input_id=input_user_id,
                                   unique_id_num=self.unique_user_num,
                                   output_dim=latent_num, l2=cross_term_l2,
                                   hidden_nodes=hidden_nodes_latent,
                                   hidden_dropout_rates=hidden_dropout_rates,
                                   latent_layer_name='user_latent')
        crs_i = self.__single_term(input_id=input_item_id,
                                   unique_id_num=self.unique_item_num,
                                   output_dim=latent_num, l2=cross_term_l2,
                                   hidden_nodes=hidden_nodes_latent,
                                   hidden_dropout_rates=hidden_dropout_rates,
                                   latent_layer_name='item_latent')
        crs_trm = self.__cross_term(crs_u, crs_i, merge='sum',
                                    hidden_nodes=hidden_nodes_crossterm,
                                    hidden_l2s=hidden_l2,
                                    hidden_dropout_rates=hidden_dropout_rates)

    # concatenate
    def append_isNotNone(lst, v):
        tls = copy.copy(lst)
        if v is not None:
            tls.append(v)
        return tls
    concats = []
    concats = append_isNotNone(concats, u_bias)
    concats = append_isNotNone(concats, i_bias)
    concats = append_isNotNone(concats, crs_trm)

    if len(concats) > 1:
        y = Add(name='add_bias_crossTerm')(concats)
    else:
        y = concats[0]

    # add mean
    y = Lambda(lambda x: x * self.rating_scale + self.all_rating_mean,
               name='scaling')(y)

    self.model = Model(inputs=[input_user_id, input_item_id], outputs=y)
    return
Python
def make_model_dmf_residualDeepCrossterm(self, user_bias=True, item_bias=True,
                                         cross_term=True, latent_num=10,
                                         cross_term_l2=0,
                                         hidden_nodes_crossterm=[10],
                                         hidden_l2=[0],
                                         hidden_dropout_rates=[]):
    '''
    make deep matrix factorization model with keras,
    with a residual connection around the deep cross term.
    rating = all_mean + user_bias + item_bias + cross_term
    '''
    input_user_id = Input(shape=(1,), name='user_id')
    input_item_id = Input(shape=(1,), name='item_id')

    # user bias
    u_bias = None
    if user_bias:
        u_bias = self.__bias_term(input_id=input_user_id,
                                  unique_id_num=self.unique_user_num,
                                  l2=0, latent_layer_name='user_bias')
    # item bias
    i_bias = None
    if item_bias:
        i_bias = self.__bias_term(input_id=input_item_id,
                                  unique_id_num=self.unique_item_num,
                                  l2=0, latent_layer_name='item_bias')
    # cross term
    crs_trm = None
    res_crs_trm = None
    if cross_term:
        crs_u = self.__single_term(input_id=input_user_id,
                                   unique_id_num=self.unique_user_num,
                                   output_dim=latent_num, l2=cross_term_l2,
                                   latent_layer_name='user_latent')
        crs_i = self.__single_term(input_id=input_item_id,
                                   unique_id_num=self.unique_item_num,
                                   output_dim=latent_num, l2=cross_term_l2,
                                   latent_layer_name='item_latent')
        res_crs_trm = self.__res_cross_term(crs_u, crs_i, merge='sum',
                                            hidden_nodes=hidden_nodes_crossterm,
                                            hidden_l2s=hidden_l2,
                                            hidden_dropout_rates=hidden_dropout_rates)

    # concatenate
    def append_isNotNone(lst, v):
        tls = copy.copy(lst)
        if v is not None:
            tls.append(v)
        return tls
    concats = []
    concats = append_isNotNone(concats, u_bias)
    concats = append_isNotNone(concats, i_bias)
    concats = append_isNotNone(concats, res_crs_trm)

    if len(concats) > 1:
        y = Add(name='add_bias_crossTerm')(concats)
    else:
        y = concats[0]

    # add mean
    y = Lambda(lambda x: x * self.rating_scale + self.all_rating_mean,
               name='scaling')(y)

    self.model = Model(inputs=[input_user_id, input_item_id], outputs=y)
    return
Python
def __cross_term(self, input1, input2, merge='sum', hidden_nodes=[],
                 activation='lrelu', activation_last='lrelu', hidden_l2s=[],
                 dropout_rate=0, hidden_dropout_rates=[]):
    '''
    input1 and input2 must be already embedded.
    (input1, input2) -> Multiply -> dropout
        -> hidden_layer (-> dense -> activation -> dropout -> ...
                         -> dense -> activation_last -> dropout)
        -> merge (ex. sum, mean)
        -> output
    '''
    multiplied = Multiply()([input1, input2])

    # hidden layer
    hl = multiplied
    for ih, h_dim in enumerate(hidden_nodes):
        l2_h = 0 if len(hidden_l2s) == 0 else hidden_l2s[ih]
        # dense
        hl = Dense(h_dim, kernel_regularizer=regularizers.l2(l2_h))(hl)
        # activation
        act = activation if ih != len(hidden_nodes) - 1 else activation_last
        hl = KerasBase.activation(act)(hl)
        # dropout
        drp_rt = 0 if len(hidden_dropout_rates) == 0 else hidden_dropout_rates[ih]
        hl = Dropout(drp_rt)(hl)

    # merge layer
    if merge == 'sum':
        self.__count_call_sum_model += 1
        crs_trm = KerasBase.sum_layer(
            name='sum' + str(self.__count_call_sum_model))(hl)
    elif merge == 'mean':
        self.__count_call_mean_model += 1
        crs_trm = KerasBase.mean_layer(
            name='mean' + str(self.__count_call_mean_model))(hl)
    return crs_trm
Python
def __res_cross_term(self, input1, input2, merge='sum', hidden_nodes=[],
                     activation='lrelu', activation_last='lrelu', hidden_l2s=[],
                     dropout_rate=0, hidden_dropout_rates=[]):
    '''
    input1 and input2 must be already embedded.
    (input1, input2) -> Multiply -> dropout
        -> hidden_layer (-> dense -> activation -> dropout -> ...
                         -> dense -> activation_last -> dropout)
        -> merge (ex. sum, mean)
        -> output
    '''
    multiplied = Multiply()([input1, input2])

    # hidden layer
    hl = multiplied
    for ih, h_dim in enumerate(hidden_nodes):
        l2_h = 0 if len(hidden_l2s) == 0 else hidden_l2s[ih]
        # dense
        hl = Dense(h_dim, kernel_regularizer=regularizers.l2(l2_h))(hl)
        # activation
        act = activation if ih != len(hidden_nodes) - 1 else activation_last
        hl = KerasBase.activation(act)(hl)
        # dropout
        drp_rt = 0 if len(hidden_dropout_rates) == 0 else hidden_dropout_rates[ih]
        hl = Dropout(drp_rt)(hl)

    # add (residual connection: raw product added back to hidden-layer output)
    hl = Add()([multiplied, hl])

    # merge layer
    if merge == 'sum':
        self.__count_call_sum_model += 1
        crs_trm = KerasBase.sum_layer(
            name='sum' + str(self.__count_call_sum_model))(hl)
    elif merge == 'mean':
        self.__count_call_mean_model += 1
        crs_trm = KerasBase.mean_layer(
            name='mean' + str(self.__count_call_mean_model))(hl)
    return crs_trm
Python
def dataset_1(datafilename='ratings_test.csv', rating_num_threshold=10,
              train_rate=0.8, analysis=False):
    '''
    return train/test pandas DataFrames with
    columns = [userId, movieId, rating, timestamp]
    '''
    ############
    # constant
    ############
    RATING_NUM_THRESHOLD = rating_num_threshold
    FILE_PATH = os.path.join('.', 'movieLens_data', datafilename)

    ####################
    # movieLens data set
    ####################
    ml_data = movieLens.RatingDataSet()

    # read file
    rating_df = ml_data.read_rating(FILE_PATH)

    # delete small number of rating
    rating_df = ml_data.delete_small_number_rating(rating_df, RATING_NUM_THRESHOLD, target='userId')
    rating_df = ml_data.delete_small_number_rating(rating_df, RATING_NUM_THRESHOLD, target='movieId')
    rating_df = ml_data.delete_small_number_rating(rating_df, RATING_NUM_THRESHOLD, target='userId')

    # analysis
    if analysis:
        ml_data.rating_analysis(rating_df)

    ########################
    # train and test data
    ########################
    # split
    rating_df_train, rating_df_test = ml_data.train_test_split_rating_df(
        rating_df, train_rate=train_rate, target='userId', random_state=200)
    # rating_df_train, rating_df_test = ml_data.train_test_split_rating_df(rating_df, train_rate=0.8, target='movieId')

    # delete small number of rating
    rating_df_train = ml_data.delete_small_number_rating(rating_df_train, RATING_NUM_THRESHOLD, target='userId')
    rating_df_train = ml_data.delete_small_number_rating(rating_df_train, RATING_NUM_THRESHOLD, target='movieId')
    rating_df_train = ml_data.delete_small_number_rating(rating_df_train, RATING_NUM_THRESHOLD, target='userId')
    # rating_df_test = ml_data.delete_small_number_rating(rating_df_test, RATING_NUM_THRESHOLD, target='userId')
    rating_df_test = ml_data.delete_small_number_rating(rating_df_test, RATING_NUM_THRESHOLD, target='movieId')
    rating_df_test = ml_data.delete_small_number_rating(rating_df_test, RATING_NUM_THRESHOLD, target='userId')
    # rating_df_test = ml_data.delete_include_only_test_rating_df(rating_df_train, rating_df_test, target='userId')
    rating_df_test = ml_data.delete_include_only_test_rating_df(rating_df_train, rating_df_test, target='movieId')

    # analysis
    if analysis:
        ml_data.rating_analysis(rating_df_train)
        ml_data.rating_analysis(rating_df_test)

    ################
    # rating matrix
    ################
    #rating_mtrx_train = ml_data.rating_matrix(rating_df_train)
    #rating_mtrx_test = ml_data.rating_matrix(rating_df_test)

    #return rating_df_train, rating_df_test, rating_mtrx_train, rating_mtrx_test
    return rating_df_train, rating_df_test
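With the helpers above, a run might look like the following (a sketch; the CSV location and the split/threshold helpers are assumed to behave as their names suggest):

train_df, test_df = dataset_1(datafilename='ratings_test.csv',
                              rating_num_threshold=10, train_rate=0.8)
print(train_df.columns.tolist())   # ['userId', 'movieId', 'rating', 'timestamp']
print(len(train_df), len(test_df))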
Python
def tmpdir(tmp_path: Path) -> LEGACY_PATH:
    """Return a temporary directory path object which is unique to each test
    function invocation, created as a sub directory of the base temporary
    directory.

    By default, a new base temporary directory is created each test session,
    and old bases are removed after 3 sessions, to aid in debugging. If
    ``--basetemp`` is used then it is cleared each session. See
    :ref:`base temporary directory`.

    The returned object is a `legacy_path`_ object.

    .. _legacy_path: https://py.readthedocs.io/en/latest/path.html
    """
    return legacy_path(tmp_path)
Python
def Cache_makedir(self: Cache, name: str) -> LEGACY_PATH:
    """Return a directory path object with the given name.

    Same as :func:`mkdir`, but returns a legacy py path instance.
    """
    return legacy_path(self.mkdir(name))
Python
def TerminalReporter_startdir(self: TerminalReporter) -> LEGACY_PATH:
    """The directory from which pytest was invoked.

    Prefer to use ``startpath`` which is a :class:`pathlib.Path`.

    :type: LEGACY_PATH
    """
    return legacy_path(self.startpath)
Python
def Config_invocation_dir(self: Config) -> LEGACY_PATH:
    """The directory from which pytest was invoked.

    Prefer to use :attr:`invocation_params.dir <InvocationParams.dir>`,
    which is a :class:`pathlib.Path`.

    :type: LEGACY_PATH
    """
    return legacy_path(str(self.invocation_params.dir))
Python
def create_sales_invoice_qr(doc, method):
    """Create QR Code after inserting Sales Inv"""
    region = get_region(doc.company)
    if region not in ['Saudi Arabia']:
        return

    # if QR Code field not present, do nothing
    if not hasattr(doc, 'qr_code'):
        return

    # Don't create QR Code if it already exists
    qr_code = doc.get("qr_code")
    if qr_code and frappe.db.exists({"doctype": "File", "file_url": qr_code}):
        return

    meta = frappe.get_meta('Sales Invoice')

    for field in meta.get_image_fields():
        if field.fieldname == 'qr_code':
            from urllib.parse import urlencode

            # Creating public url to print format
            default_print_format = frappe.db.get_value(
                'Property Setter',
                dict(property='default_print_format', doc_type=doc.doctype),
                "value")

            # System Language
            language = frappe.get_system_settings('language')

            params = urlencode({
                'format': default_print_format or 'Standard',
                '_lang': language,
                'key': doc.get_signature()
            })

            # creating qr code for the url
            url = f"{ frappe.utils.get_url() }/{ 'Sales%20Invoice' }/{ doc.name }?{ params }"
            qr_image = io.BytesIO()
            url = qr_create(url, error='L')
            url.png(qr_image, scale=2, quiet_zone=1)

            # making file
            filename = f"QR-CODE-{doc.name}.png".replace(os.path.sep, "__")
            _file = frappe.get_doc({
                "doctype": "File",
                "file_name": filename,
                "is_private": 0,
                "content": qr_image.getvalue(),
                "attached_to_doctype": doc.get("doctype"),
                "attached_to_name": doc.get("name"),
                "attached_to_field": "qr_code"
            })
            _file.save()

            # assigning to document
            doc.db_set('qr_code', _file.file_url)
            doc.notify_update()
            break
Python
def temperature_saturation(self, p=None):
    """Yields Tsat given a pressure p."""
    # module deals with pressure in MPa
    if p is None:
        p = self.p.MPa
    else:
        p = Pressure(p).MPa
    if p < Pressure(611.213).unit('Pa').MPa or p > self.pc:
        raise ValueError('Pressure out of range.')
    # table 34
    ni = array([1167.0521452767, -724213.16703206, -17.073846940092,
                12020.82470247, -3232555.0322333, 14.91510861353,
                -4823.2657361591, 405113.40542057, -0.23855557567849,
                650.17534844798], dtype='d')
    beta = p ** 0.25
    E = 1 * beta ** 2 + ni[2] * beta + ni[5]
    F = ni[0] * beta ** 2 + ni[3] * beta + ni[6]
    G = ni[1] * beta ** 2 + ni[4] * beta + ni[7]
    D = 2 * G / (-F - (F ** 2 - 4 * E * G) ** 0.5)
    return Temperature((ni[9] + D -
                        ((ni[9] + D) ** 2 -
                         4 * (ni[8] + ni[9] * D)) ** 0.5) * 0.5)
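Since pressure_saturation below is the inverse of this correlation, a round-trip makes a convenient sanity check. A sketch, assuming a Water-like class exposes both methods (the class name and constructor are hypothetical; only the two methods themselves are from the source):

w = Water(p=Pressure(1).unit('MPa'), T=Temperature(453.0))  # hypothetical constructor
Tsat = w.temperature_saturation(1.0)        # saturation temperature at 1 MPa, in K
p_back = w.pressure_saturation(Tsat).MPa    # inverting should recover ~1.0 MPa
assert abs(p_back - 1.0) < 1e-6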
Python
def pressure_saturation(self, T=None):
    """Yields Psat given a temperature T"""
    if T is None:
        T = self.T
    else:
        T = Temperature(T)
    if T < 273.15 or T > self.Tc:
        raise ValueError('Temperature out of range.')
    # table 34
    ni = array([1167.0521452767, -724213.16703206, -17.073846940092,
                12020.82470247, -3232555.0322333, 14.91510861353,
                -4823.2657361591, 405113.40542057, -0.23855557567849,
                650.17534844798], dtype='d')
    v = T + ni[8] / (T - ni[9])
    A = 1 * v ** 2 + ni[0] * v + ni[1]
    B = ni[2] * v ** 2 + ni[3] * v + ni[4]
    C = ni[5] * v ** 2 + ni[6] * v + ni[7]
    return Pressure((2 * C / (-B + (B ** 2 - 4 * A * C) ** 0.5)) ** 4
                    ).unit('MPa')
Python
def _is_in_region(self):
    """Finds a region for the (T, p) point (see IAPWS-IF97 for details).

    The usefulness of these regions is to divide the physical properties
    of water into different sets of coefficients and equations."""
    # for the 2 - 3 boundary line
    ni = array([348.05185628969, -1.1671859879975, 0.0010192970039326,
                572.54459862746, 13.91883977887], dtype='d')
    theta = self.T
    pressure23 = Pressure(ni[0] + ni[1] * theta +
                          ni[2] * theta ** 2).unit('MPa')
    # exceptional cases
    if self.T == self.Tt and self.p == self.pt:
        return 1
    # regular cases
    if self.T >= Temperature(273.15) and self.T <= Temperature(623.15):
        if self.T < self.temperature_saturation(self.p):
            return 1
        else:
            return 2
    elif self.T > Temperature(623.15) and self.T <= Temperature(1073.15):
        if self.p > pressure23:
            return 3
        else:
            return 2
    elif self.T >= Temperature(1073.15) and self.T <= Temperature(2273.15):
        return 5
    else:
        raise Exception('Cannot assign region to the parameters p = ' +
                        str(self.p) + ' T = ' + str(self.T) + ' given.')
Python
def gibbs_energy(self, p=None, T=None):
    """Returns the Gibbs Energy given p and T in kJ/kg."""
    if p is None:
        p = self.p
    else:
        p = Pressure(p)
    if T is None:
        T = self.T
    else:
        T = Temperature(T)
    if self._is_in_region() == 1:
        return self._basic_equation1('gamma') * self.R * T
    elif self._is_in_region() == 2:
        return self._basic_equation2('gamma') * self.R * T
    elif self._is_in_region() == 3:
        pass
    elif self._is_in_region() == 4:
        pass
    elif self._is_in_region() == 5:
        pass
Python
def enthalpy(self, p=None, T=None):
    """Returns the enthalpy given p and T in kJ/kg."""
    if p is None:
        p = self.p
    else:
        p = Pressure(p)
    if T is None:
        T = self.T
    else:
        T = Temperature(T)
    if self._is_in_region() == 1:
        return Enthalpy(self._basic_equation1('gamma_tau') * 1386 * self.R)
    elif self._is_in_region() == 2:
        return Enthalpy(540 * self.R * (
            self._basic_equation2('gamma_0_tau') +
            self._basic_equation2('gamma_r_tau')))
    elif self._is_in_region() == 3:
        # for region 3 rho needs to be solved numerically
        reg3_solved = self._basic_equation3('PHI_delta')
        delta = reg3_solved[1] / self.rhoc
        tau = self.Tc / self.T
        return Enthalpy((tau * self._basic_equation3('PHI_tau')[0] +
                         delta * reg3_solved[0]) * self.R * self.T)
    elif self._is_in_region() == 4:
        return None
    elif self._is_in_region() == 5:
        pi = self.p / Pressure(1).unit('MPa')
        tau = Temperature(1000) / self.T
        return (tau * (self._basic_equation5('gamma_0_tau') +
                       self._basic_equation5('gamma_r_tau')) *
                self.R * self.T)
Python
def heat_capacity(self, p=None, T=None):
    """Returns the isobaric heat capacity given p and T in kJ/(kg K)."""
    if p is None:
        p = self.p
    else:
        p = Pressure(p)
    if T is None:
        T = self.T
    else:
        T = Temperature(T)
    if self._is_in_region() == 1:
        tau = 1386 / T
        return (-1 * tau * tau * self.R *
                self._basic_equation1('gamma_tau_tau'))
    elif self._is_in_region() == 2:
        pass
    elif self._is_in_region() == 3:
        # rho needs to be solved numerically first (as in enthalpy);
        # the original referenced an undefined name `rho` here
        rho = self._basic_equation3('PHI_delta')[1]
        delta = rho / self.rhoc
        tau = self.Tc / self.T
        return (- tau ** 2 / self._basic_equation3('PHI_tau_tau') +
                (delta * self._basic_equation3('PHI_delta') -
                 delta * tau * self._basic_equation3('PHI_tau_delta')) ** 2 /
                (2 * delta * self._basic_equation3('PHI_delta') +
                 delta ** 2 * self._basic_equation3('PHI_delta_delta'))
                ) * self.R
    elif self._is_in_region() == 4:
        pass
    elif self._is_in_region() == 5:
        pass
    return -1
Python
def heat_capacity(self, T):
    """
    Calculates the specific heat capacity in J/(mol K).
    """
    Ta = np.array([1, T, T ** 2, T ** 3, T ** 4], dtype='d')
    if T >= self._T_limit_low and T <= 1000:
        return np.dot(self._low_coefs[:5], Ta) * _R
    elif T > 1000 and T <= self._T_limit_high:
        return np.dot(self._high_coefs[:5], Ta) * _R
    else:
        raise ValueError("Temperature out of range")
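This evaluates the standard NASA 7-coefficient polynomial, cp/R = a1 + a2*T + a3*T**2 + a4*T**3 + a5*T**4, with separate coefficient sets below and above 1000 K. A self-contained illustration with made-up coefficients (not real species data):

import numpy as np

_R = 8.314462  # gas constant, J/(mol K)

def cp_nasa7(T, coefs):
    # cp = (a1 + a2*T + a3*T^2 + a4*T^3 + a5*T^4) * R
    Ta = np.array([1, T, T ** 2, T ** 3, T ** 4], dtype='d')
    return np.dot(coefs[:5], Ta) * _R

demo_coefs = [3.5, 1e-4, -2e-8, 0.0, 0.0]   # hypothetical values
print(cp_nasa7(300.0, demo_coefs))          # ~3.53 * R, about 29.3 J/(mol K)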
Python
def heat_capacity_massic(self, T):
    """
    Computes the specific heat capacity in J/(kg K)
    for a given temperature.
    """
    return self.cpo(T) / self.mm
Python
def enthalpy(self, T):
    """
    Computes the sensible enthalpy in J/mol.
    """
    Ta = np.array([1, T / 2, T ** 2 / 3, T ** 3 / 4, T ** 4 / 5, 1 / T], 'd')
    if T >= self._T_limit_low and T <= 1000:
        partial = np.dot(self._low_coefs[:6], Ta) * _R * T
    elif T > 1000 and T <= self._T_limit_high:
        partial = np.dot(self._high_coefs[:6], Ta) * _R * T
    else:
        raise ValueError("Temperature out of range")
    return partial - self.h_formation
Python
def enthalpy_massic(self, T):
    """
    Computes the sensible enthalpy in J/kg.
    """
    Ta = np.array([1, T / 2, T ** 2 / 3, T ** 3 / 4, T ** 4 / 5, 1 / T], 'd')
    if T >= self._T_limit_low and T <= 1000:
        partial = np.dot(self._low_coefs[:6], Ta) * _R * T / self.mm
    elif T > 1000 and T <= self._T_limit_high:
        partial = np.dot(self._high_coefs[:6], Ta) * _R * T / self.mm
    else:
        raise ValueError("Temperature out of range")
    return partial - self.h_formation
Python
def entropy(self, T):
    """
    Computes entropy in J/(mol K).
    """
    Ta = np.array([np.log(T), T, T ** 2 / 2, T ** 3 / 3, T ** 4 / 4, 0, 1],
                  'd')  # right
    if T >= self._T_limit_low and T <= 1000:
        return np.dot(self._low_coefs, Ta) * _R
    elif T > 1000 and T <= self._T_limit_high:
        return np.dot(self._high_coefs, Ta) * _R
    else:
        raise ValueError("Temperature out of range")
Python
def gibbs_energy(self, T):
    """
    Computes the Gibbs free energy from the sensible enthalpy in J/mol.
    """
    if T >= self._T_limit_low and T < self._T_limit_high:
        return self.enthalpy(T) - self.entropy(T) * T
    else:
        raise ValueError("Temperature out of range")
Python
def list_compound(self, cas_or_formula):
    """
    Takes a formula string or a CAS number as input and outputs all
    matching results. Helpful for interactive use of the database.
    """
    # determine whether the input is a CAS number or a formula:
    # any alphabetic character means it is a formula
    for char in cas_or_formula:
        if char.isalpha() is True:
            formula = cas_or_formula
            cas = None
            break
    else:
        cas = cas_or_formula
        formula = None

    matches = []
    if cas is not None:
        for specie in self.db.findall('specie'):
            if cas == str(specie.get('CAS')):
                try:
                    specie.find('phase')
                    for phases in specie.findall('phase'):
                        matches.append(phases.find('formula').text)
                except:
                    pass
        return matches
    elif formula is not None:
        formula = formula.upper()
        for specie in self.db.findall('specie'):
            try:
                specie.find('phase')
                for phases in specie.findall('phase'):
                    if formula in phases.find('formula').text.upper():
                        matches.append(phases.find('formula').text)
            except:
                pass
        return matches
Python
def _delta_enthalpy(self, T=None):
    """Reaction deltaH in J/mol"""
    if T is not None:
        self.T = T
    delta_h = 0
    for (coefficient, compound) in zip(self._rcoefs, self.reagents):
        delta_h -= coefficient * compound.enthalpy_engineering(self.T)
    for (coefficient, compound) in zip(self._pcoefs, self.products):
        delta_h += coefficient * compound.enthalpy_engineering(self.T)
    return delta_h
Python
def _delta_gibbs_energy(self, T=None):
    """Reaction deltaG in J/mol"""
    if T is not None:
        self.T = T
    deltag = 0
    for (coef, comp) in zip(self._rcoefs, self.reagents):
        deltag -= coef * comp.gibbs_energy(self.T)
    for (coef, comp) in zip(self._pcoefs, self.products):
        deltag += coef * comp.gibbs_energy(self.T)
    return deltag
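A common follow-on, sketched here as standard thermodynamics rather than as part of this class: the equilibrium constant from the reaction deltaG.

# K = exp(-deltaG / (R*T)), with deltaG in J/mol and T in K.
import math

def equilibrium_constant(delta_g, T, R=8.314462618):
    return math.exp(-delta_g / (R * T))

print(equilibrium_constant(-50000.0, 298.15))  # >> 1: product-favoured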
Python
def is_valid_rut(rut: str) -> bool:
    """Determines if a given rut is valid

    Arguments:
        rut {str} -- Complete rut, including verification digit.
                     It might contain dots and a dash.

    Returns:
        bool -- True if rut is valid. False otherwise.

    Raises:
        ValueError: when input is not valid to be processed.
    """
    __raise_error_if_rut_input_format_not_valid(rut)
    rut = __clean_rut(rut)
    return get_verification_digit(rut[:-1]) == rut[-1]
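The helpers here are private to the module, but the check itself is the standard Chilean modulo-11 algorithm; a sketch of what get_verification_digit presumably computes:

# Standard Chilean RUT check digit: weight the digits right-to-left with the
# cycling factors 2..7, then map remainder 11 -> '0' and 10 -> 'K'.
def verification_digit(rut_body: str) -> str:
    factors = [2, 3, 4, 5, 6, 7]
    total = sum(int(d) * factors[i % 6]
                for i, d in enumerate(reversed(rut_body)))
    rest = 11 - (total % 11)
    return {11: '0', 10: 'K'}.get(rest, str(rest))

print(verification_digit("12345678"))  # -> '5'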
Python
def send_req_ircc(self, params, log_errors=True):
    """Send an IRCC command via HTTP to Sony Bravia."""
    headers = {'SOAPACTION': '"urn:schemas-sony-com:service:IRCC:1#X_SendIRCC"'}
    if self._psk is not None:
        headers['X-Auth-PSK'] = self._psk
    root = Element('s:Envelope',
                   {"xmlns:s": "http://schemas.xmlsoap.org/soap/envelope/",
                    "s:encodingStyle": "http://schemas.xmlsoap.org/soap/encoding/"})
    body = SubElement(root, "s:Body")
    sendIRCC = SubElement(body, "u:X_SendIRCC",
                          {"xmlns:u": "urn:schemas-sony-com:service:IRCC:1"})
    irccCode = SubElement(sendIRCC, "IRCCCode")
    irccCode.text = params
    xml_str = tostring(root, encoding='utf8')
    content = None  # stays None if the request fails
    try:
        response = requests.post('http://' + self._host + '/sony/IRCC',
                                 headers=headers,
                                 cookies=self._cookies,
                                 data=xml_str,
                                 timeout=TIMEOUT)
    except requests.exceptions.HTTPError as exception_instance:
        if log_errors:
            _LOGGER.error("HTTPError: " + str(exception_instance))
    except requests.exceptions.Timeout as exception_instance:
        if log_errors:
            _LOGGER.error("Timeout occurred: " + str(exception_instance))
    except Exception as exception_instance:  # pylint: disable=broad-except
        if log_errors:
            _LOGGER.error("Exception: " + str(exception_instance))
    else:
        content = response.content
    return content
Python
def bravia_req_json(self, url, params, log_errors=True):
    """Send request command via HTTP json to Sony Bravia."""
    headers = {}
    if self._psk is not None:
        headers['X-Auth-PSK'] = self._psk
    built_url = 'http://{}/{}'.format(self._host, url)
    html = None  # stays None if the request fails
    try:
        response = requests.post(built_url,
                                 data=params.encode("UTF-8"),
                                 cookies=self._cookies,
                                 timeout=TIMEOUT,
                                 headers=headers)
    except requests.exceptions.HTTPError as exception_instance:
        if log_errors:
            _LOGGER.error("HTTPError: " + str(exception_instance))
    except Exception as exception_instance:  # pylint: disable=broad-except
        if log_errors:
            _LOGGER.error("Exception: " + str(exception_instance))
    else:
        html = json.loads(response.content.decode('utf-8'))
    return html
Python
def load_source_list(self):
    """Load source list from Sony Bravia."""
    original_content_list = []
    resp = self.bravia_req_json("sony/avContent",
                                self._jdata_build("getSourceList",
                                                  {"scheme": "tv"}))
    if not resp.get('error'):
        results = resp.get('result')[0]
        for result in results:
            # tv:dvbc = via cable, tv:dvbt = via DTT,
            # tv:isdb* = Japanese ISDB broadcast variants
            if result['source'] in ['tv:dvbc', 'tv:dvbt', 'tv:isdbt',
                                    'tv:isdbbs', 'tv:isdbcs']:
                source = self.get_source(result['source'])
                original_content_list.extend(source)

    resp = self.bravia_req_json("sony/avContent",
                                self._jdata_build("getSourceList",
                                                  {"scheme": "extInput"}))
    if not resp.get('error'):
        results = resp.get('result')[0]
        for result in results:
            # physical inputs
            if result['source'] in ('extInput:hdmi', 'extInput:composite',
                                    'extInput:component'):
                data = self._jdata_build("getContentList", result)
                resp = self.bravia_req_json("sony/avContent", data)
                if not resp.get('error'):
                    original_content_list.extend(resp.get('result')[0])

    resp = self.bravia_req_json("sony/appControl",
                                self._jdata_build("getApplicationList", None))
    if not resp.get('error'):
        results = resp.get('result')[0]
        original_content_list += results

    return_value = collections.OrderedDict()
    for content_item in original_content_list:
        return_value[content_item['title']] = content_item['uri']
    return return_value
Python
def _recreate_auth_cookie(self):
    """The default cookie is for URL/sony.
    For some commands we need it for the root path."""
    cookies = requests.cookies.RequestsCookieJar()
    cookies.set("auth", self._cookies.get("auth"))
    return cookies
Python
def load_app_list(self, log_errors=True):
    """Get the list of installed apps"""
    headers = {}
    if self._psk is not None:
        headers['X-Auth-PSK'] = self._psk
    parsed_objects = {}
    url = 'http://{}/DIAL/sony/applist'.format(self._host)
    try:
        cookies = self._recreate_auth_cookie()
        response = requests.get(url, cookies=cookies,
                                timeout=TIMEOUT, headers=headers)
    except requests.exceptions.HTTPError as exception_instance:
        if log_errors:
            _LOGGER.error("HTTPError: " + str(exception_instance))
    except Exception as exception_instance:  # pylint: disable=broad-except
        if log_errors:
            _LOGGER.error("Exception: " + str(exception_instance))
    else:
        content = response.content
        from xml.dom import minidom
        parsed_xml = minidom.parseString(content)
        for obj in parsed_xml.getElementsByTagName("app"):
            if obj.getElementsByTagName("name")[0].firstChild and \
               obj.getElementsByTagName("id")[0].firstChild:
                name = obj.getElementsByTagName("name")[0]
                id_elm = obj.getElementsByTagName("id")[0]
                parsed_objects[str(name.firstChild.nodeValue)] = \
                    str(id_elm.firstChild.nodeValue)
    return parsed_objects
Python
def turn_on(self):
    """Turn the media player on."""
    self._wakeonlan()
    # Try using the power-on command in case the WOL doesn't work
    if self.get_power_status() != 'active':
        command = self.get_command_code('TvPower')
        if command is None:
            command = 'AAAAAQAAAAEAAAAuAw=='
        self.send_req_ircc(command)
Python
def turn_on_command(self):
    """Turn the media player on using command.
    Only confirmed working on Android. Can be used when WOL is not
    available."""
    if self.get_power_status() != 'active':
        command = self.get_command_code('TvPower')
        if command is not None:  # guard: the code may be unknown
            self.send_req_ircc(command)
        self.bravia_req_json("sony/system",
                             self._jdata_build("setPowerStatus",
                                               {"status": "true"}))
Python
def calc_time(self, *times):
    """Calculate the sum of times, value is returned in HH:MM."""
    total_secs = 0
    for tms in times:
        time_parts = [int(s) for s in tms.split(':')]
        total_secs += (time_parts[0] * 60 + time_parts[1]) * 60 + \
                      time_parts[2]
    total_secs, sec = divmod(total_secs, 60)
    hour, minute = divmod(total_secs, 60)
    if hour >= 24:  # set 24:10 to 00:10
        hour -= 24
    return "%02d:%02d" % (hour, minute)
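A standalone sketch of the same summation (without self), showing the past-midnight wrap:

def sum_times(*times):
    total_secs = 0
    for tms in times:
        h, m, s = (int(p) for p in tms.split(':'))
        total_secs += h * 3600 + m * 60 + s
    minutes, _ = divmod(total_secs, 60)
    hour, minute = divmod(minutes, 60)
    return "%02d:%02d" % (hour % 24, minute)

print(sum_times("23:30:00", "01:45:30"))  # -> '01:15' (wrapped past midnight)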
Python
def playing_time(self, startdatetime, durationsec):
    """Give starttime, endtime and percentage played.

    Start time format: 2017-03-24T00:00:00+0100
    Using that, we calculate the number of seconds to the end time.
    """
    date_format = "%Y-%m-%dT%H:%M:%S"
    now = datetime.now()
    stripped_tz = startdatetime[:-5]
    start_date_time = datetime.strptime(stripped_tz, date_format)
    start_time = time.strptime(stripped_tz, date_format)
    try:
        playingtime = now - start_date_time
    except TypeError:
        playingtime = now - datetime(*start_time[0:6])
    try:
        starttime = datetime.time(start_date_time)
    except TypeError:
        starttime = datetime.time(datetime(*start_time[0:6]))
    duration = time.strftime('%H:%M:%S', time.gmtime(durationsec))
    endtime = self.calc_time(str(starttime), str(duration))
    starttime = starttime.strftime('%H:%M')
    perc_playingtime = int(round((playingtime.seconds / durationsec) * 100, 0))
    return_value = {}
    return_value['start_time'] = starttime
    return_value['end_time'] = endtime
    return_value['media_position'] = playingtime.seconds
    return_value['media_position_perc'] = perc_playingtime
    return return_value
Python
def load_csv(input_file, col_act, col_pred, col_weight=None, delimiter=",",
             quotechar="\"", header=True):
    """
    Loads the actual, predicted and weight (if present) values from the CSV
    file as column lists.

    :param input_file: the CSV file to load actual/predicted labels from
    :type input_file: str
    :param col_act: the 1-based index of the column that contains the
                    actual/ground truth labels
    :type col_act: int
    :param col_pred: the 1-based index of the column that contains the
                     predicted labels
    :type col_pred: int
    :param col_weight: the 1-based index of the (optional) column containing
                       the weights (0-1) for the predictions, default is None
                       (ie a weight of 1 is assumed)
    :type col_weight: int
    :param delimiter: the delimiter to use for the CSV file
    :type delimiter: str
    :param quotechar: the quote character to use for the CSV file
    :type quotechar: str
    :param header: whether the CSV file has a header
    :type header: bool
    :return: the actual, predicted and weight (None if not present) columns
             as lists
    :rtype: tuple
    """
    actual = []
    predicted = []
    weight = None if col_weight is None else []
    with open(input_file, "r") as inputf:
        reader = csv.reader(inputf, delimiter=delimiter, quotechar=quotechar)
        first = True
        for row in reader:
            if first and header:
                first = False
                continue
            actual.append(row[col_act - 1])
            predicted.append(row[col_pred - 1])
            if col_weight is not None:
                weight.append(row[col_weight - 1])
    return actual, predicted, weight
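A hypothetical usage sketch (file name and rows invented for illustration); the column indices are 1-based, matching the row[col - 1] lookups above:

import csv
import os
import tempfile

fd, path = tempfile.mkstemp(suffix=".csv")
os.close(fd)
with open(path, "w", newline="") as f:
    csv.writer(f).writerows([["actual", "predicted"],
                             ["cat", "cat"],
                             ["dog", "cat"]])
actual, predicted, weight = load_csv(path, 1, 2)
print(actual, predicted, weight)  # ['cat', 'dog'] ['cat', 'cat'] None
os.remove(path)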
Python
def _cell_to_string(self, c):
    """
    Turns the cell into a string, takes the maximum number of decimals
    into account.

    :param c: the cell value to convert
    :type c: object
    :return: the converted value
    :rtype: str
    """
    if (self.max_decimals > -1) and isinstance(c, float):
        cell_format = "%." + str(self.max_decimals) + "f"
        result = str(cell_format % c)
    else:
        result = str(c)
    return result
Python
def to_raw(self):
    """
    Returns the underlying data as list of list (row-based).

    :return: the underlying data
    :rtype: list
    """
    return self.data
Python
def to_list(self):
    """
    Returns the matrix result as list of list (row-based).

    :return: the result
    :rtype: list
    """
    result = []
    for r in self.data:
        r_new = []
        for c in r:
            r_new.append(self._cell_to_string(c))
        result.append(r_new)
    return result
Python
def to_csv(self, output_file=None, delimiter=",", quotechar="\"",
           quoting=csv.QUOTE_MINIMAL):
    """
    Writes the matrix to a CSV file.

    :param output_file: the CSV file to create
    :type output_file: str
    :param delimiter: the cell delimiter to use (default: ,)
    :type delimiter: str
    :param quotechar: the quoting character to use (default: ")
    :type quotechar: str
    :param quoting: the type of quoting to perform, default is
                    csv.QUOTE_MINIMAL
    :type quoting: int
    :return: None if output file provided, otherwise the generated CSV output
    :rtype: str
    """
    rows = self.to_list()
    if output_file is None:
        outputf = StringIO()
        writer = csv.writer(outputf, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        for r in rows:
            writer.writerow(r)
        return outputf.getvalue()
    else:
        # newline="" avoids blank lines between rows on Windows
        with open(output_file, "w", newline="") as outputf:
            writer = csv.writer(outputf, delimiter=delimiter,
                                quotechar=quotechar, quoting=quoting)
            for r in rows:
                writer.writerow(r)
        return None
Python
def to_plaintext(self, output_file=None):
    """
    Turns the matrix result into plain text.

    :param output_file: the file to create
    :type output_file: str
    :return: None if output file provided, otherwise the generated plain
             text output
    :rtype: str
    """
    widths = []
    num_rows = len(self.data)
    num_cols = 0
    for r in self.data:
        if len(widths) == 0:
            num_cols = len(r)
            for i in range(num_cols):
                widths.append(0)
        for x in range(num_cols):
            widths[x] = max(widths[x], len(self._cell_to_string(r[x])))

    result = ""
    for y in range(num_rows):
        r = self.data[y]
        for x in range(num_cols):
            value = self._cell_to_string(r[x])
            if (x == 0) or (y == 0):
                result += value.ljust(widths[x])
            else:
                result += value.rjust(widths[x])
            if x < num_cols - 1:
                result += self.separator
        result += "\n"

    if output_file is not None:
        with open(output_file, "w") as outputf:
            outputf.write(result)
        return None
    else:
        return result
Python
def generate(self, matrix_type=MatrixType.COUNTS, max_decimals=3):
    """
    Generates the confusion matrix and returns it as a MatrixResult.

    :param matrix_type: the type of matrix to generate
    :type matrix_type: MatrixType
    :param max_decimals: the maximum decimals after the decimal point to use
                         for float values, -1 for no restrictions
    :type max_decimals: int
    :return: the generated matrix
    :rtype: MatrixResult
    """
    result = []
    indices = dict()
    for i, l in enumerate(self.labels):
        indices[l] = i + 1
    # header
    row = [self.corner]
    for l in self.labels:
        row.append(self.predicted_prefix + l)
    result.append(row)
    # data
    for l in self.labels:
        row = [self.actual_prefix + l]
        for i in range(len(self.labels)):
            row.append(0)
        result.append(row)
    # fill in counts
    for i in range(len(self.actual)):
        index_act = indices[self.actual[i]]
        index_pred = indices[self.predicted[i]]
        weight = 1 if self.weight is None else self.weight[i]
        result[index_act][index_pred] += weight
    # post-process cells? ("total" avoids shadowing the builtin sum)
    if matrix_type == MatrixType.PERCENTAGES:
        total = 0.0
        for y in range(len(self.labels)):
            for x in range(len(self.labels)):
                total += result[y + 1][x + 1]
        if total > 0:
            for y in range(len(self.labels)):
                for x in range(len(self.labels)):
                    result[y + 1][x + 1] /= total
    elif matrix_type == MatrixType.PERCENTAGES_PER_ROW:
        for y in range(len(self.labels)):
            total = 0
            for x in range(len(self.labels)):
                total += result[y + 1][x + 1]
            if total > 0:  # guard against empty rows
                for x in range(len(self.labels)):
                    result[y + 1][x + 1] /= total
    return MatrixResult(result, max_decimals=max_decimals)
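A hypothetical end-to-end call (labels invented), assuming the ConfusionMatrix constructor signature used by the module-level generate() further below:

m = ConfusionMatrix(["cat", "dog", "cat"], ["cat", "cat", "cat"])
result = m.generate(matrix_type=MatrixType.COUNTS)
print(result.to_plaintext())  # small table with 'p: ' headers and 'a: ' rows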
Python
def generate(input_file, output_file, col_act, col_pred, col_weight=None,
             matrix_type=MatrixType.COUNTS, delimiter=",", quotechar="\"",
             header=True, labels=None, prefix_act="a: ", prefix_pred="p: ",
             corner="x", output_format=OutputFormat.CSV, max_decimals=3):
    """
    Generates the confusion matrix from the CSV file. Outputs the result on
    stdout if no output file provided.

    :param input_file: the CSV file to load actual/predicted labels from
    :type input_file: str
    :param output_file: the (optional) file to write the matrix to
    :type output_file: str
    :param col_act: the 1-based index of the column that contains the
                    actual/ground truth labels
    :type col_act: int
    :param col_pred: the 1-based index of the column that contains the
                     predicted labels
    :type col_pred: int
    :param col_weight: the 1-based index of the (optional) column containing
                       the weight (0-1) for the predictions, default is None
    :type col_weight: int
    :param matrix_type: the type of matrix to generate
    :type matrix_type: MatrixType
    :param delimiter: the delimiter to use for the CSV file
    :type delimiter: str
    :param quotechar: the quote character to use for the CSV file
    :type quotechar: str
    :param header: whether the CSV file has a header
    :type header: bool
    :param labels: the (optional) list of predefined labels to use
    :type labels: list
    :param prefix_act: the prefix to use for the actual cells (left column)
    :type prefix_act: str
    :param prefix_pred: the prefix to use for the predicted cells (top row)
    :type prefix_pred: str
    :param corner: the text to print in the top-left corner
    :type corner: str
    :param output_format: the format to use when writing to a file
                          (csv|plaintext)
    :type output_format: OutputFormat
    :param max_decimals: the maximum decimals after the decimal point to use
                         in case of float values, -1 for no restrictions
    :type max_decimals: int
    """
    actual, predicted, weight = load_csv(input_file, col_act, col_pred,
                                         col_weight=col_weight,
                                         delimiter=delimiter,
                                         quotechar=quotechar, header=header)
    matrix = ConfusionMatrix(actual, predicted, weight=weight, labels=labels,
                             actual_prefix=prefix_act,
                             predicted_prefix=prefix_pred, corner=corner)
    result = matrix.generate(matrix_type=matrix_type,
                             max_decimals=max_decimals)
    if output_file is None:
        print(result.to_plaintext())
    elif output_format == OutputFormat.CSV:
        # to_csv/to_plaintext open the file themselves; opening it here as
        # well would truncate it twice
        result.to_csv(output_file, delimiter=delimiter, quotechar=quotechar,
                      quoting=csv.QUOTE_MINIMAL)
    elif output_format == OutputFormat.PLAINTEXT:
        result.to_plaintext(output_file)
    else:
        raise Exception("Unhandled output format: " + str(output_format))
Python
def main(args=None):
    """
    Performs the matrix generation.
    Use -h to see all options.

    :param args: the command-line arguments to use, uses sys.argv if None
    :type args: list
    """
    parser = argparse.ArgumentParser(
        description='Generates a confusion matrix from a CSV file with '
                    'actual/predicted label columns.',
        prog="scm-generate",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-i", "--input", dest="input_file", metavar="FILE",
                        required=True,
                        help="the CSV file to load the actual/predicted labels from")
    parser.add_argument("-d", "--delimiter", dest="delimiter",
                        metavar="DELIMITER", required=False, default=",",
                        help="the column delimiter in the CSV file")
    parser.add_argument("-q", "--quotechar", dest="quotechar", metavar="CHAR",
                        required=False, default="\"",
                        help="the quote character to use in the CSV file")
    parser.add_argument("-H", "--no_header", action="store_false",
                        dest="header",
                        help="whether the CSV file has no header row")
    parser.add_argument("-o", "--output", dest="output_file", metavar="FILE",
                        required=False,
                        help="the optional CSV file to write the generated matrix to")
    parser.add_argument("-O", "--output_format", dest="output_format",
                        metavar="FORMAT", required=False,
                        default=OutputFormat.CSV, choices=list(OutputFormat),
                        type=OutputFormat.argparse,
                        help="the output format to use when writing to the output file")
    parser.add_argument("-a", "--actual", dest="col_act", metavar="COL",
                        required=False, default=1, type=int,
                        help="the 1-based column index for the actual/ground truth labels")
    parser.add_argument("-A", "--actual_prefix", dest="prefix_act",
                        metavar="PREFIX", required=False, default="a: ",
                        type=str,
                        help="the prefix to use for the labels depicted in the 'actual' labels column")
    parser.add_argument("-p", "--predicted", dest="col_pred", metavar="COL",
                        required=False, default=2, type=int,
                        help="the 1-based column index for the predicted labels")
    parser.add_argument("-P", "--predicted_prefix", dest="prefix_pred",
                        metavar="PREFIX", required=False, default="p: ",
                        type=str,
                        help="the prefix to use for the labels depicted in the 'predicted' labels row")
    parser.add_argument("-w", "--weight", dest="col_weight", metavar="COL",
                        required=False, default=None, type=int,
                        help="the 1-based column index for the weight (0-1) of the predicted label")
    parser.add_argument("-l", "--labels", dest="labels", metavar="LABELS",
                        required=False, default=None, type=str,
                        help="comma-separated list of predefined labels to use (eg if not all labels present in CSV file)")
    parser.add_argument("-C", "--corner", dest="corner", metavar="CORNER",
                        required=False, default="x", type=str,
                        help="the text to print in the top-left corner")
    parser.add_argument("-D", "--max_decimals", dest="max_decimals",
                        metavar="NUM", required=False, default=3, type=int,
                        help="the maximum number of decimals after the decimal point to use in case of float values like percentages")
    parser.add_argument("-t", "--matrix_type", dest="matrix_type",
                        metavar="TYPE", required=False,
                        default=MatrixType.COUNTS, choices=list(MatrixType),
                        type=MatrixType.argparse,
                        help="the type of matrix to generate")
    parsed = parser.parse_args(args=args)
    labels = None if parsed.labels is None else parsed.labels.split(",")
    generate(parsed.input_file, parsed.output_file, parsed.col_act,
             parsed.col_pred, col_weight=parsed.col_weight,
             matrix_type=parsed.matrix_type, delimiter=parsed.delimiter,
             quotechar=parsed.quotechar, header=parsed.header, labels=labels,
             prefix_act=parsed.prefix_act, prefix_pred=parsed.prefix_pred,
             corner=parsed.corner, output_format=parsed.output_format,
             max_decimals=parsed.max_decimals)
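A hypothetical invocation from Python; 'predictions.csv' is an invented file name, and the defaults read actual labels from column 1 and predictions from column 2:

# Equivalent to running: scm-generate -i predictions.csv
main(["-i", "predictions.csv"])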
Python
def _parse_arguments(desc, theargs):
    """Parses command line arguments using argparse
    """
    help_formatter = argparse.RawDescriptionHelpFormatter
    parser = argparse.ArgumentParser(description=desc,
                                     formatter_class=help_formatter)
    parser.add_argument("--ownerid", default='898082745236',
                        help="Owner id to pass to search "
                             "(default 898082745236)")
    parser.add_argument('--namefilter',
                        default='Deep Learning AMI with Source Code Ubuntu v5.0',
                        help='Find only AMI images with this string in name '
                             '(default: Deep Learning AMI with Source Code '
                             'Ubuntu v5.0)')
    return parser.parse_args(theargs)
Python
def _get_ami_mapping(theargs):
    """Returns a string containing ami mapping
    """
    mapstr = ''
    ec2 = boto3.client('ec2')
    response = ec2.describe_regions()
    for region in response['Regions']:
        rname = region['RegionName']
        sys.stdout.write('Running query in region: ' + rname + '\n')
        ec2 = boto3.client('ec2', region_name=rname)
        resp = ec2.describe_images(Owners=[theargs.ownerid],
                                   Filters=[{'Name': 'name',
                                             'Values': [theargs.namefilter]}])
        for image in resp['Images']:
            mapstr += (' "' + rname + '" : {"AMI" : "' +
                       image['ImageId'] + '"},\n')
    sys.stdout.write('\n\n Below is json fragment that can ' +
                     'go in "RegionMap"\n\n')
    return mapstr
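A hypothetical driver tying the two helpers together (assumes boto3 is installed and AWS credentials are configured in the environment):

import sys

def main(arglist):
    theargs = _parse_arguments('Builds a RegionMap AMI fragment', arglist)
    sys.stdout.write(_get_ami_mapping(theargs) + '\n')

if __name__ == '__main__':
    main(sys.argv[1:])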
Python
def createWindow(name):
    """Creates a new window (and sets it active)."""
    global _globalLock, _frontend, _vis, _window_title, _current_worlds, _windows, _current_window
    _globalLock.acquire()
    if len(_windows) == 0:
        #save the defaults in window 0
        _windows.append(WindowInfo(_window_title, _frontend, _vis))
        _windows[-1].worlds = _current_worlds
        _windows[-1].active_worlds = _current_worlds[:]
    #make a new window
    _window_title = name
    _frontend = GLPluginProgram()
    _vis = VisualizationPlugin()
    _frontend.setPlugin(_vis)
    _windows.append(WindowInfo(_window_title, _frontend, _vis))
    _current_worlds = []
    id = len(_windows) - 1
    _current_window = id
    _globalLock.release()
    return id
Python
def pushPlugin(plugin):
    """Adds a new glinterface.GLPluginInterface plugin on top of the old
    one."""
    global _globalLock, _frontend
    _globalLock.acquire()
    assert isinstance(_frontend, GLPluginProgram), "Can't push a plugin after addPlugin"
    if len(_frontend.plugins) == 0:
        global _vis
        if _vis is None:
            raise RuntimeError("Visualization disabled")
        _frontend.setPlugin(_vis)
    _frontend.pushPlugin(plugin)
    _onFrontendChange()
    _globalLock.release()
Python
def popPlugin():
    """Reverses a prior pushPlugin() call"""
    global _frontend
    _globalLock.acquire()
    _frontend.popPlugin()
    _onFrontendChange()
    _globalLock.release()
Python
def addPlugin(plugin):
    """Adds a second OpenGL viewport in the same window, governed by the
    given plugin (a glinterface.GLPluginInterface instance)."""
    global _frontend
    _globalLock.acquire()
    #create a multi-view widget
    if isinstance(_frontend, glcommon.GLMultiViewportProgram):
        _frontend.addView(plugin)
    else:
        if len(_frontend.plugins) == 0:
            setPlugin(None)
        multiProgram = glcommon.GLMultiViewportProgram()
        multiProgram.window = None
        if _current_window is not None:
            if _windows[_current_window].glwindow is not None:
                multiProgram.window = _windows[_current_window].glwindow
        multiProgram.addView(_frontend)
        multiProgram.addView(plugin)
        multiProgram.name = _window_title
        _frontend = multiProgram
    _onFrontendChange()
    _globalLock.release()
Python
def run(plugin=None):
    """A blocking call to start a single window and then kill the
    visualization when closed. If plugin == None, the default visualization
    is used. Otherwise, plugin is a glinterface.GLPluginInterface object,
    and it is used."""
    setPlugin(plugin)
    show()
    while shown():
        time.sleep(0.1)
    setPlugin(None)
    kill()
Python
def kill():
    """This should be called at the end of the calling program to cleanly
    terminate the visualization thread"""
    global _vis, _globalLock
    if _vis is None:
        print("vis.kill() Visualization disabled")
        return
    _kill()
Python
def show(display=True):
    """Shows or hides the current window"""
    _globalLock.acquire()
    if display:
        _show()
    else:
        _hide()
    _globalLock.release()
Python
def spin(duration):
    """Spin-shows a window for a certain duration or until the window is
    closed."""
    show()
    t = 0
    while t < duration:
        if not shown():
            break
        time.sleep(min(0.04, duration - t))
        t += 0.04
    show(False)
    return
Python
def lock():
    """Begins a locked section. Needs to be called any time you modify a
    visualization item outside of the visualization thread. unlock() must
    be called to let the visualization thread proceed."""
    global _globalLock
    _globalLock.acquire()
Python
def unlock():
    """Ends a locked section acquired by lock()."""
    global _globalLock, _windows
    for w in _windows:
        if w.glwindow:
            w.doRefresh = True
    _globalLock.release()
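The intended usage pattern for lock()/unlock(), sketched; the try/finally keeps the visualization thread from deadlocking if the mutation raises:

lock()
try:
    # ... mutate shown items here, outside the visualization thread ...
    pass
finally:
    unlock()  # also flags every window for a refresh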
Python
def shown():
    """Returns true if a visualization window is currently shown."""
    global _globalLock, _thread_running, _current_window
    _globalLock.acquire()
    # parenthesized so the mode/guidata test is only reached when a current
    # window exists (the ungrouped and/or chain crashed on no window)
    res = (_thread_running and _current_window is not None and
           (_windows[_current_window].mode in ['shown', 'dialog'] or
            _windows[_current_window].guidata is not None))
    _globalLock.release()
    return res
Python
def customUI(func):
    """Tells the next created window/dialog to use a custom UI function.
    func is a 1-argument function that takes a QtWindow or GLUTWindow as
    its argument."""
    global _globalLock
    _globalLock.acquire()
    _set_custom_ui(func)
    _globalLock.release()
Python
def add(name, item, keepAppearance=False):
    """Adds an item to the visualization. name is a unique identifier. If
    an item with the same name already exists, it will no longer be shown.
    If keepAppearance=True, then the prior item's appearance will be kept,
    if a prior item exists."""
    global _vis
    if _vis is None:
        print("Visualization disabled")
        return
    _globalLock.acquire()
    _checkWindowCurrent(item)
    _globalLock.release()
    _vis.add(name, item, keepAppearance)
Python
def dirty(item_name='all'):
    """Marks the given item as dirty and recreates the OpenGL display
    lists. You may need to call this if you modify an item's geometry, for
    example. If things start disappearing from your world when you create
    a new window, you may need to call this too."""
    global _vis
    if _vis is None:
        print("Visualization disabled")
        return
    _vis.dirty(item_name)
Python
def animate(name, animation, speed=1.0, endBehavior='loop'):
    """Sends an animation to the named object. Works with points, so3
    elements, se3 elements, rigid objects, or robots, and may work with
    other objects as well.

    Parameters:
    - animation: may be a Trajectory or a list of configurations.
    - speed: a modulator on the animation speed. If the animation is a list
      of milestones, it is by default run at 1 milestone per second.
    - endBehavior: either 'loop' (animation repeats forever) or 'halt'
      (plays once).
    """
    global _vis
    if _vis is None:
        print("Visualization disabled")
        return
    _vis.animate(name, animation, speed, endBehavior)
Python
def edit(name, doedit=True):
    """Turns on/off visual editing of some item. Only points, transforms,
    coordinate.Point's, coordinate.Transform's, coordinate.Frame's, robots,
    and objects are currently accepted."""
    global _vis
    if _vis is None:
        return
    _vis.edit(name, doedit)
Python
def addText(name, text, pos=None):
    """Adds text to the visualizer. You must give an identifier to all
    pieces of text, which will be used to access the text as any other vis
    object.

    Parameters:
    - name: the text's unique identifier.
    - text: the string to be drawn.
    - pos: the position of the string. If pos=None, this is added to the
      on-screen "console" display. If pos has length 2, it is the (x,y)
      position of the upper left corner of the text on the screen. Negative
      units anchor the text to the right or bottom of the window. If pos
      has length 3, the text is drawn in world coordinates.

    To customize the text appearance, you can set the color, 'size'
    attribute, and 'position' attribute of the text using the identifier
    given in 'name'.
    """
    global _vis
    if _vis is None:  # guard added for consistency with the other helpers
        return
    _vis.add(name, text, True)
    if pos is not None:
        _vis.setAttribute(name, 'position', pos)
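A usage sketch covering the three pos conventions from the docstring (names and values invented):

addText("status", "Planning...")                # on-screen console area
addText("hud", "FPS: 60", pos=(10, 10))         # screen pixels, upper left
addText("goal", "target", pos=(0.5, 0.0, 1.0))  # world coordinates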
Python
def clearText():
    """Clears all text in the visualization."""
    global _vis
    if _vis is None:
        return
    _vis.clearText()
Python
def logPlot(name, itemname, value):
    """Logs a custom visualization item to a plot"""
    global _vis
    if _vis is None:
        return
    _vis.logPlot(name, itemname, value)
Python
def logPlotEvent(name, eventname, color=None):
    """Logs an event on the plot."""
    global _vis
    if _vis is None:
        return
    _vis.logPlotEvent(name, eventname, color)
Python
def updateAnimation(self, t):
    """Updates the configuration, if it's being animated"""
    if not self.animation:
        self.drawConfig = None
    else:
        u = self.animationSpeed * (t - self.animationStartTime)
        q = self.animation.eval(u, self.animationEndBehavior)
        self.drawConfig = q
    for n, app in self.subAppearances.items():
        app.updateAnimation(t)
Python
def swapDrawConfig(self):
    """Given self.drawConfig!=None, swaps out the item's current
    configuration with self.drawConfig.  Used for animations"""
    if self.drawConfig:
        try:
            newDrawConfig = config.getConfig(self.item)
            #self.item = config.setConfig(self.item,self.drawConfig)
            self.drawConfig = newDrawConfig
        except Exception as e:
            print "Warning, exception thrown during animation update.  Probably have incorrect length of configuration"
            import traceback
            traceback.print_exc()
            pass
    for n,app in self.subAppearances.iteritems():
        app.swapDrawConfig()
Python
def dirty(self,item_name='all'):
    """Marks an item or everything as dirty, forcing a deep redraw."""
    global _globalLock
    _globalLock.acquire()
    if item_name == 'all':
        for (name,itemvis) in self.items.iteritems():
            itemvis.markChanged()
    else:
        self.getItem(item_name).markChanged()
    _globalLock.release()
Python
def clearText(self):
    """Clears all text in the visualization."""
    global _globalLock
    _globalLock.acquire()
    del_items = []
    for (name,itemvis) in self.items.iteritems():
        if isinstance(itemvis.item,str):
            itemvis.destroy()
            del_items.append(name)
    for n in del_items:
        del self.items[n]
    _globalLock.release()
Python
def listItems(self,root=None,indent=0):
    """Prints out all items in the visualization world."""
    if root is None:
        for name,value in self.items.iteritems():
            self.listItems(value,indent)
    else:
        if isinstance(root,str):
            root = self.getItem(root)
        if indent > 0:
            print " "*(indent-1),
        print root.name
        for n,v in root.subAppearances.iteritems():
            self.listItems(v,indent+2)
Python
def add(self,name,item,keepAppearance=False):
    """Adds a named item to the visualization world.  If the item already
    exists, the appearance information will be reinitialized if
    keepAppearance=False (default) or be kept if keepAppearance=True."""
    global _globalLock
    assert not isinstance(name,(list,tuple)),"Cannot add sub-path items"
    _globalLock.acquire()
    if keepAppearance and name in self.items:
        self.items[name].setItem(item)
    else:
        #need to erase prior item visualizer
        if name in self.items:
            self.items[name].destroy()
        app = VisAppearance(item,name)
        self.items[name] = app
    _globalLock.release()
    #self.refresh()
Python
def load(type=None,directory=None):
    """Asks the user to open a resource file of a given type.  If type is
    not given, all resource file types are given as options.  Returns a
    (filename,value) pair."""
    fg = FileGetter('Open resource')
    fg.directory = directory
    if directory is None:
        fg.directory = getDirectory()
    if type is not None:
        extensions = []
        for (k,v) in extensionToType.iteritems():
            if v == type:
                extensions.append(k)
        extensions.append('.json')
        fg.filetypes.append((type,extensions))
    def make_getfilename(glbackend):
        fg.getOpen()
        return None
    #These gymnastics are necessary because Qt can only be run in a single thread, and to be compatible
    #with the visualization you need to use the customUI functions
    old_window = vis.getWindow()
    global _thumbnail_window
    if _thumbnail_window is None:
        _thumbnail_window = vis.createWindow("")
    vis.setWindow(_thumbnail_window)
    vis.customUI(make_getfilename)
    vis.dialog()
    vis.customUI(None)
    vis.setWindow(old_window)
    if len(fg.result) == 0:
        return None
    if type is None:
        return get(str(fg.result),'auto',directory,doedit=False)
    return str(fg.result),get(str(fg.result),type,'',doedit=False)
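# Hedged usage sketch: pop up a file dialog restricted to one resource type;
# with a type given, the return value is a (filename, value) pair, or None
# if the user cancels.
#   result = load('Configs')
#   if result is not None:
#       fn, configs = result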
Python
def save(value,type='auto',directory=None):
    """Asks the user to save the given resource to a file of the correct
    type.  If type='auto', the type is determined automatically.  Returns
    the selected filename or None on cancellation."""
    fg = FileGetter('Save resource')
    fg.directory = directory
    if directory is None:
        fg.directory = getDirectory()
    if type == 'auto':
        typelist = types.objectToTypes(value)
    else:
        typelist = [type]
    for type in typelist:
        extensions = []
        for (k,v) in extensionToType.iteritems():
            if v == type:
                extensions.append(k)
        extensions.append('.json')
        fg.filetypes.append((type,extensions))
    def make_getfilename(glbackend):
        fg.getSave()
        return None
    #These gymnastics are necessary because Qt can only be run in a single thread, and to be compatible
    #with the visualization you need to use the customUI functions
    old_window = vis.getWindow()
    global _thumbnail_window
    if _thumbnail_window is None:
        _thumbnail_window = vis.createWindow("")
    vis.setWindow(_thumbnail_window)
    vis.customUI(make_getfilename)
    vis.dialog()
    vis.customUI(None)
    vis.setWindow(old_window)
    if len(fg.result) == 0:
        return None
    if set(str(fg.result),value,type,''):
        return str(fg.result)
    return None
Python
def thumbnail(value,size,type='auto',world=None,frame=None):
    """Retrieves an image of the given item, resized to the given size.
    Return value is a PIL Image if PIL is available, or just a raw RGBA
    memory buffer otherwise.

    Tip: can just take a snapshot of a world too."""
    global _thumbnail_window
    world = _get_world(world)
    if isinstance(value,WorldModel):
        world = value
        value = None
    if type == 'auto' and value is not None:
        typelist = types.objectToTypes(value)
        if isinstance(typelist,(list,tuple)):
            type = typelist[0]
        else:
            type = typelist
        if type is None:
            raise ValueError("Un-recognized type")
        if type == 'Config' and world is None and len(value) == 3:
            type = 'Vector3'
    if type in ['Config','Configs','Trajectory','IKGoal']:
        if world is None:
            raise ValueError("Need a world to draw a thumbnail of type "+type)
    if frame is not None:
        if type not in ['RigidTransform','Vector3','Matrix3']:
            raise ValueError("Can't accept frame argument for objects of type "+type)
    old_window = vis.getWindow()
    if _thumbnail_window is None:
        _thumbnail_window = vis.createWindow("")
    vis.setWindow(_thumbnail_window)
    assert not vis.shown()
    vp = vis.getViewport()
    vp.w,vp.h = size
    if vp.w < 256 or vp.h < 256:
        vp.w = vp.w*256/min(vp.w,vp.h)
        vp.h = vp.h*256/min(vp.w,vp.h)
    vis.setViewport(vp)
    vp = vis.getViewport()
    plugin = _ThumbnailPlugin(world)
    if world:
        plugin.add("world",world)
    if value is not None:
        if type == 'Config':
            world.robot(0).setConfig(value)
        else:
            plugin.add("item",value)
    plugin.autoFitCamera()
    vis.setPlugin(plugin)
    vis.show()
    plugin.rendered = 0
    while not plugin.done:
        time.sleep(0.1)
    vis.setPlugin(None)
    vis.show(False)
    vis.setWindow(old_window)
    if (vp.w,vp.h) != size and plugin.image.__class__.__name__ == 'Image':
        try:
            from PIL import Image
            plugin.image.thumbnail(size,Image.ANTIALIAS)
        except ImportError:
            try:
                import Image
                plugin.image.thumbnail(size,Image.ANTIALIAS)
            except ImportError:
                # if this happens then
                # plugin.image is just a raw RGBA memory buffer
                pass
    return plugin.image
Python
def edit(name,value,type='auto',description=None,editor='visual',world=None,referenceObject=None,frame=None):
    """Launches an editor for the given value.  Returns a pair (save,result)
    where save indicates what the user wanted to do with the edited value
    and result is the edited value.

    Arguments:
    - name: the displayed name of the edited value. Can be None, in which
      case 'Anonymous' is displayed.
    - value: the value to be edited.  Can be None, in which case 'type'
      must be specified and a default value is created.
    - type: the type string of the value to be edited.  Usually can be
      auto-detected from value.
    - description: a descriptive string, displayed to the person editing.
    - editor: either 'visual' or 'console'.  If 'visual', will display a
      GUI for visually editing the item.  If 'console', the user will have
      to type in the value.
    - world: either a WorldModel instance or a string specifying a world
      file.  This is necessary for visual editing.
    - referenceObject: a RobotModel or other object to which the value
      "refers to".  For configurations and trajectories, this is the object
      that will be moved by the trajectory.  In the case of a RigidTransform
      value, this can be an object or a list of objects that will be
      transformed by the transform.
    - frame: for Vector3, Matrix3, Point, Rotation, and RigidTransform
      types, the returned value will be given relative to this reference
      frame.  The reference frame can be either an element of se3, an
      ObjectModel, a RobotModelLink, or a string indicating a named rigid
      element of the world.
    """
    if name is None and type == 'auto':
        raise RuntimeError("Cannot do an anonymous edit without the 'type' argument specified")
    if name is None:
        name = 'Anonymous'
    if type == 'auto':
        type = types.objectToTypes(value)
        if type is None:
            raise RuntimeError("Could not autodetect type of object "+name)
        if isinstance(type,(list,tuple)):
            type = type[0]
    if not vis.glinit._PyQtAvailable and editor == 'visual':
        print "PyQt is not available, defaulting to console editor"
        editor = 'console'
    world = _get_world(world)
    if isinstance(frame,str):
        try:
            oframe = world.rigidObject(frame)
            frame = oframe
        except RuntimeError:
            try:
                if isinstance(referenceObject,RobotModel):
                    oframe = referenceObject.link(frame)
                    frame = oframe
                else:
                    oframe = world.robot(0).link(frame)
                    frame = oframe
            except RuntimeError:
                try:
                    oframe = world.terrain(frame)
                    frame = oframe
                except RuntimeError:
                    raise RuntimeError('Named frame "'+frame+'" is not a valid frame')
    if type in ['Config','Configs','Trajectory']:
        if world is None and referenceObject is None:
            raise RuntimeError("Cannot visually edit a "+type+" resource without a world/referenceObject argument")
        if referenceObject is None and world.numRobots() > 0:
            referenceObject = world.robot(0)
    if value is None:
        value = types.make(type,referenceObject)
        if value is None:
            raise RuntimeError("Don't know how to edit objects of type "+type)
    if editor == 'console':
        return console_edit(name,value,type,description,world,frame)
    elif editor == 'visual':
        if type == 'Config':
            assert isinstance(referenceObject,RobotModel),"Can currently only edit Config values with a RobotModel reference object"
            return vis.editors.run(vis.editors.ConfigEditor(name,value,description,world,referenceObject))
        elif type == 'Configs':
            assert isinstance(referenceObject,RobotModel),"Can currently only edit Configs values with a RobotModel reference object"
            return vis.editors.run(vis.editors.ConfigsEditor(name,value,description,world,referenceObject))
        elif type == 'Trajectory':
            assert isinstance(referenceObject,RobotModel),"Can currently only edit Trajectory values with a RobotModel reference object"
            return vis.editors.run(vis.editors.TrajectoryEditor(name,value,description,world,referenceObject))
        elif type == 'Vector3' or type == 'Point':
            if hasattr(frame,'getTransform'):
                frame = frame.getTransform()
            return vis.editors.run(vis.editors.PointEditor(name,value,description,world,frame))
        elif type == 'RigidTransform' or type == 'Rotation':
            if type == 'RigidTransform' and isinstance(frame,RigidObjectModel):
                return vis.editors.run(vis.editors.ObjectTransformEditor(name,value,description,world,frame))
            if type == 'Rotation':
                #convert from so3 to se3
                value = [value,[0,0,0]]
            Tref = frame
            if hasattr(frame,'getTransform'):
                Tref = frame.getTransform()
            editor = vis.editors.RigidTransformEditor(name,value,description,world,Tref)
            if type == 'Rotation':
                editor.disableTranslation()
            #attach visualization items to the transform
            if isinstance(referenceObject,RobotModelLink):
                assert frame.index >= 0
                r = frame.robot()
                descendant = [False]*r.numLinks()
                descendant[frame.index] = True
                for i in xrange(r.numLinks()):
                    p = r.link(i).getParent()
                    if p >= 0 and descendant[p]:
                        descendant[i] = True
                for i in xrange(r.numLinks()):
                    if descendant[i]:
                        editor.attach(r.link(i))
                editor.attach(frame)
            elif hasattr(referenceObject,'getTransform'):
                editor.attach(referenceObject)
            elif hasattr(referenceObject,'__iter__'):
                for i in referenceObject:
                    #bug fix: attach each item, not the container itself
                    editor.attach(i)
            #Run!
            if type == 'Rotation':
                #convert from se3 to so3
                return vis.editors.run(editor)[0]
            else:
                return vis.editors.run(editor)
        else:
            raise RuntimeError("Visual editing of objects of type "+type+" not supported yet")
    else:
        raise ValueError("Invalid value for argument 'editor', must be either 'visual' or 'console'")
Python
def intToBitString(a, bit = 256):
    """
    Converts an integer to a binary string representation (without '0b')

    :param a: The integer to convert (integer)
    :param bit: The size of the representation in bit (integer)
    :return: Binary string representation (String)
    """
    str = bin(a)[2:]
    return (bit-len(str))*"0" + str
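# Quick doctest-style checks (added for illustration; the width is
# zero-padded on the left):
#   intToBitString(5, bit=8)   # -> "00000101"
#   intToBitString(2, bit=4)   # -> "0010"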
Python
def bitStringtoSigned(str, block_size = 32):
    """
    Inserts a zero every :block_size: bits so that the string reads as a
    sequence of :block_size:-bit signed integers; the top (sign) bit of
    each block is forced to zero, so information is lost.

    :param str: Original string of the integer in binary format (String)
    :param block_size: step for the insertion of 0 (Integer)
    :return: string of block_size signed integers in binary format (String)
    """
    signed_str = ""
    cpt = 0
    reverse_str = str[::-1]
    for i in range(0, len(str), block_size-1):
        cpt += 1
        signed_str += reverse_str[i:i+block_size-1] + '0'
    signed_str = signed_str[::-1]
    signed_str = signed_str[cpt:]
    return signed_str
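# Worked example (added for illustration): with block_size=8, every 8-bit
# block gets its sign bit forced to zero, so a string of all ones becomes
# two blocks of "01111111":
#   bitStringtoSigned("1111111111111111", block_size=8)
#   # -> "0111111101111111"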
Python
def convertBigIntToIntArray(big_int, bit = 256, block_size = 32):
    """
    Converts a big integer into an array representation.  Each array entry
    carries :block_size:-1 payload bits (the sign bit is kept clear) and
    the representation has a total length of :bit: bits,
    e.g. [0, 0, 0, 0, 0, 0, 500, 2]

    :param big_int: The integer you want to convert (Integer)
    :param bit: The length in bits of the representation (Integer)
    :param block_size: The initial size of each integer in the final array in bits (Integer)
    :return: An array representation of the integer (Integer Array)
    """
    str = intToBitString(big_int, bit)
    int_array = bitStringToIntArray(str)
    return int_array
Python
def convertIntArraytoBigInt(int_array, block_size = 32):
    """
    Converts an integer array (of :block_size: bits, signed) into an integer
    (e.g. [0, 0, 0, 0, 0, 0, 1, 0] --> 2147483648)

    :param int_array: The integer array you want to convert (Integer array)
    :param block_size: Size, in bits, of each integer in the array (Integer)
    :return: Integer representation of the array (Integer)
    """
    bin_str = ""
    for i in range(len(int_array)):
        bin_str += intToBitString(int_array[i], block_size)[1:]
    big_int = int(bin_str, 2)
    return big_int
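# Checking the docstring example by hand (added for illustration): each
# 32-bit block contributes its low 31 bits, so the trailing [..., 1, 0]
# places a 1 followed by 31 zeros:
#   convertIntArraytoBigInt([0, 0, 0, 0, 0, 0, 1, 0])  # -> 2**31 == 2147483648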
Python
def mul(a_int_array, b_int_array, bit_a = 256, bit_b = 256, block_size = 32):
    """
    Multiplies two integers in an integer array representation.

    :param a_int_array: integer array representation of the first integer (Integer array)
    :param b_int_array: integer array representation of the second integer (Integer array)
    :param bit_a: length in bits of the first integer (Integer)
    :param bit_b: length in bits of the second integer (Integer)
    :param block_size: length in bits of each integer in the array representation (Integer)
    :return: result of the multiplication between the first integer and the second, in integer array representation (Integer array)
    """
    a_big_int = convertIntArraytoBigInt(a_int_array, block_size)
    b_big_int = convertIntArraytoBigInt(b_int_array, block_size)
    res = a_big_int * b_big_int
    res_array = convertBigIntToIntArray(res, bit = bit_a + bit_b, block_size = block_size)
    return res_array
Python
def add_mod(a_int_array, b_int_array, mod_int_array, block_size = 32):
    """
    Modular addition between two big integers

    :param a_int_array: integer array representation of the first integer (Integer array)
    :param b_int_array: integer array representation of the second integer (Integer array)
    :param mod_int_array: integer array representation of the modulus integer (Integer array)
    :param block_size: length in bits of each integer in the array representation (Integer)
    :return: result of the modular addition between the first integer and the second, in integer array representation (Integer array)
    """
    a_big_int = convertIntArraytoBigInt(a_int_array, block_size)
    b_big_int = convertIntArraytoBigInt(b_int_array, block_size)
    mod_big_int = convertIntArraytoBigInt(mod_int_array, block_size)
    res = (a_big_int + b_big_int) % mod_big_int
    res_array = convertBigIntToIntArray(res, block_size = block_size)
    return res_array
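# Property sketch (added for illustration, assuming the array round trip is
# lossless for values that fit the 31-bit payload per block):
#   A = convertIntArraytoBigInt(a); B = convertIntArraytoBigInt(b)
#   M = convertIntArraytoBigInt(m)
#   convertIntArraytoBigInt(add_mod(a, b, m)) == (A + B) % M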
Python
def sub_mod(a_int_array, b_int_array, mod_int_array, block_size = 32):
    """
    Modular subtraction between two big integers

    :param a_int_array: integer array representation of the first integer (Integer array)
    :param b_int_array: integer array representation of the second integer (Integer array)
    :param mod_int_array: integer array representation of the modulus integer (Integer array)
    :param block_size: length in bits of each integer in the array representation (Integer)
    :return: result of the modular subtraction between the first integer and the second, in integer array representation (Integer array)
    """
    a_big_int = convertIntArraytoBigInt(a_int_array, block_size)
    b_big_int = convertIntArraytoBigInt(b_int_array, block_size)
    mod_big_int = convertIntArraytoBigInt(mod_int_array, block_size)
    res = (a_big_int - b_big_int) % mod_big_int
    res_array = convertBigIntToIntArray(res, block_size = block_size)
    return res_array
Python
def load(
    self,
    path: Union[Path, str],
    input_resource_type: str,
    output_resource_type: str = None,
    xml_schema_dir: Optional[Union[Path, str]] = None,
    server_url: str = HAPI_FHIR_SERVER_4,
    validate: bool = True,
    **kwargs,
) -> Union[list, dict]:
    """Load and validate FHIR data with specific input_resource_type.
    If output_resource_type is different from input_resource_type, then
    attempt to filter input_resource_type to output_resource_type and
    return that.

    Parameters
    ----------
    path : Union[Path, str]
        Path containing FHIR data. Must be either XML or JSON
    input_resource_type : str
        Resource type of the input data in path
    output_resource_type : str
        If input_resource_type is Bundle, the output resource type could be
        one of the resources contained in the Bundle; this allows filtering
        for that resource type. If None, the input is returned
    xml_schema_dir : Optional[Union[Path, str]], optional
        If data is XML, this is the path to the .xsd schema, by default None
    server_url : Optional[str], optional
        For online validation, a server URL, by default HAPI_FHIR_SERVER_4
        which is http://hapi.fhir.org/baseR4
    validate : bool, optional
        Whether to validate the input, by default True
    kwargs: dict
        Keyword arguments for the online validation method - gets passed to
        the python requests.post function. For example, if you want to pass
        custom headers with the request.

    Returns
    -------
    Union[list, dict]
        Returns a FHIR resource type as a dictionary, or if several FHIR
        resources of type output_resource_type were found in an input
        bundle, then all those are returned as a list.

    Raises
    ------
    FHIRValidationError
        Raised if validation fails
    ValueError
        If path is not XML or JSON
    ValueError
        If no resource of type output_resource_type is found in input
        bundle
    """
    # accept both str and Path inputs (path.suffix below requires a Path)
    path = self.convert_path(path)
    if path.suffix == ".json":
        data = self.load_json(path)
    elif path.suffix == ".xml":
        # TODO: Improve XML handling. Currently it's quite inefficient
        # for two reasons:
        # (1) To convert XML string to dict, we use `xmlschema` which
        # validates data in `path` against the XML schema .xsd file.
        # Loading this schema takes some time.
        # (2) Effectively XML is getting validated twice, once in (1) and
        # then again by the `fhir.resources` parser which converts from
        # dict to a class
        import xmlschema

        xml_schema_dir = self.convert_path(xml_schema_dir)
        xml_schema_path = (
            xml_schema_dir / f"{input_resource_type.lower()}.xsd"
        )
        xml_schema = xmlschema.XMLSchema(xml_schema_path)
        try:
            data = xml_schema.to_dict(path)
        except xmlschema.validators.exceptions.XMLSchemaValidationError as e:  # noqa
            raise FHIRValidationError(
                f"Failed to validate FHIR XML data located at {path} "
                f"using schema {xml_schema_path}.\n\n{e}"
            )
    else:
        raise ValueError(
            f"`path` is an unsupported FHIR file type '{path.suffix}'. "
            "Must be either '.json' or '.xml'"
        )
    if validate:
        self.validate(data, input_resource_type, server_url, **kwargs)
    if (output_resource_type is None) or (
        output_resource_type == input_resource_type
    ):
        return data
    else:
        filtered_data = [
            item["resource"]
            for item in data["entry"]
            if item["resource"]["resourceType"].lower()
            == output_resource_type.lower()
        ]
        if len(filtered_data) == 0:
            raise ValueError(
                f"No FHIR resource {output_resource_type} in {path}."
            )
        elif len(filtered_data) > 1:
            if output_resource_type.lower() == "patient":
                raise FHIRValidationError(
                    "FHIR data contains more than one patient resource."
                )
            return filtered_data
        else:
            return filtered_data[0]
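# Hypothetical usage (file name is a placeholder): load a bundle and pull
# out its single Patient resource.
#   handler = FHIRHandler()
#   patient = handler.load(
#       Path("bundle.json"), "Bundle", output_resource_type="Patient"
#   )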
Python
def save(
    self,
    path: Union[Path, str],
    data: dict,
    resource_type: str = None,
    validate: bool = True,
    server_url: str = HAPI_FHIR_SERVER_4,
    **kwargs,
) -> None:
    """Validate and save FHIR data as JSON file

    Parameters
    ----------
    path : Union[Path, str]
        Path in which to save FHIR data as JSON
    data : dict
        Data to save
    resource_type : str, optional
        Name of the resource type (case-sensitive). If not supplied,
        attempts to find the resource type from data["resourceType"],
        by default None
    validate : bool, optional
        Whether to validate data before saving, by default True
    server_url : Optional[str], optional
        For online validation, a server URL, by default HAPI_FHIR_SERVER_4
    kwargs: dict
        Keyword arguments for the online validation method - gets passed to
        the python requests.post function. For example, if you want to pass
        custom headers with the request.
    """
    # TODO: add XML to save?
    if validate:
        self.validate(data, resource_type, server_url, **kwargs)
    self.save_json(path, data)
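# Hypothetical usage (file name and resource variable are placeholders):
#   FHIRHandler().save(
#       Path("patient.json"), patient_resource, resource_type="Patient"
#   )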
Python
def create_fhir_bundle(
    resources: List[dict], bundle_type: str = "transaction"
) -> dict:
    """Creates a FHIR bundle from a list of FHIR resources

    Parameters
    ----------
    resources : List[dict]
        List of FHIR resources
    bundle_type : str, optional
        FHIR Bundle type
        https://www.hl7.org/fhir/bundle-definitions.html#Bundle.type,
        by default "transaction"

    Returns
    -------
    dict
        FHIR Bundle
    """
    return {
        "resourceType": "Bundle",
        "type": bundle_type,
        "entry": [
            {
                "resource": resource,
                "request": {"method": "POST", "url": resource["resourceType"]},
            }
            for resource in resources
        ],
    }
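# Worked example (added for illustration): wrapping a single Patient
# resource produces one entry with a POST request targeting its
# resourceType.
#   bundle = create_fhir_bundle(
#       [{"resourceType": "Patient", "id": "p1", "gender": "female"}]
#   )
#   bundle["entry"][0]["request"]  # {"method": "POST", "url": "Patient"}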
Python
def convert_patient_record_entry_to_fhir(
    entry: "PatientRecordEntry",  # noqa
    patient: "PatientAgent",  # noqa
    environments: "Dict[Union[str, int], EnvironmentAgent]" = None,  # noqa
) -> dict:
    """Converts a patient record entry (of type PatientRecordEntry) to a
    FHIR resource. The contents of entry.entry are converted to a FHIR
    dictionary `x` according to the mapping below. If there is raw FHIR
    data `y` in entry.fhir_resource, then the output will be the combined
    dictionary `{**x, **y}`

    NOTE: there is much room for expansion and improvement here. More
    resource types and fields could be added, fields like 'status' should
    probably be deduced and depend on input (for now we have set fixed
    place holder values), also for resource types like MedicationRequest,
    an 'end' date could be mapped to the duration values.

    Parameters
    ----------
    entry : PatientRecordEntry
        A patient record entry
    patient : PatientAgent
        The instance of the PatientAgent with this entry
    environments : Dict[Union[str, int], EnvironmentAgent]
        Dictionary of environments, keyed by environment_id, that may have
        interacted with the patient. Currently this is not used, but could
        be useful in future if environment attributes could enrich the
        FHIR data, for example, the location

    Returns
    -------
    dict
        A FHIR resource

    Raises
    ------
    ValueError
        If the resource type is not a type that is currently supported for
        conversion
    """
    # NOTE: noqas above avoid circular import

    def set_subject(resource, patient):
        resource["subject"] = {
            "display": str(patient.name),
        }

    def set_coding(_entry):
        coding = {
            "coding": [
                {
                    "display": _entry["name"],
                }
            ]
        }
        if "code" in _entry:
            coding["coding"][0]["code"] = _entry["code"]
        if "system" in _entry:
            coding["coding"][0]["system"] = _entry["system"]
        return coding

    resource_type = entry.fhir_resource_type
    _entry = entry.entry
    if resource_type == "Encounter":
        resource = {
            # status: planned | arrived | triaged | in-progress | onleave
            # | finished | cancelled +.
            "status": "finished",  # required
            "class": {  # required
                "display": _entry["name"],
            },
            "period": {
                "start": datetime_to_string(_entry["start"]),
            },
        }
        set_subject(resource, patient)
        if "code" in _entry:
            resource["class"]["code"] = _entry["code"]
        if "end" in _entry:
            resource["period"]["end"] = datetime_to_string(_entry["end"])
    elif resource_type == "Condition":
        resource = {
            "code": set_coding(_entry),
            "recordedDate": datetime_to_string(_entry["start"]),
        }
        set_subject(resource, patient)
        if "end" in _entry:
            resource["abatementDateTime"] = datetime_to_string(_entry["end"])
            resource["clinicalStatus"] = {
                "coding": [
                    {
                        "system": (
                            "http://terminology.hl7.org/CodeSystem/"
                            "condition-clinical"
                        ),
                        "code": "inactive",
                    }
                ]
            }
    elif resource_type == "Observation":
        resource = {
            # required
            "status": "final",
            "code": set_coding(_entry),
            "effectivePeriod": {
                "start": datetime_to_string(_entry["start"]),
            },
        }
        set_subject(resource, patient)
        if "end" in _entry:
            resource["effectivePeriod"]["end"] = datetime_to_string(
                _entry["end"]
            )
        if "value" in _entry:
            # NOTE: _entry["value"] must be a dictionary like
            # {
            #     "value": 4.12,
            #     "unit": "10^12/L",
            #     "system": "http://unitsofmeasure.org",
            #     "code": "10*12/L"
            # }
            resource["valueQuantity"] = _entry["value"]
    elif resource_type == "Procedure":
        resource = {
            # required
            "status": "final",
            "code": set_coding(_entry),
            "performedPeriod": {
                "start": datetime_to_string(_entry["start"]),
            },
        }
        set_subject(resource, patient)
        if "end" in _entry:
            resource["performedPeriod"]["end"] = datetime_to_string(
                _entry["end"]
            )
    elif resource_type == "MedicationRequest":
        med_id = f"medication-{entry.entry_id}"
        resource = {
            # required
            "status": "final",
            "intent": "order",
            "contained": [
                {
                    "resourceType": "Medication",
                    "id": med_id,
                    "code": set_coding(_entry),
                }
            ],
            "dispenseRequest": {
                "validityPeriod": {
                    "start": datetime_to_string(_entry["start"]),
                },
            },
            "medicationReference": {"reference": f"#{med_id}"},
        }
        set_subject(resource, patient)
        if "dosage" in _entry:
            resource["dosageInstruction"] = [
                {
                    "text": _entry["dosage"],
                },
            ]
        if "duration_value" in _entry and "duration_unit" in _entry:
            resource["dispenseRequest"]["expectedSupplyDuration"] = {
                "value": _entry["duration_value"],
                "unit": _entry["duration_unit"],
            }
    elif resource_type == "ServiceRequest":
        resource = {
            "status": "active",
            "intent": "order",
            "code": set_coding(_entry),
            "reasonCode": [
                {
                    "text": _entry["name"],
                }
            ],
            "occurrenceDateTime": datetime_to_string(_entry["start"]),
        }
        set_subject(resource, patient)
    elif resource_type == "Appointment":
        resource = {
            "status": "active",
            "serviceCategory": [set_coding(_entry)],
            "reasonReference": [
                {
                    "display": _entry["name"],
                }
            ],
            "description": "description",
            "start": datetime_to_string(_entry["start"]),
            "participant": [
                {
                    "actor": {
                        "reference": f"Patient/{patient.patient_id}",
                        "display": str(patient.name),
                    },
                    "status": "accepted",
                }
            ],
        }
        if "end" in _entry:
            resource["end"] = datetime_to_string(_entry["end"])
    elif resource_type == "Patient":
        resource = {
            "id": str(patient.patient_id),
            "gender": patient.gender,
            "birthDate": datetime_to_string(
                patient.birth_date, format_str="%Y-%m-%d"
            ),
        }
        if "end" in _entry:
            resource["deceasedDateTime"] = datetime_to_string(_entry["end"])
    else:
        raise ValueError(
            f"Currently cannot convert FHIR resource type {resource_type}."
        )
    if "id" not in resource:
        resource["id"] = str(entry.entry_id)
    resource["resourceType"] = resource_type
    if entry.fhir_resource is None:
        return resource
    else:
        return {**resource, **entry.fhir_resource}
Python
def generate_patient_fhir_resources(
    patient: "PatientAgent",  # noqa
    environments: "Dict[Union[str, int], EnvironmentAgent]" = None,  # noqa
    validate: bool = True,
    server_url: Optional[str] = None,
    **kwargs,
):
    """Generate list of FHIR resources from patient record

    NOTE: there is much room for expansion and improvement here. For
    example, resources connected to the same encounter could all have
    references to that encounter id.

    Parameters
    ----------
    patient : PatientAgent
        The instance of the PatientAgent with this entry
    environments : Dict[Union[str, int], EnvironmentAgent]
        Dictionary of environments, keyed by environment_id, that may have
        interacted with the patient. Currently this is not used, but could
        be useful in future if environment attributes could enrich the
        FHIR data, for example, the location
    validate : bool, optional
        Whether to validate each generated resource, by default True
    server_url : Optional[str], optional
        URL for server to validate each resource, by default None, in
        which case only offline validation is performed
    kwargs: dict
        Keyword arguments for the online validation method - gets passed to
        the python requests.post function. For example, if you want to pass
        custom headers with the request.

    Returns
    -------
    List[dict]
        List of FHIR resources
    """
    # NOTE: noqas above avoid circular import
    resources = []
    for entry in patient.record:
        resource = convert_patient_record_entry_to_fhir(
            entry, patient, environments
        )
        resources.append(resource)
    if validate:
        # honor the validate flag; previously validation always ran
        for resource in resources:
            FHIRHandler().validate(
                resource, resource["resourceType"], server_url, **kwargs
            )
    return resources
Python
def generate_patient_fhir_bundle(
    patient: "PatientAgent",  # noqa
    environments: "Dict[Union[str, int], EnvironmentAgent]" = None,  # noqa
    bundle_type: str = "transaction",
    validate: bool = True,
    server_url: Optional[str] = None,
    save_path: Optional[Union[str, Path]] = None,
    **kwargs,
):
    """Generate FHIR bundle from patient record

    Parameters
    ----------
    patient : PatientAgent
        The instance of the PatientAgent with this entry
    environments : Dict[Union[str, int], EnvironmentAgent]
        Dictionary of environments, keyed by environment_id, that may have
        interacted with the patient. Currently this is not used, but could
        be useful in future if environment attributes could enrich the FHIR
        data, for example, the location
    bundle_type : str, optional
        FHIR bundle type, by default "transaction"
    validate : bool, optional
        Whether to validate the bundle, by default True
    server_url : Optional[str], optional
        URL for server to validate each resource, by default None, in which
        case only offline validation is performed
    save_path : Optional[Union[str, Path]], optional
        JSON path if you want to save the bundle, by default None
    kwargs : dict
        Keyword arguments for the online validation method - gets passed to
        the python requests.post function, for example, if you want to pass
        custom headers with the request

    Returns
    -------
    dict
        FHIR bundle
    """
    # NOTE: noqas above avoid circular import

    resources = generate_patient_fhir_resources(
        patient,
        environments,
        validate=False,
        server_url=None,
    )
    bundle = create_fhir_bundle(resources, bundle_type)

    if save_path is not None:
        FHIRHandler().save(
            save_path, bundle, "Bundle", validate, server_url, **kwargs
        )
    elif validate:
        FHIRHandler().validate(
            bundle,
            "Bundle",
            server_url,
            **kwargs,
        )

    return bundle
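# Hedged usage sketch: builds a transaction bundle for an existing
# PatientAgent instance and writes it to an illustrative JSON path;
# FHIRHandler().save then performs the validation. The "entry" key is
# assumed per the FHIR Bundle structure, not confirmed from this file.
bundle = generate_patient_fhir_bundle(
    patient,
    environments=None,
    bundle_type="transaction",
    validate=True,
    server_url=None,
    save_path="patient_bundle.json",
)
print(f"Bundle has {len(bundle.get('entry', []))} entries")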
Python
def run_simulation_command(config_path: str):
    """Run the simulation one patient at a time, looping over patients.

    - Loads and validates the config json file
    - Initializes the patients, environments, intelligence layer and other
      variables as per the specification in the config file
    - Runs the simulation by looping over patients, with logging
    - Saves the outputs in the config file save_dir

    Parameters
    ----------
    config_path : str
        Path to simulation config file
    """
    print(f"Parsing config from {config_path}")
    config = parse_config(config_path)
    print("Starting simulation")
    simulate(config)
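# Hedged usage sketch: the config path below is hypothetical; the function
# parses the JSON config and hands the result straight to simulate().
run_simulation_command("configs/example_simulation_config.json")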
Python
def death(
    patient: PatientAgent,
    environment: EnvironmentAgent,
    patient_time: datetime.datetime,
):
    """Interaction causing patient death.

    Sets the patient attribute 'alive' to False and sets the 'end' field in
    the patient profile to the patient_time timestamp.

    Parameters
    ----------
    patient : PatientAgent
        Patient agent
    environment : EnvironmentAgent
        Environment agent (plays no part in the death interaction but
        required for a consistent interaction function signature)
    patient_time : datetime.datetime
        Current patient time

    Returns
    -------
    patient : PatientAgent
        Updated patient
    environment : EnvironmentAgent
        Environment (no update but required for a consistent interaction
        function signature)
    update_data : dict
        Contains new patient record entries (empty in this case)
    next_environment_id_to_prob : dict
        Dictionary mapping next environment IDs to probability (empty in
        this case)
    next_environment_id_to_time : dict
        Dictionary mapping next environment IDs to time delta (time to next
        environment) (empty in this case)
    """
    patient.record[0].entry["end"] = datetime_to_string(patient_time)
    patient.alive = False

    next_environment_id_to_prob = {}
    next_environment_id_to_time = {}
    update_data = {"new_patient_record_entries": []}

    return (
        patient,
        environment,
        update_data,
        next_environment_id_to_prob,
        next_environment_id_to_time,
    )
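# Hedged usage sketch: applies the death interaction during a simulation
# step; `patient` and `environment` are assumed to be existing agent
# instances, while `patient_time` is a concrete illustrative timestamp.
import datetime

patient_time = datetime.datetime(2021, 6, 1, 12, 0)
(
    patient,
    environment,
    update_data,
    next_environment_id_to_prob,
    next_environment_id_to_time,
) = death(patient, environment, patient_time)

assert patient.alive is False
assert next_environment_id_to_prob == {}
assert next_environment_id_to_time == {}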