Columns:
content: string, length 35 to 762k
sha1: string, length 40
id: int64, range 0 to 3.66M
from typing import Sequence
from typing import Tuple
import itertools

import numpy as np  # assumed import; the excerpt uses np without showing it
# `Array` below is a type alias defined elsewhere in the source package (assumed to be an ndarray alias).


def simulate_data(
        covariates: int, scales: Sequence[int], levels: Sequence[int],
        singletons: float, state: np.random.RandomState) -> Tuple[Array, Array, Array]:
    """Simulate IDs and data matrices."""

    # simulate fixed effects
    ids = np.array(list(itertools.product(*(
        np.repeat(np.arange(l), s) for s, l in zip(scales, levels)))))
    fe = np.array(list(itertools.product(*(
        np.repeat(state.normal(size=l), s) for s, l in zip(scales, levels)))))

    # count dimensions
    N, M = ids.shape

    # shuffle the IDs
    for index in range(M):
        indices = np.arange(N)
        state.shuffle(indices)
        ids[indices, index] = ids.copy()[:, index]

    # shuffle and replace shares of the data with singletons
    indices = np.arange(N)
    for index in range(M):
        state.shuffle(indices)
        singleton_indices = indices[:int(singletons * N / M)]
        ids[indices, index] = ids.copy()[:, index]
        ids[singleton_indices, index] = -np.arange(singleton_indices.size)

    # simulate remaining data
    error = state.normal(size=(N, 1))
    X = state.normal(size=(N, covariates))
    y = X.sum(axis=1, keepdims=True) + fe.sum(axis=1, keepdims=True) + error
    return ids, X, y
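A quick usage sketch (hypothetical values; assumes numpy is imported as np and the `Array` alias resolves to numpy arrays):

# The ID grid is the product of the repeated level sets: N = (2*5) * (3*4) = 120 rows, M = 2 ID columns.
state = np.random.RandomState(0)
ids, X, y = simulate_data(covariates=2, scales=[2, 3], levels=[5, 4],
                          singletons=0.1, state=state)
print(ids.shape, X.shape, y.shape)  # (120, 2) (120, 2) (120, 1)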
c5fbddde4045b2367975b28bcfc4ded032427505
28,203
def create_strike_ingest_job_message(ingest_id, strike_id):
    """Creates a message to create the ingest job for a strike

    :param ingest_id: ID of the ingest
    :type ingest_id: int
    :param strike_id: The ID of the strike
    :type strike_id: int
    """

    message = CreateIngest()
    message.create_ingest_type = STRIKE_JOB_TYPE
    message.ingest_id = ingest_id
    message.strike_id = strike_id

    return message
47e96f6fa53cc934572852cfcaf8ed9455408dec
28,204
def create_data(nfac0,   # [Number of facet 0 elements -- rows/persons]
                nfac1,   # [Number of facet 1 elements -- columns/items]
                ndim,   # [Number of dimensions to create]
                seed = None,   # [<None,int,{'Fac0':<None,int,array>,'Fac1':<None,int,array>}> => generates coordinates]
                facmetric = [4,-2],   # [[m,b] => rand() * m + b, to set range of facet coordinate values]
                noise = None,   # [<None, noise, {'Rows':<noise,{1:noise1,4:noise4}>,'Cols':<noise,{2:noise2,5:noise5}> => add error to rows/cols]
                validchars = None,   # [<None, ['All',[valid chars]]; or ['Cols', {1:['a','b','c','d'],2:['All'],3:['1.2 -- 3.5'],4:['0 -- '],...}]> ]
                mean_sd = None,   # [<None, ['All',[Mean,SD]], or ['Cols', {1:[Mean1,SD1],2:[Mean2,SD2],3:'Refer2VC',...}]> ]
                p_nan = 0.0,   # [Proportion of cells to make missing at random]
                nanval = -999.,   # [Numeric code for designating missing values]
                condcoord = None,   # [< None, 'Orthonormal'>]
                nheaders4rows = 1,   # [Number of header column labels to put before each row]
                nheaders4cols = 1,   # [Number of header row labels to put before each column]
                extra_headers = 0,   # [<0, int, {'0':0.25, '1':0.75}> => If headers > 1, range of ints for labels, randomly assigned or in blocks]
                input_array = None,   # [<None, name of data array, {'fac0coord':EntxDim row coords,'fac1coord':EntxDim col coords}>]
                apply_zeros = None,   # [<None, [row, {'sub1':[0,1,1],...}> => for each item group in row, where to apply zeros to coords]
                output_as = 'Damon',   # [<'Damon','datadict','array','textfile','dataframe','Damon_textfile','datadict_textfile','array_textfile'>]
                outfile = None,   # [<None, 'my_data.csv'> => name of the output file/path when output_as includes 'textfile'>]
                delimiter = None,   # [<None, delimiter character used to separate fields of output file, e.g., ',' or '\t'>]
                bankf0 = None,   # [<None => no bank,[<'All', list of F0 (Row) entities>]> ]
                bankf1 = None,   # [<None => no bank,[<'All', list of F1 (Col) entities>]> ]
                verbose = True,   # [<None, True> => print useful information and messages]
                condcoord_ = None   # [<None, condcoord args> deprecated, only for backward compatibility]
                ):
    """Create simulated model and noisy data objects analyzable by Damon.

    Returns
    -------
        create_data() returns a python dictionary where each of the
        following datasets is formatted either as a DamonObj, a datadict,
        an array, or a file, or combinations thereof.  'data' and 'model'
        may also be output as '.hd5' or pytables files to conserve memory.

            {'data':, => simulated 'observed' data, with error and missing added
             'model':, => simulated 'True' values, no error added
             'anskey':, => answer key, when data are alpha (simulating multiple choice data)
             'fac0coord':, => theoretical row coordinates
             'fac1coord': => theoretical column coordinates
             }

        output_as controls the output format, which can be as an array,
        a file, a DamonObj, or a datadict.

        When validchars specifies string responses, outputs are in a
        "multiple choice" alpha response format.  An "answer key" is
        included.

        Numerical and alpha files are constructed according to a linear
        model, with noise added as desired:

            data[r,c] = Row[r] * Col[c] + noise[r,c]

        Cells can also be made missing.  These create the output called
        'data'.  'model' is like 'data', but without noise or missing
        cells.

        "bankf0" and "bankf1" are used to create artificial person or
        item banks to test the anchoring capability of coord() and
        rasch().  They save Python "pickle" files in the current working
        directory called 'bankf0' and 'bankf1'.
Comments -------- create_data() is a function for creating artificial datasets for Damon. Compliant with the assumptions of the ALS decomposition, the data value of each cell is the dot product of its row and column coordinates, plus a specified level of random noise. It is important to remember that not all real-world datasets follow these assumptions, and there is an option to import datasets created using different rules into the function. Nonetheless, it is an excellent tool for learning Damon and its capacities under controlled conditions without having to worry about collecting external datasets. The function can produce ratio, interval, sigmoid, ordinal, and dichotomous data. It also outputs artificial item banks. Aside from files and arrays, create_data() can create DamonObj's to which Damon methods can be directly applied. It can also create datadicts and hd5 (pytables-style) files. Nominal data ------------ create_data() can create nominal data (non-numerical responses) in a limited sense, to mimic multiple-choice test responses. The way it works is that it first creates a dichotomous (0,1) matrix. Then, for each designated column, it converts the "1" into the "correct response" for that column (say "B", out of A, B, C). It converts the zeros into one of the remaining response options at random, A or C. That means the "correct" responses have an underlying mathematical meaning whereas the incorrect responses have no meaning at all. In real life, incorrect response options tend not to be random, so the create_data() nominal option will tend to be "noisier" than real life. Nonetheless, it is a reasonable and usable imitation of a multiple choice dataset. Use the score_mc() method to convert the alpha characters to numerical. parse() also works, but not as well. Nominal data can also take a different form where there is no "correct" option, e.g., an attitude survey. Here, each response has its own definite mathematical meaning. Currently, create_data() is unable to create such data. However, you can work around this limitation to a degree by simply creating a dichotomous response matrix and treating each column as it it were an item response, rather than an item. This is not quite an accurate representation of real-world nominal data since it does not require a "forced choice" between responses, but it will work okay for playing around. WARNING: Because equate() automatically adds new construct identifiers to the column or row keys, its likely to throw an exception if the original keys are int type. To avoid problems, create_data() forces all keys to be strings such that new_obj.colkeytype = 'S60'. Arguments --------- "nfac0" is the number of row entities to be created. (Row entities are considered to belong to "facet 0".) --------------- "nfac1" is the number of column entities to be created. (Column entities are considered to belong to "facet 1".) --------------- "ndim" is the number of dimensions to be used in creating the aritifical row and column coordinate arrays. --------------- "seed" controls the selection of random numbers when creating row and column coordinates. In Python, "seeds" are integers identifying a specific unique set of random numbers. If the seed is "None", the seed integer is chosen at random. seed supports using existing arrays for row (Fac0) and column (Fac1) coordinates. Both types of coordinate arrays are 2-D nEntities x nDims. seed = None => Use a new, unique set of random starter coordinates whenever executing the function. 
seed = 1 => For each run, use a particular set of random coordinates which is the same every time. seed = 2, 3, ... N => Each seed integer labels a different unique set of random coordinates. seed = {'Fac0':MyArray0,'Fac1':MyArray1} => Instead of generating random numbers, use numpy arrays MyArray0 and MyArray1. seed = {'Fac0':1,'Fac1':None} => Use seed 1 for Fac0, a random selection for Fac1. seed = {'Fac0':2,'Fac1':MyArray} => Use seed 2 for Fac0, MyArray for Fac1. WARNING: Damon's coord() function also uses seeds to build initial random number coordinate arrays, generally starting with seed = 1. If you create a Damon object with seed = 1, and coord() is using the same seed, you may get unreasonable agreement in your results. Yes, this has bitten me more than once. To avoid this, try to specify seeds in create_data (e.g., see=100) that coord() is not likely to use (coord() starts with seed=1 and iterates up from that, unless otherwise specified). --------------- "facmetric" governs the spread of the created data values. It multiplies the generating coordinates by a number (m) and adds a number (b): facmetric = [m,b] facmetric = [4,-2] => multiply each facet value by 4 and add -2. IMPORTANT NOTE: facmetric allows the user to model ratio (count-like) data by setting the b parameter to zero. When this is done, the log function is applied to the model and data arrays (prior to any further transformations controlled by validchars) and the data are interpreted as "ratio" rather than "interval". If b equals any value other than zero, the log function is not applied: facmetric = [4,0] => The log is taken of the resulting data and model arrays. facmetric = [4,-2] => The log is NOT taken of the resulting data and model arrays. facmetric = [4,0.0001] => The log is NOT taken of the resulting data and model arrays, but the coordinates all range from (almost) zero to four. --------------- "noise" is a way to add random error to specified rows or columns. It does so by multiplying a specified number ("noise") by a random number between -0.5 and 0.5. It is important to note that this is just one way of modeling noise and does not describe all possible noise scenarios. However, it is the only noise scenario supported by create_data() and works well for most purposes. The syntax is: noise = None => No noise (0.0) is added to the model array. noise = noise (int/float) => The specified integer or decimal is multiplied by a random number between -0.5 and 0.5 and added to the model array. noise = {'Rows':<noise,{'RowX':noiseX,...}>, 'Cols':<noise,{'ColY':noiseY,...}> } => noise is added to specified row and column entities, starting with rows. If 'Rows' is a number and specific row entities are not identified, the noise is added to all rows equally. Same with 'Cols'. When specific entities are identified, the specified noise is added to just those entities. Those row/column entities that are not identified are assigned a default level of noise of 0.0 except (in the case of 'Cols' only) where noise has already been added at the row level. Note: the row and column identifiers are the labels assigned by create_data() to each row/column. Examples: noise = 4.0 => Multiply by 4.0 a random number between -0.50 and 0.50 and add it to the whole model array. noise = {'Rows':4.0,'Cols':{'Col3':2.0,'Col5':6.0}} => Create a noise array by first adding 4.0 to all rows of a zero array. Then add 2.0 to all the 'Col3' cells and 6.0 to all the 'Col5' cells ('Col5' now has 4.0 + 6.0 = 10.0). 
Then multiply by a random number between -0.50 and 0.50. This is the noise array. Add the noise array to the model array. noise = {'Rows':{'Row1':4.0,'Row2':5.0},'Cols':{'Col3':2.0,'Col5':6.0}} => Create a noise array by first adding 4.0 to row entities 1 and 2 of a zero array. Then add 2.0 to all the 'Col3' cells and 6.0 to all the 'Col5' cells. Cell['Row1','Col3'] will be 4.0 + 2.0 = 6.0. All non-specified cells will be zero. Then multiply by a random number between -0.50 and 0.50. This is the noise array. Add the noise array to the model array. --------------- "validchars" specifies a list or range of valid characters for the whole coredata array or for each of the individual columns in the coredata array. (It does not apply to rows.) The function transforms the model values (with noise added) into the range implied by the validchars argument. It does this for the matrix as a whole or applies a different range for each column, allowing the function to create very heterogeneous datasets. Note the limitations on "nominal" data discussed in the comments. validchars does a lot and is important and used throughout Damon. In the context of this function it controls the range and metric of the artificially generated data values. It can also be used to create nominal data in the style of a multiple-choice test. Regardless of metric, the "model" output automatically mirrors the "data" output column by column to facilitate comparison. The syntax is: ['All',[list/range of possible responses]] or ['Cols',{'Col1':[list/range of responses],'Col2':[list/range of responses],...}] The {} brace signifies a Python dictionary. Example 1: validchars = None means that the existing values and ranges will be accepted as they are and no transformations are desired. Example 2: validchars = ['All',['All']] means that "all" values are permitted for all columns. If the companion specification mean_sd equals None, the target means and standard deviations are set at the existing means and standard deviations of the created data (i.e., there are no transformations). Example 2: ['All',['a','b','c','d']] means for all columns in the data array, create data in the form of responses from 'a' to 'd' such that one of these responses signifies a "correct" response. The "correct" response is chosen at random by the function and recorded in the anskey output as the "correct" response for that column. The model output in this case consists not of letters but of the "true" (pre-noise) probability of a cell containing the "correct" response. (Bear in mind we assigned the response termed "correct" to those cells with "observed" probability (including noise) greater than 0.50 of "succeeding" on the item.) Therefore, where Damon is successful in its predictions, cells containing the correct response for a column should have a model probability of success greater than 0.50. The create_data() function currently only supports the "answer key" paradigm for creating nominal responses, i.e. where there is one "correct" response and this response corresponds to success probabilities greater than 0.50, all other responses being assigned at random to the probabilities less than 0.50. In this paradigm, non-correct responses have no intrinsic meaning or correct probability, aside from being less than 0.50. To explore other nominal scenarios, you may need to create a dichotomous data set, then collapse groups of columns into a single column assigning each cell the most likely response category value. 
Example 3: ['All',[0,1]] ['All',['0 -- 1']] ['Cols',{1:[0,1],2:[0,1],3:['0 -- 1'],...}] means for all columns in the data array convert the continuous linear data (model + noise) into the equivalent of dichotomous responses. Notice that the 'All' syntax and the 'Cols' syntax yield the same results. (This differs from how validchars is used in other Damon functions, where 'All' and 'Cols' yield mathematically different results.) Also notice that [0,1] means the same thing as ['0 -- 1'] so long as the 0 and 1 in the second case are integers and not floats (with decimal points). ['0.0 -- 1.0'] means that results will be in a continuous range from 0 to 1 instead of being (0/1) integers. Note: It is important to type the dash properly; it is one space followed by two hyphens followed by one space, enclosed in single quotes ( ' -- ' ): (space, hyphen, hyphen, space). Any deviation will cause an error. The underlying formula for converting to dichotomous involves standardizing, converting to probabilities, and rounding to 0 or 1. Example 4: ['All',[0,1,2,3]] ['All',['0 -- 3']] means for all columns transform data into integers ranging from 0 to 3. Example 5: ['All',['0.0 -- ']] means for all columns transform the data into continuous values ranging from 0 to infinity. This is a ratio scale and behaves differently from the model's default interval scale which ranges from -Infinity to +Infinity. The conversions are done using a log function (log(ratio) = interval). This is helpful to model "count" data, which starts at 0. But in the case of counts, you would want to specify ['All',['0 -- ']] without the decimal point, to indicate that data should be rounded to the nearest integer. Example 6: ['Cols',{1:['1.0 -- 10.0'],2:['1.0 -- 10.0']}] means for columns 1 and 2 (in a 2-column array), make the data values range continuously from 1.0 to 10.0. Note that relative to the untransformed metric, this is a "sigmoid" metric which crunches at the top and bottom of the scale. Example 7: ['Cols',{1:['a','b','c'],2:[0,1],3:['1.0 -- 5.0'],4:['All']}] means for Column 1 make the values be 'a', 'b', or 'c'; for Column 2 make the data dichotomous, for Column 3 make it range continuously from 1.0 to 5.0, and for Column 4 let the data range continuously from -Infinity to +Infinity (i.e., keep the model + noise values as they are). To refrain from any transformation of the model + noise data, use validchars = None. --------------- "mean_sd" is used to specify a target mean and standard deviation for each column, or for the array as a whole, when the data are on an interval or ratio scale. It is used in conjunction with the validchars argument. Take care to keep them consistent. Where validchars specifies 'All', mean_sd should provide a mean and standard deviation. Where mean_sd specifies 'Refer2VC', validchars should have a list of valid characters or a range, not 'All'. When the data are on a ratio scale (ranging from 0 to infinity), the mean and standard deviation do not apply to the ratio values but to the log(ratio) values. So to obtain a certain mean and SD on the ratio scale, enter the log(Mean) and log(SD) in the mean_sd argument. This transformation is necessary because means and standard deviations are meaningful only on an interval scale. Options: mean_sd = None => The column means and standard deviations are left unchanged. = ['All',[Mean,SD]] => Make the array as a whole have a specified mean and standard deviation. Column means/SDs will vary. 
= ['Cols',[Mean,SD]] => Make each column have the specified mean and standard deviation. = ['All','Refer2VC'] or ['Cols','Refer2VC'] => No Means or SDs are specified. Use the validchars specification to decide what to do. If validchars = 'All' for the whole array or a column, the metric is left unchanged. = ['Cols',{1:[Mean1,SD1],2:[Mean2,SD2],3:'Refer2VC',...}] => Make Column1 have Mean1 and SD1. Make Column2 have Mean2 and SD2. For Column 3, do not make it have any mean or standard deviation, presumably because it is not in an interval or ratio metric. Instead, specify the placeholder 'Refer2VC', which means look up the minimum and maximum values in validchars and use those to specify the range of the scale. If 'Refer2VC' is specified for a column that has not been assigned a min or max in validchars, a mean of 0 and SD of 1 will automatically be assigned. --------------- "p_nan" is the proportion of cells to make missing at random. It actually isn't a percent: p_nan = 0.10 => make 0.10 or 10% of cells randomly missing. --------------- "nanval" is the Not-a-Number Value used to indicate a missing cells. It has to be of the same type as the rest of the array. Default is -999.0 . --------------- "condcoord" provides the option of making the row coordinates orthonormal, or not. Options are: condcoord = <None, 'Orthonormal'> For 'Orthonormal', the matrix procedure is numpy's QR decomposition, where matrix A = QR, Q is the orthonormal transformation of A, and R is an upper-diagonal matrix that performs the transformation. Q is equivalent to a "modified Gram-Schmidt" orthogonalization of A. --------------- "nheaders4rows" is the number of header labels to insert to the left of the data to label rows. Default is 1. --------------- "nheaders4cols" is the number of header labels to insert to the above the data to label columns. Default is 1. --------------- "extra_headers", short for "extra header integer range", is a way of inserting a specified number of integer values as labels in the row and column headers, but it only applies to those rows or columns of the header labels that are in excess of the leading header containing the unique ID, and does not apply when nheaders4rows or nheaders4rows = 1. This argument is used to test Damon functions that call on row or column attributes. The argument can be used to assign headers randomly or in specified blocks. extra_headers = int => tells the function to create and assign headers at random. extra_headers = 0, 1, -1 => The 0, 1, and -1 specifications all result in only one integer value (0) as an attribute, so there's really no point in using them. extra_headers = 2 => create extra header attributes consisting of integers 0 and 1 extra_headers = 3 => create extra header attributes consisting of integers 0, 1, 2. extra_headers = -3 => create extra header attributes consisting of 0, -1, -2. extra_headers = {'header':proportion, ...} => tells the function to create and assign headers in blocks. extra_headers = {'0':0.25, '1':0.75} => Say there is an extra header row for columns and that there are 100 columns, i.e., nfac1 = 100. This says assign '0' to the first 25 columns and '1' to the remaining 75 columns. Make sure the proportions add to 1.0 and that they break the columns cleanly into sections. The same arrangement of '0's and '1's will be applied to the row headers if nheaders4rows > 1. 
--------------- "input_array" makes it possible to import a data array or a set of row and column coordinates generated outside the function, and use those to create the model outputs. The noise, validchars, and other parameters are applied to the resulting model values to create a data array. This makes it possible to experiment with arrays built with nonlinear functions, to set the size of each individual row and column coordinate (to model a range of person abilities and item difficulties, for instance), and to experiment with setting some coordinates to zero to model situations where entities do not participate in a common D-dimensional space. Options: input_array = None => Do not input (import) an array or set of coordinates. input_array = MyArray => Input the MyArray numpy array as the core "model" data. Do not include row or column labels. input_array = {'fac0coord':MyRowCoords,'fac1coord':MyColCoords} => MyRowCoords and MyColCoords are two nEntities x nDimensions numpy arrays. Their dot product becomes the "model" data. Do not include row or column labels. NOTE: When input_array is used, it automatically overwrites the nfac0, nfac1, and ndim parameters. input_array does not support pytables, so output_as = 'hd5' becomes output_as = 'Damon'. --------------- "apply_zeros" is used to simulate data with dimensionally distinct subspaces. A given group of items is said to be "in the same space" if they have non-zero values on the same dimensions and zero values for all other dimensions. Two subspaces differ if, for one or more dimensions, one of the subspaces has non-zero values while the other has zeros. Damon's matrix decomposition depends strongly on all items sharing a common space. If they don't, the individual subspaces need to be analyzed separately and pull information from other subspaces using a different procedure. This is done using the sub_coord() method. The format is: apply_zeros = [row, {'sub1':[zeros, ones], ...}] apply_zeros = None => Do not apply zeros to coordinates. All items resided in a common space. apply_zeros = [1, {'0':[0,1,1], '1':[1,0,1]}] => Row 1 (counting from 0) contains subscale labels '0', '1'. This is controlled using the nheaders4cols and extra_headers args. For each '0' item, zero out the first dimension and keep the remaining dimensions as they are. For each '1' item, zero out the second dimension and keep the remaining dimensions as they are. The number of zeros and ones must equal the number of dimensions specified in ndim. --------------- "output_as" specifies whether to output created data as a Damon object, array, or file. Options: 'array' => Output as a numpy array (includes labels). 'Damon' => Output created data as a fully formatted data object, equivalent to Damon(). 'datadict' => Output created data as a "data dictionary" but not instantiated as an object. 'dataframe' => Output data as a Pandas dataframe 'textfile' => Output as a text file. 'array_textfile' => Output as an array and a text file. 'Damon_textfile' => Output as both a DamonObj and a text file. 'datadict_textfile' => Output as both a DamonObj and a text file. [WARNING: the 'hd5' option has been deprecated.] 'hd5' => Output using pytables in Hierarchical data format_, suitable for large arrays that may not fit in memory. If this option is used, the file name given in outfile must have a .hd5 extension. In addition to outputting a .hd5 file, a datadict is returned. WARNING: When 'hd5' is specified, not all create_data() functionality is preserved. 
The following simplifications are imposed: * The condcoord arg is ignored. * The noise arg for specified rows is ignored. ------------------ "outfile" is the name of the output file or path (if saving to a directory that is not the current working directory), if 'textfile' is specified in output_as. Options: None => output_as does not include 'textfile'. 'MyFileName.txt', 'MyDocs/MyFileName.txt' => Results are output to a file with the specified name or path. 'MyFile.hd5' => Results are output as a pytables 'hd5' file (output_as = 'hd5'). --------------- "delimiter" is the character used to delimit columns when a file is created. When comma (',') is used, the file name should have a .csv extension. When tab ('\t') is used, the file name should have a .txt extension. Use None when no file is specified. NOTE: Tab delimiters are safer as otherwise the validchars column in the answer key may accidentally be parsed. --------------- "bankf0", when not equal to None, automatically creates a pickle file called 'bankf0' to store the coordinates of specified facet 0 (row) entities: bankf0 = None => do not create a facet 0 bank. bankf0 = ['All'] => create a pickle file called 'bankf0' and store all the row coordinates in it, assigned to the entity ID. bankf0 = [1,3,5] => create a pickle file called 'bankf0' and store the row coordinates for entities 1, 3, and 5, assigned to their entity ID's. --------------- "bankf1", when not equal to None, automatically creates a pickle file called 'bankf1' to store the coordinates of specified facet 1 (column) entities: bankf1 = None => do not create a facet 1 bank. bankf1 = ['All'] => create a pickle file called 'bankf1' and store all the column coordinates in it, assigned to the entity ID. bankf1 = [10,13,15] => create a pickle file called 'bankf1' and store the column coordinates for entities 10, 13, and 15, assigned to their entity ID's. --------------- "verbose" <None, True> tells create_data() to print out useful information and messages. It also passes the verbose parameter to downstream DamonObj's. 
    Examples
    --------

    Paste function
    --------------
        create_data(nfac0,   # [Number of facet 0 elements -- rows/persons]
                    nfac1,   # [Number of facet 1 elements -- columns/items]
                    ndim,   # [Number of dimensions to create]
                    seed = None,   # [<None => randomly pick starter coordinates; int => integer of "seed" random coordinates>]
                    facmetric = [4,-2],   # [[m,b] => rand() * m + b, to set range of facet coordinate values]
                    noise = None,   # [<None, noise, {'Rows':<noise,{1:noise1,4:noise4}>,'Cols':<noise,{2:noise2,5:noise5}> => add error to rows/cols]
                    validchars = None,   # [<None, ['All',[valid chars]]; or ['Cols', {1:['a','b','c','d'],2:['All'],3:['1.2 -- 3.5'],4:['0 -- '],...}]> ]
                    mean_sd = None,   # [<None, ['All',[Mean,SD]], or ['Cols', {1:[Mean1,SD1],2:[Mean2,SD2],3:'Refer2VC',...}]> ]
                    p_nan = 0.0,   # [Proportion of cells to make missing at random]
                    nanval = -999.,   # [Numeric code for designating missing values]
                    condcoord = None,   # [< None, 'Orthonormal'>]
                    nheaders4rows = 1,   # [Number of header column labels to put before each row]
                    nheaders4cols = 1,   # [Number of header row labels to put before each column]
                    extra_headers = 0,   # [<0, int, {'0':0.25, '1':0.75}> => If headers > 1, range of ints for labels, randomly assigned or in blocks]
                    input_array = None,   # [<None, name of data array, {'fac0coord':EntxDim row coords,'fac1coord':EntxDim col coords}>]
                    apply_zeros = None,   # [<None, [row, {'sub1':[0,1,1],...}> => for each item group in row, where to apply zeros to coords]
                    output_as = 'Damon',   # [<'Damon','datadict','array','textfile','dataframe','Damon_textfile','datadict_textfile','array_textfile'>]
                    outfile = None,   # [<None, 'my_data.csv'> => name of the output file/path when output_as includes 'textfile'>]
                    delimiter = None,   # [<None, delimiter character used to separate fields of output file, e.g., ',' or '\t'>]
                    bankf0 = None,   # [<None => no bank,[<'All', list of F0 (Row) entities>]> ]
                    bankf1 = None,   # [<None => no bank,[<'All', list of F1 (Col) entities>]> ]
                    verbose = True,   # [<None, True> => print useful information and messages]
                    )
    """

    # For backward compatibility
    if condcoord_ is not None:
        condcoord = condcoord_

    if verbose is True:
        print 'create_data() is working...\n'

    # Run utility
    create_data_out = dmn.utils._create_data(locals())

    if verbose is True:
        print '\ncreate_data() is done.'
        print 'Contains:\n', create_data_out.keys(), '\n'

    return create_data_out
d21b3a9c3172087021ba92caa74b4be95cac993c
28,205
def load_database() -> pd.DataFrame:
    """
    Loads data from hplib_database.

    Returns
    -------
    df : pd.DataFrame
        Content of the database
    """
    df = pd.read_csv(cwd() + r'/hplib_database.csv')
    return df
5bc5ec4e8376493a9d7da9a8782c2e0f4fb8223d
28,206
import re


def ensure_windows_file_path_format_encoding_as_url(path: str) -> str:
    """Relevant for Windows where a file path name should be
    "file://path/to/file.html" instead of "file://path\\to\\file.html"."""
    output = path.replace("\\", "/")  # Raw replace backslash with slash.
    # Handle exception for "file:///C:/path/to/file.html" declaration in URLs:
    if re.match(r"^file:/+[A-Za-z]:", output, re.IGNORECASE):
        # Pass IGNORECASE via the flags keyword; positionally it would be taken as the count argument.
        output = re.sub(r"^file:/+", "file:///", output, flags=re.IGNORECASE)
    return encode_path_as_url(output)
84e429c5d8e4b0fc2fd8976b140694f0f7c1c04e
28,207
def index():
    """
    index page for WebUI

    Returns:
        rendered HTML page
    """
    data = load_messages().message_contents
    return render_template("index.html", data=data, encoded_data=data)
c92515fc49aacded4e9242955f6b2d5ea125897a
28,209
def psnr(x, pred_x, max_val=255):
    """ PSNR """
    val = tf.reduce_mean(tf.image.psnr(x, pred_x, max_val=max_val))
    return val
7b5d44917c88d7644e5c1694a6e6d5873c0db952
28,210
def across_series_nearest_neighbors(Ts, Ts_idx, subseq_idx, m):
    """
    For multiple time series find, per individual time series, the subsequences closest
    to a query.

    Parameters
    ----------
    Ts : list
        A list of time series for which to find the nearest neighbor subsequences that
        are closest to the query subsequence `Ts[Ts_idx][subseq_idx : subseq_idx + m]`

    Ts_idx : int
        The index of time series in `Ts` which contains the query subsequence
        `Ts[Ts_idx][subseq_idx : subseq_idx + m]`

    subseq_idx : int
        The subsequence index in the time series `Ts[Ts_idx]` that contains the query
        subsequence `Ts[Ts_idx][subseq_idx : subseq_idx + m]`

    m : int
        Subsequence window size

    Returns
    -------
    nns_radii : ndarray
        Nearest neighbor radii to subsequences in `Ts` that are closest to the query
        `Ts[Ts_idx][subseq_idx : subseq_idx + m]`

    nns_subseq_idx : ndarray
        Nearest neighbor indices to subsequences in `Ts` that are closest to the query
        `Ts[Ts_idx][subseq_idx : subseq_idx + m]`
    """
    k = len(Ts)
    Q = Ts[Ts_idx][subseq_idx : subseq_idx + m]
    nns_radii = np.zeros(k, dtype=np.float64)
    nns_subseq_idx = np.zeros(k, dtype=np.int64)

    for i in range(k):
        dist_profile = distance_profile(Q, Ts[i], len(Q))
        nns_subseq_idx[i] = np.argmin(dist_profile)
        nns_radii[i] = dist_profile[nns_subseq_idx[i]]

    return nns_radii, nns_subseq_idx
4cf3f8162b33f3b313f07160bab7e8042ce08f2b
28,211
def get_boundary_from_response(response):
    """
    Parses the response header and returns the boundary.

    :param response: response containing the header that contains the boundary
    :return: a binary string of the boundary
    """
    # Read only the first value with key 'content-type' (duplicate keys are allowed)
    content = response.headers.pop('content-type')[0]

    # Find the start and end index of the boundary
    b_start = content.find(b'boundary=')
    b_end = content[b_start:].find(b';')

    # Separate out boundary
    if b_end == -1:
        # If the end point is not found, just go to the end of the content string
        boundary = content[b_start+9:]
    else:
        boundary = content[b_start+9:b_start+b_end]

    return boundary
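A small illustrative sketch with a hypothetical stand-in response object (the real response class comes from the surrounding HTTP client code):

class _FakeResponse:
    # hypothetical stand-in; only mimics the headers attribute used above
    headers = {'content-type': [b'multipart/related; boundary=gc0p4Jq0M2Yt08j; start=info']}

print(get_boundary_from_response(_FakeResponse()))  # b'gc0p4Jq0M2Yt08j'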
66a0112598b2210cca1a2210f6af963dfee641f7
28,212
import logging
import json


def get_message(message):
    """{
        'pattern': None,
        'type': 'subscribe',
        'channel': 'my-second-channel',
        'data': 1L,
    }"""
    if not message:
        return

    logging.info('MSG: %s', message)
    data = message.get('data', {})
    return json.loads(data)
2e79ed94fbfc3fba122e8bd8663e33b124d4d2b6
28,213
def parent_node(selector):
    """
    Finds the parent_node of the given selector.
    """
    if not get_instance():
        raise Exception("You need to start a browser first with open_browser()")
    return parent_node_g(get_instance(), selector)
0d6136aa5262a4b4482166108715b3323328983b
28,214
def getPairCategory(rollSorted):
    """ Converts a roll's ordered list of frequencies to the pairwise hand category. """
    if rollSorted[0] == 6:
        return "six-of-a-kind"
    elif rollSorted[0] == 5:
        return "five-of-a-kind"
    elif rollSorted[0] == 4 and rollSorted[1] == 2:
        return "four-two full house"
    elif rollSorted[0] == 4:
        return "four-of-a-kind"
    elif rollSorted[0] == 3 and rollSorted[1] == 3:
        return "double threes-of-a-kind"
    elif rollSorted[0] == 3 and rollSorted[1] == 2:
        return "three-two full house"
    elif rollSorted[0] == 3:
        return "three-of-a-kind"
    elif rollSorted[0] == 2 and rollSorted[1] == 2 \
            and rollSorted[2] == 2:
        return "three pairs"
    elif rollSorted[0] == 2 and rollSorted[1] == 2:
        return "two pairs"
    elif rollSorted[0] == 2:
        return "one pair"
    else:
        return "high card"
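For example, with frequency lists sorted in descending order (illustrative inputs):

print(getPairCategory([3, 2, 1]))           # "three-two full house"
print(getPairCategory([2, 2, 1, 1]))        # "two pairs"
print(getPairCategory([1, 1, 1, 1, 1, 1]))  # "high card"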
1c48abd8d0c1a27a50ce587857852a95e8949e74
28,215
def allocz(size):
    """Alloc zeros with range"""
    return [0 for _ in range(size)]
21670a20ea045ee7f2cf0780a011f89f917b7180
28,216
def uint_to_little_endian_bytearray(number, size):
    """Converts an unsigned integer to a little endian bytearray.

    Arguments:
    number -- the number to convert
    size -- the length of the target bytearray
    """
    if number > (2 ** (8 * size) - 1):
        raise ValueError("Integer overflow")

    nle = [0] * size
    for i in range(size):
        nle[i] = number >> i*8 & 0xFF

    return nle
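A brief usage sketch:

print(uint_to_little_endian_bytearray(0x0102, 4))  # [2, 1, 0, 0]  (least significant byte first)
uint_to_little_endian_bytearray(256, 1)            # raises ValueError("Integer overflow")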
bd3314fedf0accbc0d15b1bb146f54f52cb3bce1
28,217
import re


def to_alu_hlu_map(input_str):
    """Converter for alu hlu map

    Convert following input into a alu -> hlu map:
    Sample input:

    ```
    HLU Number     ALU Number
    ----------     ----------
        0               12
        1               23
    ```

    ALU stands for array LUN number
    hlu stands for host LUN number

    :param input_str: raw input from naviseccli
    :return: alu -> hlu map
    """
    ret = {}
    if input_str is not None:
        pattern = re.compile(r'(\d+)\s*(\d+)')
        for line in input_str.split('\n'):
            line = line.strip()
            if len(line) == 0:
                continue
            matched = re.search(pattern, line)
            if matched is None or len(matched.groups()) < 2:
                continue
            else:
                hlu = matched.group(1)
                alu = matched.group(2)
                ret[int(alu)] = int(hlu)
    return ret
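For instance, feeding it the sample naviseccli output from the docstring:

raw = ("HLU Number     ALU Number\n"
       "----------     ----------\n"
       "    0               12\n"
       "    1               23\n")
print(to_alu_hlu_map(raw))  # {12: 0, 23: 1}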
8e211b7efa3f8dd23c042f046d881daf987062bc
28,218
import copy


def randomize(jet):
    """build a random tree"""
    jet = copy.deepcopy(jet)

    leaves = np.where(jet["tree"][:, 0] == -1)[0]
    nodes = [n for n in leaves]
    content = [jet["content"][n] for n in nodes]
    nodes = list(range(len(nodes)))  # re-index the leaves; a list is needed because it is appended to below
    tree = [[-1, -1] for n in nodes]
    pool = [n for n in nodes]
    next_id = len(nodes)

    while len(pool) >= 2:
        i = np.random.randint(len(pool))
        left = pool[i]
        del pool[i]
        j = np.random.randint(len(pool))
        right = pool[j]
        del pool[j]

        nodes.append(next_id)
        c = (content[left] + content[right])

        # if len(c) == 5:
        #     c[-1] = -1

        content.append(c)
        tree.append([left, right])
        pool.append(next_id)
        next_id += 1

    jet["content"] = np.array(content)
    jet["tree"] = np.array(tree).astype(int)
    jet["root_id"] = len(jet["tree"]) - 1

    return jet
d4e8d12f8701d140e965e773e9a2542b133d8535
28,220
from typing import Optional


def get_vt_retrohunt_files(vt_key: str, r_id: str, limit: Optional[int] = 100):
    """Retrieve file objects related to a retrohunt from VT."""
    url = f"https://www.virustotal.com/api/v3/intelligence/retrohunt_jobs/{r_id}/matching_files?limit={limit}"
    data = vt_request(api_key=vt_key, url=url)
    if "error" in data:
        print_err(f"[ERR] Error occurred while receiving notifications: {data['error']}")
        return []
    return data["data"]
f29cdd1db8fc1b0559a49422df24a16e4708b493
28,222
import math def proj_make_3dinput_v2(project, angle = 15, start_slice = [0,0,0], crop_slice = [0.75, 0.625, 0.625]): """ This function unprojects 2d data into 3d voxel at different angles/views and do crop. :param project: 2d image input :param angle: the angle of different view. set max(1) as 0. :param start_slice: start slice of three dimension :param crop_slice: crop ratio of three dimension :return pred_proj: 3d output """ angle1 = angle h = project.shape[0] w = project.shape[1] l = project.shape[1] if angle <= 45: label = project l1 = round((1.0/math.tan(math.radians(angle)))*l) L = round((w**2+l1**2)**0.5*angle/45) p = round((L-l)/2) project = np.pad(project,((0,0),(p,p)),'constant', constant_values=(0,0)) pred_proj = cv2.resize(project,(l1+w-1, h)) # crop s1 = round(start_slice[1]*w) s2 = round(start_slice[2]*l1) pred_proj = pred_proj[round(start_slice[0]*h):round((start_slice[0]+crop_slice[0])*h),:] input3d = np.zeros((round(crop_slice[0]*h), round(crop_slice[1]*w), round(crop_slice[2]*l1)), dtype=np.float32) for i in range(round(crop_slice[1]*w+crop_slice[2]*l1-1)): relen = input3d.diagonal(round(i-crop_slice[1]*w+1),1,2).shape[1] row, col = np.diag_indices(relen) if i < (input3d.shape[1]-1): input3d[:,row-(i-input3d.shape[1]+1), col] = np.expand_dims(pred_proj[:,i+w-s1+s2-input3d.shape[1]],1).repeat(relen, axis=1) elif i >= (input3d.shape[1]-1): input3d[:,row, col+(i-input3d.shape[1]+1)] = np.expand_dims(pred_proj[:,i+w-s1+s2-input3d.shape[1]],1).repeat(relen, axis=1) input3d_itk = sitk.GetImageFromArray(input3d) input3d_itk = resize_image_itk(input3d_itk, (round(crop_slice[2]*l), round(crop_slice[1]*w), round(crop_slice[0]*h)),resamplemethod=sitk.sitkLinear) input3d = sitk.GetArrayFromImage(input3d_itk) elif (angle > 45) & (angle < 90): label = project angle = 90-angle w1 = round((1.0/math.tan(math.radians(angle)))*l) L = round((w1**2+l**2)**0.5*angle/45) p = round((L-l)/2) project = np.pad(project,((0,0),(p,p)),'constant', constant_values=(0,0)) pred_proj = cv2.resize(project,(l+w1-1, h)) # crop s1 = round(start_slice[1]*w1) s2 = round(start_slice[2]*l) pred_proj = pred_proj[round(start_slice[0]*h):round((start_slice[0]+crop_slice[0])*h),:] input3d = np.zeros((round(crop_slice[0]*h), round(crop_slice[1]*w1), round(crop_slice[2]*l)), dtype=np.float32) for i in range(round(crop_slice[1]*w1+crop_slice[2]*l-1)): relen = input3d.diagonal(round(i-crop_slice[1]*w1+1),1,2).shape[1] row, col = np.diag_indices(relen) if i < (input3d.shape[1]-1): input3d[:,row-(i-input3d.shape[1]+1), col] = np.expand_dims(pred_proj[:,i+w1-s1+s2-input3d.shape[1]],1).repeat(relen, axis=1) elif i >= (input3d.shape[1]-1): input3d[:,row, col+(i-input3d.shape[1]+1)] = np.expand_dims(pred_proj[:,i+w1-s1+s2-input3d.shape[1]],1).repeat(relen, axis=1) input3d_itk = sitk.GetImageFromArray(input3d) input3d_itk = resize_image_itk(input3d_itk, (round(crop_slice[2]*l), round(crop_slice[1]*w), round(crop_slice[0]*h)),resamplemethod=sitk.sitkLinear) input3d = sitk.GetArrayFromImage(input3d_itk) elif angle == 90: label = project project = np.flip(project, 1) pred_proj = project[round(start_slice[0]*h):round((start_slice[0]+crop_slice[0])*h), round(start_slice[1]*w):round((start_slice[1]+crop_slice[1])*w)] input3d = np.expand_dims(pred_proj, 2).repeat(pred_proj.shape[1], axis=2) elif (angle > 90) & (angle <= 135): label = project angle = angle - 90 project = np.flip(project, 1) w1 = round((1.0/math.tan(math.radians(angle)))*l) L = round((w1**2+l**2)**0.5*angle/45) p = round((L-l)/2) project = 
np.pad(project,((0,0),(p,p)),'constant', constant_values=(0,0)) pred_proj = cv2.resize(project,(l+w1-1, h)) # crop start_slice[1] = 1 - (start_slice[1] + crop_slice[1]) s1 = round(start_slice[1]*w1) s2 = round(start_slice[2]*l) pred_proj = pred_proj[round(start_slice[0]*h):round((start_slice[0]+crop_slice[0])*h),:] input3d = np.zeros((round(crop_slice[0]*h), round(crop_slice[1]*w1), round(crop_slice[2]*l)), dtype=np.float32) for i in range(round(crop_slice[1]*w1+crop_slice[2]*l-1)): relen = input3d.diagonal(round(i-crop_slice[1]*w1+1),1,2).shape[1] row, col = np.diag_indices(relen) if i < (input3d.shape[1]-1): input3d[:,row-(i-input3d.shape[1]+1), col] = np.expand_dims(pred_proj[:,i+w1-s1+s2-input3d.shape[1]],1).repeat(relen, axis=1) elif i >= (input3d.shape[1]-1): input3d[:,row, col+(i-input3d.shape[1]+1)] = np.expand_dims(pred_proj[:,i+w1-s1+s2-input3d.shape[1]],1).repeat(relen, axis=1) input3d = np.flip(input3d, 1) input3d_itk = sitk.GetImageFromArray(input3d) input3d_itk = resize_image_itk(input3d_itk, (round(crop_slice[2]*l), round(crop_slice[1]*w), round(crop_slice[0]*h)),resamplemethod=sitk.sitkLinear) input3d = sitk.GetArrayFromImage(input3d_itk) start_slice[1] = 1 - (start_slice[1] + crop_slice[1]) elif (angle > 135) & (angle < 180): label = project angle = 180 - angle project = np.flip(project, 1) l1 = round((1.0/math.tan(math.radians(angle)))*l) L = round((w**2+l1**2)**0.5*angle/45) p = round((L-l)/2) project = np.pad(project,((0,0),(p,p)),'constant', constant_values=(0,0)) pred_proj = cv2.resize(project,(l1+w-1, h)) # crop start_slice[1] = 1 - (start_slice[1] + crop_slice[1]) s1 = round(start_slice[1]*w) s2 = round(start_slice[2]*l1) pred_proj = pred_proj[round(start_slice[0]*h):round((start_slice[0]+crop_slice[0])*h),:] input3d = np.zeros((round(crop_slice[0]*h), round(crop_slice[1]*w), round(crop_slice[2]*l1)), dtype=np.float32) for i in range(round(crop_slice[1]*w+crop_slice[2]*l1-1)): relen = input3d.diagonal(round(i-crop_slice[1]*w+1),1,2).shape[1] row, col = np.diag_indices(relen) if i < (input3d.shape[1]-1): input3d[:,row-(i-input3d.shape[1]+1), col] = np.expand_dims(pred_proj[:,i+w-s1+s2-input3d.shape[1]],1).repeat(relen, axis=1) elif i >= (input3d.shape[1]-1): input3d[:,row, col+(i-input3d.shape[1]+1)] = np.expand_dims(pred_proj[:,i+w-s1+s2-input3d.shape[1]],1).repeat(relen, axis=1) input3d = np.flip(input3d, 1) input3d_itk = sitk.GetImageFromArray(input3d) input3d_itk = resize_image_itk(input3d_itk, (round(crop_slice[2]*l), round(crop_slice[1]*w), round(crop_slice[0]*h)),resamplemethod=sitk.sitkLinear) input3d = sitk.GetArrayFromImage(input3d_itk) start_slice[1] = 1 - (start_slice[1] + crop_slice[1]) elif angle == 180: label = project project = np.flip(project, 1) pred_proj = project[round(start_slice[0]*h):round((start_slice[0]+crop_slice[0])*h), round(start_slice[2]*l):round((start_slice[2]+crop_slice[2])*l)] input3d = np.expand_dims(pred_proj, 1).repeat(pred_proj.shape[1], axis=1) return input3d
3867cd03cc9c7833a29c9f5d4d078b5caabe2b73
28,223
from ch07.digraph_search import topological_sort as ts


def topological_sort(digraph):
    """Link in with Topological sort."""
    return ts(digraph)
bb16ca7c44adf37893d47cfacd7d81ae0d646af6
28,224
def merge_scores(scores_test, scores_val):
    """ Aggregate scores """
    scores_valtest = {}
    for key in scores_test:
        key_valtest = "final/" + key.split("/")[1]
        if key.startswith("test/"):
            keyval = "val/" + key.split("/")[1]
            value = 0.5 * (scores_test[key]["value"] + scores_val[keyval]["value"])
            if scores_test[key]["string"].endswith("%"):
                value_str = f"{value:05.2%}"
            else:
                value_str = f"{value:.6}"
            stats = {"value": value, "string": value_str}
            scores_valtest[key_valtest] = stats
        else:
            scores_valtest[key_valtest] = scores_test[key]
    return scores_valtest
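A minimal illustration of the averaging, with hypothetical keys and values:

scores_test = {"test/acc": {"value": 0.80, "string": "80.00%"}}
scores_val = {"val/acc": {"value": 0.90, "string": "90.00%"}}
print(merge_scores(scores_test, scores_val))
# roughly {'final/acc': {'value': 0.85, 'string': '85.00%'}}  (the test/val average)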
be0dded69367e7554c0cc2632946d46954a3cc15
28,225
def validateDate(data, name, published):
    """ Verify that a date string is valid. """
    # Verify that if published exists it's a valid date
    date = parse_datetime(published)
    if not date:
        raise InvalidField(name, published)

    return published
d8a46a491f54cc571f2560a816d76c93a8af79cf
28,227
def join(
    group_id: int,
    db: Session = Depends(get_db),
    user: UserModel = Depends(get_active_user),
):
    """Join the group."""
    return service.join(db, group_id=group_id, user_id=user.id)
cc3d329b36d04047030e8af4e2c0d0511605aa49
28,229
def calc_correlation(cs, lab):
    """
    calc the spearman's correlation
    :param cs:
    :param lab:
    :return:
    """
    rho, pval = spearmanr(cs, lab)
    return rho
a1768394ab94d8833cbbbd6eb32d927285bac4b8
28,230
def normalize(df):
    """Pandas df normalisation

    Parameters:
        df (pd df) : input df

    Returns:
        result (pd df) : output df
    """
    result = df.copy()
    for feature_name in df.columns:
        max_value = df[feature_name].max()
        min_value = df[feature_name].min()
        result[feature_name] = 2 * (df[feature_name] - min_value) / (max_value - min_value) - 1
    result = result.fillna(0)
    return result
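A short usage sketch (assumes pandas imported as pd): each column is rescaled to [-1, 1], and a constant column, which would divide by zero, ends up as 0 after fillna:

df = pd.DataFrame({"a": [0.0, 5.0, 10.0], "b": [3.0, 3.0, 3.0]})
print(normalize(df))
#      a    b
# 0 -1.0  0.0
# 1  0.0  0.0
# 2  1.0  0.0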
2fc05fc9ef7642ac4b84cb6ed567ec64c1da0836
28,231
def cbv_decorator(decorator):
    """
    Turns a normal view decorator into a class-based-view decorator.

    Usage:

    @cbv_decorator(login_required)
    class MyClassBasedView(View):
        pass
    """
    def _decorator(cls):
        cls.dispatch = method_decorator(decorator)(cls.dispatch)
        return cls

    return _decorator
9811cc05bcf31cb5145cc0a4f089e40433446023
28,232
def _shift_anchors(anchors, direction):
    """Shift anchors to the specified direction
    """
    new_anchors = deepcopy(anchors)
    if direction == 'center':
        pass
    elif direction == 'top':
        heights = new_anchors[:, 3] - new_anchors[:, 1] + 1
        heights = heights[:, np.newaxis]
        new_anchors[:, [1, 3]] = new_anchors[:, [1, 3]] - heights / 2
    elif direction == 'bottom':
        heights = new_anchors[:, 3] - new_anchors[:, 1] + 1
        heights = heights[:, np.newaxis]
        new_anchors[:, [1, 3]] = new_anchors[:, [1, 3]] + heights / 2
    elif direction == 'right':
        widths = new_anchors[:, 2] - new_anchors[:, 0] + 1
        widths = widths[:, np.newaxis]
        new_anchors[:, [0, 2]] = new_anchors[:, [0, 2]] + widths / 2
    elif direction == 'left':
        widths = new_anchors[:, 2] - new_anchors[:, 0] + 1
        widths = widths[:, np.newaxis]
        new_anchors[:, [0, 2]] = new_anchors[:, [0, 2]] - widths / 2
    return new_anchors
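A quick numeric check, assuming anchors are float arrays of (x1, y1, x2, y2) boxes and that numpy and copy.deepcopy are imported as in the source module:

anchors = np.array([[0.0, 0.0, 10.0, 20.0]])
print(_shift_anchors(anchors, 'top'))    # [[  0.  -10.5  10.    9.5]]  shifted up by half its height (21/2)
print(_shift_anchors(anchors, 'right'))  # [[ 5.5   0.   15.5  20. ]]   shifted right by half its width (11/2)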
fbf48649c846bbb7275cb2a773f637f9bc7023f3
28,233
import itertools


def enumerate_all_features(dim: int, num_values: int) -> chex.Array:
    """Helper function to create all categorical features."""
    features = jnp.array(list(itertools.product(range(num_values), repeat=dim)))
    chex.assert_shape(features, [num_values ** dim, dim])
    return features.astype(jnp.int32)
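For example (assuming jax.numpy is imported as jnp and chex is available), every combination of two features with three possible values each is enumerated in lexicographic order:

print(enumerate_all_features(dim=2, num_values=3))
# rows: [0 0] [0 1] [0 2] [1 0] [1 1] [1 2] [2 0] [2 1] [2 2]  -> shape (9, 2)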
049178afae402a3c73fa3a547afc8be1640efd62
28,234
def convert_pad_shape(pad_shape):
    """ Used to get arguments for F.pad """
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape
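It reverses the per-dimension pairs and flattens them, since torch.nn.functional.pad expects the padding for the last dimension first:

print(convert_pad_shape([[0, 0], [1, 2], [3, 4]]))  # [3, 4, 1, 2, 0, 0]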
39e77b3931f29f3ab95a75662187e09df545364f
28,235
def PotentialWalk(pos, tree, softening=0, no=-1, theta=0.7):
    """Returns the gravitational potential at position x by performing the Barnes-Hut
    treewalk using the provided octree instance

    Arguments:
    pos - (3,) array containing position of interest
    tree - octree object storing the tree structure

    Keyword arguments:
    softening - softening radius of the particle at which the force is being evaluated -
    we use the greater of the target and source softenings when evaluating the softened
    potential
    no - index of the top-level node whose field is being summed - defaults to the global
    top-level node, can use a subnode in principle for e.g. parallelization
    theta - cell opening angle used to control force accuracy; smaller is slower
    (runtime ~ theta^-3) but more accurate. (default 0.7 gives ~1% accuracy)
    """
    if no < 0:
        no = tree.NumParticles  # we default to the top-level node index

    phi = 0
    dx = np.empty(3, dtype=np.float64)

    while no > -1:
        r = 0
        for k in range(3):
            dx[k] = tree.Coordinates[no, k] - pos[k]
            r += dx[k] * dx[k]
        r = sqrt(r)

        h = max(tree.Softenings[no], softening)
        if no < tree.NumParticles:  # if we're looking at a leaf/particle
            if r > 0:  # by default we neglect the self-potential
                if r < h:
                    phi += tree.Masses[no] * PotentialKernel(r, h)
                else:
                    phi -= tree.Masses[no] / r
            no = tree.NextBranch[no]
        elif r > max(tree.Sizes[no] / theta + tree.Deltas[no],
                     h + tree.Sizes[no] * 0.6 + tree.Deltas[no]):
            # if we satisfy the criteria for accepting the monopole
            phi -= tree.Masses[no] / r
            if tree.HasQuads:
                # Potential from the quadrupole moment
                phi -= 0.5 * np.dot(np.dot(dx, tree.Quadrupoles[no]), dx) / (r * r * r * r * r)
            no = tree.NextBranch[no]
        else:  # open the node
            no = tree.FirstSubnode[no]

    return phi
6b03d5075df752bd5c7986da76ce8fc7c28ce10c
28,236
import re


def GetProjectUserEmail(git_repo):
    """Get the email configured for the project."""
    output = RunGit(git_repo, ['var', 'GIT_COMMITTER_IDENT']).output
    m = re.search(r'<([^>]*)>', output.strip())
    return m.group(1) if m else None
2749a7797b4ef9c2d7532f31e8b8594f0cc9c174
28,237
from typing import Tuple


def decode_features(features_ext, resource_attr_range: Tuple[int, int]):
    """
    Given matrix of features from extended configs, corresponding to
    `ExtendedConfiguration`, split into feature matrix from normal configs and
    resource values.

    :param features_ext: Matrix of features from extended configs
    :param resource_attr_range: (r_min, r_max)
    :return: (features, resources)
    """
    r_min, r_max = resource_attr_range
    features = features_ext[:, :-1]
    resources_encoded = _flatvec(features_ext[:, -1])
    lower = r_min - 0.5 + EPS
    width = r_max - r_min + 1 - 2 * EPS
    resources = anp.clip(
        anp.round(resources_encoded * width + lower), r_min, r_max)
    return features, [int(r) for r in resources]
ffb709a8ef7f7da91a7b4b99c80b2f32ff66b66e
28,238
def get_event():
    """
    Get the event information of the group
    :param room_id: the room_id of the group
    """
    incoming = request.get_json()
    event = Event.get_event_with_room_id(incoming['room_id'])
    if event:
        results = {'event_name': event.name,
                   'location': event.location,
                   'start_time': event.start_time,
                   'end_time': event.end_time,
                   'description': event.description}
    else:
        results = {'event_name': "",
                   'location': "",
                   'start_time': "",
                   'end_time': "",
                   'description': ""}
    return jsonify(results=results)
d6f9c581b563d3231ef9d40de557eee323025e4b
28,239
def test(model, X, y):
    """
    Predicts given X dataset with given model
    Returns mse score between predicted output and ground truth output

    Parameters
    ----------
    X: Test dataset with k features
    y: Ground truth of X with k features
    """
    return mse(model.predict(X).flatten(), y.flatten())
5e27125def948478b98eac997009de29a44a64a9
28,241
def solver(objectives):
    """Returns a solver for the given objective(s).  Either a single objective or a list
    of objectives can be provided.

    The result is either an IKSolver or a GeneralizedIKSolver corresponding to the given
    objective(s).  (see klampt.robotsim.IKSolver and klampt.robotsim.GeneralizedIKSolver).

    In rare cases, it may return a list of IKSolver's if you give it objectives on
    different robots.  They should be solved independently for efficiency.

    (The objectives should be a result from the :func:`objective` function.  Beware that
    if you are making your own goals by calling the IKObjective constructors from the
    robotsim module, the .robot member of these goals must be set).
    """
    if hasattr(objectives, '__iter__'):
        generalized = []
        robs = dict()
        for obj in objectives:
            if isinstance(obj, IKObjective):
                robs.setdefault(obj.robot, []).append(obj)
            elif isinstance(obj, GeneralizedIKObjective):
                generalized.append(obj)
            else:
                raise TypeError("Objective is of wrong type")
        if len(generalized) != 0:
            # need a generalized solver
            world = None
            if generalized[0].isObj1:
                world = WorldModel(generalized[0].obj1.world)
            else:
                world = WorldModel(generalized[0].link1.world)
            s = GeneralizedIKSolver(world)
            for obj in generalized:
                s.add(obj)
            for (r, objs) in robs.items():
                for obj in objs:
                    s.add(GeneralizedIKObjective(r, obj))
            return s
        else:
            res = []
            for (r, objs) in robs.items():
                s = IKSolver(r)
                for obj in objs:
                    s.add(obj)
                res.append(s)
            if len(res) == 1:
                return res[0]
            return res
    else:
        if isinstance(objectives, IKObjective):
            s = IKSolver(objectives.robot)
            s.add(objectives)
            return s
        elif isinstance(objectives, GeneralizedIKObjective):
            world = None
            if objectives.isObj1:
                world = WorldModel(objectives.obj1.world)
            else:
                world = WorldModel(objectives.link1.world)
            s = GeneralizedIKSolver(world)
            s.add(objectives)
            return s
        else:
            raise TypeError("Objective is of wrong type")
685bed37629c57a7041f3cdcee0d98c3c93b1d77
28,242
def model_fn_builder(config: NeatConfig): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) is_training = (mode == tf.estimator.ModeKeys.TRAIN) batch_size = get_shape_list(features['actions/action_id'], expected_rank=1)[0] hidden_size = config.model['hidden_size'] # activation_fn = tf.nn.tanh if config.model.get('activation', 'tanh') == 'tanh' else tf.identity scp_model = StateChangePredictModel(config, is_training=is_training, object_types=features['objects/object_types'], ) encoded_h = scp_model.encode_affordances(features['objects/object_states']) encoded_h_pre = tf.gather(encoded_h, [0, 2], axis=1) encoded_h_post_gt = tf.gather(encoded_h, [1, 3], axis=1) action_embed = scp_model.encode_action(features['actions/action_id'], action_args=features['actions/action_args']) encoded_h_post_pred = scp_model.apply_action_mlp(action_embed, encoded_h_pre) ############################################################# # Now construct a decoder # [batch_size, 3, #objs, hidden_size] -> [batch_size, 3 * objs, hidden_size] all_encoded_h = tf.concat([ encoded_h_pre, # [0, 2] encoded_h_post_gt, # [1, 3] encoded_h_post_pred, # [1, 3] ], 1) gt_affordances_decoder = tf.gather(features['objects/object_states'], [0, 2, 1, 3, 1, 3], axis=1) isvalid_by_type = tf.cast(tf.gather(features['objects/is_valid'], [0, 2, 1, 3, 1, 3], axis=1), dtype=tf.float32) if mode == tf.estimator.ModeKeys.PREDICT: predictions = scp_model.sample(all_encoded_h) predictions.update(**features) return tf.contrib.tpu.TPUEstimatorSpec(mode=tf.estimator.ModeKeys.PREDICT, predictions=predictions) affordance_pred_by_type = scp_model.decode_affordances_when_gt_is_provided(all_encoded_h, gt_affordances_decoder) ###################### # For losses # action_logits = action_result['action_logits'] ############################################ # if params.get('demomode', False): # action_logits['affordances_pred'] = affordance_pred_by_type[:, 4:] # for k in action_logits: # action_logits[k] = tf.nn.softmax(action_logits[k], axis=-1) # return action_logits losses, norms = scp_model.compute_losses( object_states=features['objects/object_states'], isvalid_by_type_o1o2=isvalid_by_type[:, :2], encoded_h_pre=encoded_h_pre, encoded_h_post_gt=encoded_h_post_gt, encoded_h_post_pred=encoded_h_post_pred, affordance_pred_by_type=affordance_pred_by_type, gt_affordances_decoder=gt_affordances_decoder, isvalid_by_type=isvalid_by_type) # losses['action_success'] = sequence_xe_loss(action_logits['action_success'], features['actions/action_success']) loss = tf.add_n([x for x in losses.values()]) for k, v in norms.items(): losses[f'norms/{k}'] = v loss += 0.1 * norms['hidden_state_diff_l2'] loss += 0.1 * norms['hidden_state_diff_l1'] if is_training: tvars = [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'global_step' not in x.name] else: tvars = tf.trainable_variables() # ckpt_to_assignment_map = {} # initialized_variable_names = {} # init_checkpoint = config.model.get('init_checkpoint', None) # if init_checkpoint: # regular_assignment_map, regular_initialized_variable_names = get_assignment_map_from_checkpoint( # tvars, init_checkpoint=init_checkpoint # ) # # # If you need to disable loading certain variables, comment something like this in # # regular_assignment_map = {k: v for k, v in regular_assignment_map.items() if # 
# all([x not in k for x in ('temporal_predict', # # 'roi_language_predict', # # 'roi_pool/pool_c5', # # 'aux_roi', # # 'second_fpn', # # 'img_mask', # # 'roi_pool/box_feats_proj/kernel')])} # # ckpt_to_assignment_map['regular'] = regular_assignment_map # initialized_variable_names.update(regular_initialized_variable_names) # # def scaffold_fn(): # """Loads pretrained model through scaffold function.""" # # ORDER BY PRIORITY # return tf.train.Scaffold() tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" # if var.name in initialized_variable_names: # init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) train_op, train_metrics = optimization.build_optimizer_from_config( loss=loss, optimizer_config=config.optimizer, device_config=config.device, ) train_metrics.update(losses) # for k, v in affordance_loss_metrics.items(): # train_metrics[f'affordance_metrics/{k}'] = v host_call = construct_host_call(scalars_to_log=train_metrics, model_dir=config.device['output_dir'], iterations_per_loop=config.device.get('iterations_per_loop', 1000)) return tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=loss, train_op=train_op, eval_metrics=None, # scaffold_fn=scaffold_fn, host_call=host_call) return model_fn
572ceb93e9907e1bb960e3c66f34c14e8bacce7c
28,243
def solve_case(rectangle, tree, io_obj): """ Main program to recursively fill a rectangle with pentominos """ return tuple(map(_solve_rect_with_x(rectangle)(tree)([ len(rectangle) // 2, len(rectangle) % 2, len(rectangle[0]) // 2, len(rectangle[0]) % 2])(io_obj), tuple(_get_x_center_pts(len(rectangle) // 2 + len(rectangle) % 2, len(rectangle[0]) // 2 + len(rectangle[0]) % 2))))
4afa8b6a1e723e5900d14ec252a131086b1b15f1
28,244
def keras_weights_to_caffemodel(keras_model):
    """Convert Keras weights to a caffemodel.

    Only the conv layer and fc layer are implemented.
    :param keras_model:
    :return:
    """
    net = caffe.Net()
    layers = keras_model.layers
    for layer in layers:
        if isinstance(layer, keras.layers.Convolution2D):
            w, b = layer.get_weights()
            w = convert_filter(w)
            param = caffe.Layer_param(layer.name, 'Convolution')
            net.add_layer_with_data(param, [w, b])
        elif isinstance(layer, keras.layers.Dense):
            w, b = layer.get_weights()
            w = convert_fc(w)
            param = caffe.Layer_param(layer.name, 'InnerProduct')
            net.add_layer_with_data(param, [w, b])
    return net
088c5040e3c3cba962d4fee7caee74761f9dbb21
28,245
def get_db(): """ :return: """ db = getattr(g, "_database", None) if db is None: db = g._database = connect() db.row_factory = make_dicts return db
d462dd6d82ce1c5f13ab5acfc3f613c4d9bdba33
28,246
import numpy as np
import scipy.interpolate  # `import scipy` alone does not expose scipy.interpolate


def interpolate(X, Y, Z, x_coords, y_coords, factor=4, method='cubic'):
    """
    :return: Interpolated X, Y, Z coordinate tuples, given by a factor, and a method
             in `scipy.interpolate.griddata`
    """
    X_f, Y_f, Z_f = X.flatten(), Y.flatten(), Z.flatten()
    x_inter = scale(x_coords, factor=factor).reshape(1, -1)
    y_inter = scale(y_coords, factor=factor).reshape(-1, 1)
    X_, Y_ = np.meshgrid(x_inter, y_inter)
    Z_ = scipy.interpolate.griddata((X_f, Y_f), Z_f, (x_inter, y_inter), method=method)
    return X_, Y_, Z_
6520fb7a9e1c7b25a3030d97ad9a43253fd7b976
28,247
from warnings import warn

import numpy as np


def weighted_rmsd(x, y, weights=None, dim=None, apply_nan_mask=True):
    """ Compute weighted root-mean-square-deviation between two `xarray.DataArray` objects.

    Parameters
    ----------
    x, y : `xarray.DataArray` objects
        xarray objects for which to compute `weighted_rmsd`.
    weights : array_like, optional
        weights to use. By default, weights=`None`
    dim : str or sequence of str, optional
        Dimension(s) over which to apply `weighted rmsd`
        By default weighted rmsd is applied over all dimensions.
    apply_nan_mask : bool, default: True

    Returns
    -------
    weighted_root_mean_square deviation : float
        If `weights` is None, returns root mean square deviation using equal weights
        for all data points.
    """
    if weights is None:
        warn('Computing root-mean-square-deviation using equal weights for all data points')

    weights, op_over_dims = _get_weights_and_dims(
        x, weights=weights, dim=dim, apply_nan_mask=apply_nan_mask
    )

    # If the mask was applied in the previous operation,
    # disable it for subsequent operations to speed up computation
    apply_nan_mask_flag = not apply_nan_mask

    dev = (x - y) ** 2
    dev_mean = weighted_mean(
        dev, weights=weights, dim=op_over_dims, apply_nan_mask=apply_nan_mask_flag
    )
    return np.sqrt(dev_mean)
da975ff48dcdee1a7f2c44a58d171d7c4579e553
28,248
def _default_value(argument, default): """Returns ``default`` if ``argument`` is ``None``""" if argument is None: return default else: return argument
52eed8ddaf3c52adba69044cc462fc11279670c5
28,249
def all_feature_functions(): """ Returns all feature functions from the function module :rtype list[callable] :returns List of feature functions """ exclude = ['n_gram_frequency', 'term_frequency'] functions = [] for name in dir(features): feature_function = getattr(features, name) if callable(feature_function) and feature_function.__name__ not in exclude: functions.append(feature_function) return functions
3e2fb73def3791a64d1e3868bf5c7188d593e3cf
28,251
def unsupplied_buses(net, mg=None, slacks=None, respect_switches=True):
    """
     Finds buses that are not connected to an external grid.

     INPUT:
        **net** (pandapowerNet) - variable that contains a pandapower network

     OPTIONAL:
        **mg** (NetworkX graph) - NetworkX Graph or MultiGraph that represents a pandapower network.

        **slacks** (set, None) - buses which are considered as root / slack buses. If None, all
            existing slack buses are considered.

        **respect_switches** (boolean, True) - Fixes how to consider switches - only in case of no
            given mg.

     OUTPUT:
        **ub** (set) - unsupplied buses

     EXAMPLE:
         import pandapower.topology as top

         top.unsupplied_buses(net)
    """
    mg = mg or create_nxgraph(net, respect_switches=respect_switches)
    if slacks is None:
        slacks = set(net.ext_grid[net.ext_grid.in_service].bus.values) | set(
            net.gen[net.gen.in_service & net.gen.slack].bus.values)
    not_supplied = set()
    for cc in nx.connected_components(mg):
        if not set(cc) & slacks:
            not_supplied.update(set(cc))
    return not_supplied
e8f7da1735ab56ad5a4ffd9b4ef27088b7e89fd1
28,252
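A minimal usage sketch for unsupplied_buses, assuming a standard pandapower install; the example network and the disconnected line index are illustrative only.

import pandapower.networks as nw
import pandapower.topology as top

net = nw.example_simple()            # small built-in example network
print(top.unsupplied_buses(net))     # set() -- every bus reaches the ext_grid

# take a line out of service; the buses behind it become unsupplied
net.line.at[0, 'in_service'] = False
print(top.unsupplied_buses(net))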
def is_constant_type(expression_type): """Returns True if expression_type is inhabited by a single value.""" return (expression_type.integer.modulus == "infinity" or expression_type.boolean.HasField("value") or expression_type.enumeration.HasField("value"))
66a3237971299df3c7370f039d87a8b5f4ae2be5
28,254
def compute_tuning_improvement_sds_5_4(): """Compute average improvement during tuning, in sds""" data = _get_tuning_results_df() delta = data['delta'].dropna() result = delta.mean() fn = OUTPUT_DIR.joinpath('5_4_tuning_improvement_sds.txt') with fn.open('w') as f: f.write( '{} standard deviations of improvement during tuning' .format(result)) return result
b1adc3c2c7f2dd22217ec4be63d8e6c49d5827bc
28,255
import numpy as np


def make_grid(batch_imgs, n_rows):
    """Makes grid of images."""
    batch_imgs = np.array(batch_imgs)
    assert len(batch_imgs.shape) == 4, f'Invalid shape {batch_imgs.shape}'
    batchsize, height, width, channels = batch_imgs.shape
    n_cols = (batchsize + n_rows - 1) // n_rows

    grid = np.zeros((n_rows * height, n_cols * width, channels))
    for i, img in enumerate(batch_imgs):
        y = i // n_cols
        x = i % n_cols
        grid[y*height:(y+1)*height, x*width:(x+1)*width, :] = img
    if channels == 1:
        grid = np.concatenate([grid, grid, grid], axis=-1)

    # Upsample if low res to avoid visualization artifacts.
    if height <= 32:
        upsample_factor = 2
        grid = grid.repeat(upsample_factor, axis=0).repeat(upsample_factor, axis=1)
    return grid
6da67c75407df6ff1a4b85e5a2c0aa6992a6ffe6
28,256
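A quick check of the grid geometry: 8 images in 2 rows give 4 columns, and the 16-pixel height triggers the 2x upsampling branch.

import numpy as np

batch = np.random.rand(8, 16, 16, 3)   # 8 RGB images, 16x16
grid = make_grid(batch, n_rows=2)      # 2 rows x 4 cols
assert grid.shape == (64, 128, 3)      # (2*16*2, 4*16*2, 3) after upsampling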
def jsonUsers(request): """Export user list to JSON""" user_list = list(CustomUser.objects.values()) return JsonResponse(user_list, safe=False)
625669fd38730b9fd759812b11904e5aef3bf76e
28,258
import numpy as np


def farthest_point_sample(points, num_points=1024):
    """
    Input:
        points: a point set, in the format of NxM, where N is the number of points,
            and M is the point dimension
        num_points: required number of sampled points
    """
    def compute_dist(centroid, points):
        return np.sum((centroid - points) ** 2, axis=1)

    farthest_pts = np.zeros((num_points, points.shape[1]))
    # Randomly choose one point as the starting point
    farthest_pts[0] = points[np.random.randint(len(points))]
    distances = compute_dist(farthest_pts[0], points)
    for idx in range(1, num_points):
        farthest_pts[idx] = points[np.argmax(distances)]
        distances = np.minimum(distances, compute_dist(farthest_pts[idx], points))

    return farthest_pts.astype(np.float32)
a96ace38c6d2a18cc247e2131fc095eeccca1a84
28,259
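A small demo: farthest-point sampling spreads the picked points out, so sampling 4 points from a dense 2D cloud tends to land near the corners.

import numpy as np

np.random.seed(0)
cloud = np.random.rand(500, 2)                       # 500 points in the unit square
picked = farthest_point_sample(cloud, num_points=4)  # roughly the four corners
assert picked.shape == (4, 2) and picked.dtype == np.float32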
def resolve_shx_font_name(font_name: str) -> str: """ Map SHX font names to TTF file names. e.g. 'TXT' -> 'txt_____.ttf' """ # Map SHX fonts to True Type Fonts: font_upper = font_name.upper() if font_upper in SHX_FONTS: font_name = SHX_FONTS[font_upper] return font_name
013e944160f7fc71849e3e7f6869620a1fd6a328
28,260
def select_event_by_name(session, event_name): """ Get an event by name Parameters ---------- session : database connexion session event_name : str name of the RAMP event Returns ------- `Event` instance """ event = session.query(Event).filter(Event.name == event_name).one() return event
779bf47b812fdc920ff4359f4766a63689ced195
28,261
import torch def attention_mask_creator(input_ids): """Provide the attention mask list of lists: 0 only for [PAD] tokens (index 0) Returns torch tensor""" attention_masks = [] for sent in input_ids: segments_ids = [int(t > 0) for t in sent] attention_masks.append(segments_ids) return torch.tensor(attention_masks)
06a5880069cdc88ea33fe987bf4ac77aceef13eb
28,262
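A short example of the mask shape: index 0 is the [PAD] id, so only padding positions receive mask value 0.

input_ids = [[101, 2054, 2003, 102, 0, 0],
             [101, 7592, 102, 0, 0, 0]]
mask = attention_mask_creator(input_ids)
print(mask)
# tensor([[1, 1, 1, 1, 0, 0],
#         [1, 1, 1, 0, 0, 0]])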
def extension_from_parameters(): """Construct string for saving model with annotation of parameters""" ext = '' ext += '.A={}'.format(ACTIVATION) ext += '.B={}'.format(BATCH_SIZE) ext += '.D={}'.format(DROP) ext += '.E={}'.format(NB_EPOCH) if FEATURE_SUBSAMPLE: ext += '.F={}'.format(FEATURE_SUBSAMPLE) for i, n in enumerate(LAYERS): if n: ext += '.L{}={}'.format(i+1, n) ext += '.P={}'.format(PENALTY) return ext
009bd3dd0b105cbbd060ced37776e011487be415
28,263
def rotateBoard90(b): """b is a 64-bit score4 board consists of 4 layers Return: a 90 degree rotated board as follow (looking from above) C D E F 0 4 8 C 8 9 A B ==> 1 5 9 D 4 5 6 7 2 6 A E 0 1 2 3 3 7 B F """ return rotateLayer90(b & 0xFFFF) \ | rotateLayer90(b >> 16 & 0xFFFF) << 16 \ | rotateLayer90(b >> 32 & 0xFFFF) << 32 \ | rotateLayer90(b >> 48 & 0xFFFF) << 48
d4069e8f7953abdcf56fc9bc713da69e33f1fdbe
28,265
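rotateLayer90 is defined elsewhere in the module; below is a hypothetical bit-level sketch consistent with the docstring diagram (a bit at row r, column c of a 4x4 layer moves to row 3-c, column r), followed by the sanity check that four quarter-turns restore the board.

def rotateLayer90(layer):
    # hypothetical helper matching the diagram in rotateBoard90's docstring
    out = 0
    for r in range(4):
        for c in range(4):
            if layer >> (4 * r + c) & 1:
                out |= 1 << (4 * (3 - c) + r)
    return out

board = 0x123456789ABCDEF0
assert rotateBoard90(rotateBoard90(rotateBoard90(rotateBoard90(board)))) == board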
def register_preset_path(path): """Add filepath to registered presets :param path: the directory of the preset file(s) :type path: str :return: """ if path in _registered_paths: return log.warning("Path already registered: %s", path) _registered_paths.append(path) return path
231d151c0b01e13312539ad592faa9f01acdce42
28,266
def set_pause_orchestration( self, ne_pk_list: list[str], ) -> bool: """Set appliances to pause orchestration .. list-table:: :header-rows: 1 * - Swagger Section - Method - Endpoint * - pauseOrchestration - POST - /pauseOrchestration :param ne_pk_list: List of appliances in the format of integer.NE e.g. ``["3.NE","5.NE"]`` :type ne_pk_list: list[str] :return: Returns True/False based on successful call :rtype: bool """ return self._post( "/pauseOrchestration", data=ne_pk_list, expected_status=[204], return_type="bool", )
72ebb32a6bc1bef1faf09f646dfad90a9b29da32
28,267
def unwrap(value: str, wrap_char: str) -> str: """Unwraps a given string from a character or string. :param value: the string to be unwrapped :param wrap_char: the character or string used to unwrap :return: unwrapped string or the original string if it is not quoted properly with the wrap character or string :raise IllegalArgumentError: if either parameter is not a string """ check_argument_type(value, "value", str) check_argument_type(wrap_char, "wrap_char", str) if is_not_blank(value) and is_not_blank(wrap_char): if value[0] == wrap_char and value[-1] == wrap_char: return value[1:-1] if ( value[0 : len(wrap_char)] == wrap_char and value[-len(wrap_char) :] == wrap_char ): return value[len(wrap_char) : -len(wrap_char)] return value
d1c1aceb0c92e0ccda26f6444eea5056c56d5d44
28,268
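Quick behavioral checks, assuming the module's is_not_blank and check_argument_type helpers are available; both the single-character and multi-character wrap cases are handled.

assert unwrap("'hello'", "'") == "hello"
assert unwrap('"""doc"""', '"""') == "doc"
assert unwrap("plain", "'") == "plain"   # not wrapped -> returned as-is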
def round_to_memory_units(memory_bytes, round_up): """Round bytes to the nearest memory unit.""" return from_memory_units(to_memory_units(memory_bytes, round_up))
9402592d20832dd3149e8b9bc14115711aeee51a
28,269
import gettext import types def clone(translation): """ Clones the given translation, creating an independent copy. """ clone = gettext.GNUTranslations() clone._catalog = translation._catalog.copy() if hasattr(translation, 'plural'): clone.plural = types.FunctionType( translation.plural.__code__, translation.plural.__globals__, translation.plural.__name__, translation.plural.__defaults__, translation.plural.__closure__ ) return clone
269756db03954d7b6539e941fe07db532e81b17d
28,271
def get_general_channel(): """Returns just the general channel of the workspace""" channels = get_all_channels() for channel in channels: if (channel['is_general']): return channel
f350c87a57dc54580729a29b456e25d5b0c6797f
28,272
def nonrigid_rotations(spc_mod_dct_i): """ Determine if the rotational partition function for a certain species should be calculated according to some non-rigid model. This determination solely relies on whether has specified the use of a non-rigid model for the species. :param spc_mod_dct_i: species partition function models :type spc_mod_dct_i: dict[str: str] :rtype: bool """ rot_model = spc_mod_dct_i['rot']['mod'] return bool(rot_model == 'vpt2')
5ef94d4dc1b267ffab6bb654d58aae592b69d367
28,273
def general_standing():
    """
    Gives the general standing based on the current matchday.

    Note that it depends on objects imported at the beginning of the notebook,
    specifically Results; to refresh the standing, run this after Results has
    been created from the utilities script. This is called by other functions.
    """
    posts, _ = fetch_standing('pti')
    dict_out = {}
    for dic in posts:
        dict_out[dic['team'].lower()] = dic['position']
    return dict_out
2a5f48a34209ff9568b9c4afb447df4eac944391
28,274
def pixwt(xc, yc, r, x, y):
    """Compute the fraction of a unit pixel that is interior to a circle.

    The circle has a radius r and is centered at (xc, yc). The center of
    the unit pixel (length of sides = 1) is at (x, y).

    (Adapted from the IDL function ``Pixwt(xc, yc, r, x, y)``.)
    """
    return intarea(xc, yc, r, x - 0.5, x + 0.5, y - 0.5, y + 0.5)
35e8937456fa1a4c8ca251ff55449001c6f64859
28,275
import torch
from torch.autograd import Variable


def predict(net, inputs, use_GPU=False, in_type='numpy'):
    """Make predictions using a well-trained network.

    Parameters
    ----------
    inputs : numpy array or torch tensor
        The inputs of the network.
    use_GPU : bool
        If True, calculate using GPU, otherwise, calculate using CPU.
    in_type : str
        The data type of the inputs, it can be 'numpy' or 'torch'.
    """
    if use_GPU:
        net = net.cuda()
        if in_type == 'numpy':
            inputs = dp.numpy2cuda(inputs)
        elif in_type == 'torch':
            inputs = dp.torch2cuda(inputs)
    else:
        if in_type == 'numpy':
            inputs = dp.numpy2torch(inputs)
    net = net.eval()  # this works for the batch normalization layers
    pred = net(Variable(inputs))
    if use_GPU:
        pred = dp.cuda2numpy(pred.data)
    else:
        pred = dp.torch2numpy(pred.data)
    return pred
4a07a8171024fe50f56a94a416f4745f8db01759
28,276
def svn_mergeinfo_catalog_merge(*args): """ svn_mergeinfo_catalog_merge(svn_mergeinfo_catalog_t mergeinfo_catalog, svn_mergeinfo_catalog_t changes_catalog, apr_pool_t result_pool, apr_pool_t scratch_pool) -> svn_error_t """ return _core.svn_mergeinfo_catalog_merge(*args)
dd79ece86fab697b519f4ec2b5b294e085701364
28,279
import numpy def normalize_const(v): """ Normalize a numpy array of floats or doubles. """ return v / numpy.linalg.norm(v)
927ad9d2d94735263ac10a445f4f7fe4b3150c95
28,280
import collections def make_lc_resolver(type_: type[_T], /) -> collections.Callable[..., collections.Awaitable[_T]]: """Make an injected callback which resolves a LazyConstant. Notes ----- * This is internally used by `inject_lc`. * For this to work, a `LazyConstant` must've been set as a type dependency for the passed `type_`. Parameters ---------- type_ : type[_T] The type of the constant to resolve. Returns ------- collections.abc.Callable[..., collections.abc.Awaitable[_T]] An injected callback used to resolve the LazyConstant. """ async def resolve( # LazyConstant gets type arguments at runtime constant: LazyConstant[_T] = injecting.inject(type=LazyConstant[type_]), ctx: injecting.AbstractInjectionContext = injecting.inject(type=injecting.AbstractInjectionContext), ) -> _T: """Resolve a lazy constant.""" if (value := constant.get_value()) is not None: return value async with constant.acquire(): if (value := constant.get_value()) is not None: return value result = await constant.callback.resolve(ctx) constant.set_value(result) return result return resolve
30190cfb0f74eab96bafabfc55ef63d18ea25a0f
28,282
import hashlib def get_md5(string): """ Get md5 according to the string """ byte_string = string.encode("utf-8") md5 = hashlib.md5() md5.update(byte_string) result = md5.hexdigest() return result
968b8f8ec28720e4ed4d020093f815b6af33eea7
28,283
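A one-line check against a well-known digest.

assert get_md5("hello") == "5d41402abc4b2a76b9719d911017c592"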
def clean_docs_and_uniquify(docs): """ normalize docs and uniquify the doc :param docs: :return: """ docs = [normalize(t) for t in docs if isinstance(t, str)] docs = dedupe(docs) return docs
6a267c1b5b6744cf28b2b6c73e6cf49f25f95727
28,284
from typing import Any

import torch
from torch import Tensor


def shard_init_helper_(init_method,
                       tensor: Tensor,
                       **kwargs: Any) -> None:
    """
    Helper function to initialize shard parameters.
    """
    if hasattr(tensor, _PARALLEL_DIM):
        local_rank = get_rank()
        group = get_group()
        world_size = get_world_size()
        parallel_dim = getattr(tensor, _PARALLEL_DIM)
        tensor_shape = list(map(int, tensor.shape))
        tensor_size = len(tensor_shape)

        # handle both weight and bias
        col_dim = tensor_size - 1
        row_dim = 0

        if parallel_dim == 0:
            tensor_shape[col_dim] *= world_size if tensor_size == 2 else 1
        elif parallel_dim == 1 or parallel_dim == -1:
            tensor_shape[row_dim] *= world_size
        elif parallel_dim is None:
            pass
        else:
            raise ValueError(f'Unsupported parallel_dim: {parallel_dim}')

        data = torch.empty(
            tensor_shape,
            dtype=torch.float,
            requires_grad=False
        ).cuda(local_rank)

        init_method(data, **kwargs)
        dist.broadcast(data, src=0)

        if parallel_dim == 0:
            data = scatter(data, dim=col_dim) if tensor_size == 2 else data
        elif parallel_dim == 1 or parallel_dim == -1:
            data = scatter(data, dim=row_dim)
        elif parallel_dim is None:
            pass
        else:
            raise ValueError(f'Unsupported parallel_dim: {parallel_dim}')

        tensor.data.copy_(data)
        del data
    else:
        return init_method(tensor, **kwargs)
df692b2766ed49d3358c72450aa5d4159915e4b5
28,285
def segment_zentf_tiling(image2d, model, tilesize=1024, classlabel=1, overlap_factor=1):
    """Segment a single [X, Y] 2D image using a pretrained segmentation model from ZEN.
    The output will be a binary mask from the prediction of the ZEN czmodel,
    which is a TF.SavedModel with metainformation.

    Before the segmentation via the network is applied, the image2d will be tiled
    in order to match the tile size to the required batch tile size of the
    used network. Default is (1024, 1024)

    :param image2d: image to be segmented
    :type image2d: NumPy.Array
    :param model: trained TF2 model used for segmentation
    :type model: TF.SavedModel
    :param tilesize: required tile size for the segmentation model, defaults to 1024
    :type tilesize: int, optional
    :param classlabel: Index for the class one is interested in, defaults to 1
    :type classlabel: int, optional
    :param overlap_factor: overlap_factor of 2 = stride between each tile is only
    tile_shape/overlap_factor and therefore overlap_factor = 1 means no overlap, defaults to 1
    :type overlap_factor: int, optional
    :return: binary - binary mask of the specified class
    :rtype: Numpy.Array
    """
    # create tile image using MightyMosaic
    image2d_tiled = MightyMosaic.from_array(image2d, (tilesize, tilesize),
                                            overlap_factor=overlap_factor,
                                            fill_mode='reflect')
    print('image2d_tiled shape : ', image2d_tiled.shape)

    # get number of tiles
    num_tiles = image2d_tiled.shape[0] * image2d_tiled.shape[1]
    print('Number of Tiles: ', num_tiles)

    # create array for the binary results
    binary_tiled = image2d_tiled

    ct = 0
    for n1 in range(image2d_tiled.shape[0]):
        for n2 in range(image2d_tiled.shape[1]):
            ct += 1
            print('Processing Tile : ', ct, ' Size : ', image2d_tiled[n1, n2, :, :].shape)

            # extract a tile
            tile = image2d_tiled[n1, n2, :, :]

            # get the binary from the prediction for a single tile
            binary_tile = segment_zentf(tile, model, classlabel=classlabel)

            # cast the result into the output array
            binary_tiled[n1, n2, :, :] = binary_tile

    # create fused binary and convert to int
    binary = binary_tiled.get_fusion().astype(int)

    return binary
7f88124b86bfe6bde147aeedd9385ea7b19063e3
28,286
def key2cas(key): """ Find the CAS Registry Number of a chemical substance using an IUPAC InChIKey :param key - a valid InChIKey """ if _validkey(key): hits = query('InChIKey=' + key, True) if hits: if len(hits) == 1: return hits[0]['rn'] else: # check hits for smallest molar mass compound, i.e., not polymer minmm = 100000 minrn = '' for i, hit in enumerate(hits): mm = detail(hit['rn'], 'molecularMass') if mm != '': if float(mm) < minmm: minmm = float(mm) minrn = hit['rn'] return minrn else: return '' else: return ''
d0919e2e6c1b6b149409e2b6333bcd3129c53379
28,288
import time
import re
from urllib.parse import urlencode, quote_plus
from urllib.request import urlopen


def connect_server(server, username, startpage, sleep_func=time.sleep, tracktype='recenttracks'):
    """ Connect to server and get a XML page."""
    if server == "libre.fm":
        baseurl = 'http://alpha.libre.fm/2.0/?'
        urlvars = dict(method='user.get%s' % tracktype,
                       api_key=('lastexport.py-%s' % __version__).ljust(32, '-'),
                       user=username,
                       page=startpage,
                       limit=200)

    elif server == "last.fm":
        baseurl = 'http://ws.audioscrobbler.com/2.0/?'
        urlvars = dict(method='user.get%s' % tracktype,
                       api_key='e38cc7822bd7476fe4083e36ee69748e',
                       user=username,
                       page=startpage,
                       limit=50)
    else:
        if server[:7] != 'http://':
            server = 'http://%s' % server
        baseurl = server + '/2.0/?'
        urlvars = dict(method='user.get%s' % tracktype,
                       api_key=('lastexport.py-%s' % __version__).ljust(32, '-'),
                       user=username,
                       page=startpage,
                       limit=200)

    url = baseurl + urlencode(urlvars, quote_via=quote_plus)
    for interval in (1, 5, 10, 62, 240):
        try:
            f = urlopen(url)
            break
        except Exception as e:
            last_exc = e
            print('Exception occurred, retrying in %ds: %s' % (interval, e))
            sleep_func(interval)
    else:
        print('Failed to open page %s' % urlvars['page'])
        raise last_exc

    response = f.read()
    f.close()

    # decode the bytes (assumed UTF-8) instead of str(), which would yield a
    # b'...' repr, then drop stray U+FFFE characters to fix the bad XML
    response = re.sub('\ufffe', '', response.decode('utf-8', errors='replace'))
    return response
694bfa6b1ace0ed51338983b4901f06c78f44afd
28,289
def eigenvalue_nonunitary_diamondnorm(A, B, mxBasis): """ Eigenvalue nonunitary diamond distance between A and B """ d2 = A.shape[0] evA = _np.linalg.eigvals(A) evB = _np.linalg.eigvals(B) return (d2 - 1.0) / d2 * _np.max(_tools.minweight_match(evA, evB, lambda x, y: abs(abs(x) - abs(y)), return_pairs=False))
97716d88829e4bf0beef914f38bd40d24c1fc32a
28,290
def str_to_int(value): """Convert str to int if possible Args: value(str): string to convert Returns: int: converted value. str otherwise """ try: return int(value) except ValueError: return value
30bd55fc34abffa67c79117b0941ba7e6388efeb
28,291
import json def json_response(data): """this function is used for ajax def route(request): return json_response(t.json()) """ header = 'HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n' body = json.dumps(data, ensure_ascii=False, indent=8) r = header + '\r\n' + body return r.encode(encoding='utf-8')
8678a8f62fab10d9120d354889387b6d70cddea9
28,292
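A quick look at the raw bytes this produces; the extra '\r\n' gives the blank line that separates the header block from the JSON body.

r = json_response({"ok": True})
print(r.decode('utf-8'))
# HTTP/1.1 200 OK
# Content-Type: application/json
#
# {
#         "ok": true
# }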
from operator import sub
from operator import add


def linePointXY(l, p, inside=True, distance=False, params=False):
    """
    For a point ``p`` and a line ``l`` that lie in the same XY plane, compute the point on ``l``
    that is closest to ``p``, and return that point. If ``inside`` is true, then return the closest
    point between the point and the line segment. If ``distance`` is true, return
    the closest distance, not the point. If ``params`` is true, return the sampling parameter
    value of the closest point.
    """
    a = l[0]
    b = l[1]
    # check for degenerate case of zero-length line
    abdist = dist(a, b)
    if abdist < epsilon:
        # raise ValueError('zero-length line passed to linePointXY')
        print('zero-length line passed to linePointXY')
        return False

    if distance and params:
        raise ValueError('incompatible distance and params parameters passed to linePointXY')

    x0 = p[0]
    y0 = p[1]
    z0 = p[2]
    x1 = a[0]
    y1 = a[1]
    z1 = a[2]
    x2 = b[0]
    y2 = b[1]
    z2 = b[2]

    ## check to see if all three points lie in the same x,y plane
    if not isXYPlanar([p, a, b]):
        raise ValueError('non-XY points in linePointXY call')
        return False
    # if abs(z1-z0) > epsilon or abs(z2-z0) > epsilon:
    #    return False

    linedist = abs(((y2 - y1) * x0 - (x2 - x1) * y0 + x2 * y1 - y2 * x1) / abdist)

    ## this is the fast case:
    if not inside and distance:
        return linedist

    ## find out where the intersection between the original line and a
    ## line defined by the point and an orthogonal direction vector
    ## is.  We do this by constructing two direction vectors
    ## orthogonal to the original line scaled by the line distance,
    ## and adding them to the point in question.  Assuming that the
    ## line distance is not zero, only one of these constructed points
    ## will fall on the line

    ## compute unit direction vector for original line
    dir = sub(b, a)
    dir = scale3(dir, 1.0 / mag(dir))

    ## compute two orthogonal direction vectors of length linedist
    ordir1 = scale3(orthoXY(dir), linedist)
    ordir2 = scale3(ordir1, -1.0)

    ## there are two possible intersection points
    pi1 = add(p, ordir1)
    pi2 = add(p, ordir2)

    ## compute distances
    d1pa = dist(a, pi1)
    d1pb = dist(pi1, b)
    d1 = d1pa + d1pb  # "triangle" with pi1

    d2pa = dist(a, pi2)
    d2pb = dist(pi2, b)
    d2 = d2pa + d2pb  # "triangle" with pi2

    ## the shortest "triangle" distance will signal the point that
    ## is actually on the line, even if that point falls outside
    ## the a,b line interval

    if params or not inside:  # if we don't care about being inside the
                              # line segment
        if d1 <= d2:
            if distance:
                return d1
            elif params:
                return d1pb / abdist
            else:
                return pi1
        else:
            if distance:
                return d2
            elif params:
                return d2pb / abdist
            else:
                return pi2

    ## if the closest point on the line to point p lies between
    ## the endpoints of the line, then either d1 or d2 will equal
    ## abdist.  If neither does, then we know that the closest point lies
    ## outside the endpoints

    if abs(d1 - abdist) < epsilon:
        if distance:
            return linedist
        else:
            return pi1

    if abs(d2 - abdist) < epsilon:
        if distance:
            return linedist
        else:
            return pi2

    ## closest point is outside the interval.  That means that the
    ## distance from point p to whichever endpoint is smaller is the
    ## closest distance

    d3 = dist(a, p)
    d4 = dist(b, p)

    if d3 < d4:
        if distance:
            return d3
        else:
            return a
    else:
        if distance:
            return d4
        else:
            return b
b91f1497ba2dffb72caa4cbf4a6f43972ee3853e
28,293
def config_port_type(dut, interface, stp_type="rpvst", port_type="edge", no_form=False, cli_type="klish"): """ API to config/unconfig the port type in RPVST :param dut: :param port_type: :param no_form: :return: """ commands = list() command = "spanning-tree port type {}".format(port_type) if not no_form else "no spanning-tree port type" interface_details = utils.get_interface_number_from_name(interface) if not interface_details: st.log("Interface details not found {}".format(interface_details)) return False commands.append("interface {} {}".format(interface_details.get("type"), interface_details.get("number"))) commands.append(command) commands.append('exit') st.config(dut, commands, type=cli_type) return True
a0a8672b3fea945a57367236debd1a420e270185
28,294
def check_strand(strand): """ Check the strand format. Return error message if the format is not as expected. """ if (strand != '-' and strand != '+'): return "Strand is not in the expected format (+ or -)"
9c2e720069ad8dcc8f867a37925f6e27e91dcb3f
28,296
import numpy as np


def to_halfpi(rin, za):  # match with a shunt input l net, rin > za.real
    """Match impedance ``za`` to a real input resistance ``rin`` with a
    shunt-input L network; requires rin > za.real.
    """
    ra, xa = za.real, za.imag
    disc = ra * (rin - ra)
    # np.sqrt of a negative real returns nan rather than a complex value, so
    # the original np.iscomplex(xd) test could never fire; check the radicand
    if disc < 0:
        raise ValueError
    xd = np.sqrt(disc)
    x2 = np.array([-xa - xd, -xa + xd])
    x1 = -(ra**2 + (x2 + xa)**2) / (x2 + xa)
    return np.transpose([x1 * 1j, x2 * 1j]).tolist()
210e4bb008cd58323fab1ab66ad6ef84456c569b
28,297
from pathlib import Path import yaml def load_config_or_exit(workdir="."): """Loads the challenge configuration file from the current directory, or prints a message and exits the script if it doesn't exist. Returns: dict: The config """ path = Path(workdir) if (path / "challenge.yml").exists(): path = path / "challenge.yml" elif (path / "challenge.yaml").exists(): path = path / "challenge.yaml" else: print(f"{CRITICAL}Could not find a challenge.yml file in this directory.") exit(1) with path.open() as f: raw_config = f.read() config = yaml.safe_load(raw_config) return config
e58a7725422d3ae053ab56ae81ab98a7d13be0b5
28,298
def checkOnes(x, y): """ Checks if any of the factors in y = 1 """ _ = BranchingValues() _.x = 1 for i in _range(len(y)): if _if(y[i][0] == 1): _.x = 0 _endif() if _if(y[i][1] == 1): _.x = 0 _endif() _endfor() return _.x
fec6b2aeae13750ec1b37d4b5d187d6836ab5aaa
28,299
def read_fortran_namelist(fileobj):
    """Takes a fortran-namelist formatted file and returns appropriate
    dictionaries, followed by lines of text that do not fit this pattern.
    """
    data = {}
    extralines = []
    indict = False
    fileobj.seek(0)
    for line in fileobj.readlines():
        if indict and line.strip().startswith('/'):
            indict = False
        elif line.strip().startswith('&'):
            indict = True
            dictname = line.strip()[1:].lower()
            data[dictname] = {}
        elif (not indict) and (len(line.strip()) > 0):
            extralines.append(line)
        elif indict:
            # split only on the first '=' so values containing '=' survive
            key, value = line.strip().split('=', 1)
            if value.endswith(','):
                value = value[:-1]
            value = value.strip()
            try:
                value = eval(value)
            except SyntaxError:
                value = {'.true.': True, '.false.': False}.get(value, value)
            data[dictname][key.strip()] = value
    return data, extralines
3c3b96ca707c7f0492c2913c6b9496cb57fc969b
28,301
from typing import Optional, Tuple
import re


def check_token(surface: str) -> Tuple[Optional[str], Optional[str]]:
    """Adapted and modified from coltekin/childes-tr/misc/parse-chat.py
    For a given surface form of the token, return (surface, clean),
    where clean is the token form without CHAT codes.
    """
    if surface is None:
        return None, None

    clean = ''
    if re.match(TO_OMIT, surface):  # phonological forms are also omitted
        return surface, clean

    # remove unwanted markings to normalise token form
    clean = surface.replace(' ', '')
    clean = clean.replace('xxx', '')  # unintelligible, 'wxxxord' --> 'word'
    clean = clean.replace('(', '').replace(')', '')
    clean = clean.replace('0', '')  # 0token is omitted token
    clean = clean.replace('‡', ',')  # prefixed interactional marker
    clean = clean.replace('„', ',')  # suffixed interactional marker
    # clean = clean.replace('_', ' ')  # compound, uncomment to remove '_' in compounds

    if "@" in clean:
        clean = clean[:clean.index("@")]  # drop any @endings

    return surface, clean
c737c8acdce04597506e399a7d2fe0252634edc1
28,302
def remove_media_url(media_path): """ Strip leading MEDIA_URL from a media file url. :param media_path: :return: """ if media_path.startswith(MEDIA_URL): return media_path[len(MEDIA_URL):] else: return media_path
084773d30cc9c534a9347712058c581797a2b05b
28,303
def _map_route_on_graph(ordered_cluster: sp.Cluster, graph: sp.Graph) -> list[sp.Segment]:
    """Build a route through the graph

    Args:
        ordered_cluster: Cluster with a given traversal order of points
        graph: Graph to route through

    Returns:
        The constructed route
    """
    route = []  # The route is a list of graph edges
    for i, start in enumerate(ordered_cluster):
        finish = ordered_cluster[i + 1 if (i + 1) < len(ordered_cluster) else (i + 1 - len(ordered_cluster))]
        route.extend(a_star.a_star(start, finish, graph))
    return route
1ca10abc6f9d88c08dbbbd63b48e62f7077c8a39
28,304
from typing import Tuple
from typing import Dict


def list_violation_data(client: Client, args) -> Tuple[str, Dict, Dict]:
    """List violation data.

    Args:
        client: Client object with request.
        args: Usually demisto.args()

    Returns:
        Outputs.
    """
    from_ = args.get('from')
    to_ = args.get('to')
    query = args.get('query')

    violation_data = client.list_violation_data_request(from_, to_, query)

    if violation_data.get('error'):
        raise Exception(f'Failed to get violation data in the given time frame.\n'
                        f'Error from Securonix is: {violation_data.get("errorMessage")}')

    violation_events = violation_data.get('events')
    fields_to_include = ['Accountname', 'Baseeventid', 'Category', 'Categorybehavior',
                         'Categoryobject', 'Categoryseverity', 'Destinationaddress',
                         'Destinationntdomain', 'Destinationuserid',
                         'Gestinationusername',  # sic -- field name as returned upstream
                         'Deviceaddress', 'Deviceeventcategory', 'Deviceexternalid',
                         'Devicehostname', 'EventID', 'Eventoutcome', 'Eventtime',
                         'Generationtime', 'Invalid', 'JobID', 'Jobstarttime', 'Message',
                         'Policyname', 'Resourcename', 'Rg_id', 'Rg_name', 'Riskscore',
                         'Riskthreatname', 'Sessionid', 'Sourcehostname', 'Sourcentdomain',
                         'Sourceuserid', 'Sourceusername', 'Sourceuserprivileges', 'TenantID',
                         'Tenantname', 'Timeline', 'Createdate', 'Criticality', 'Datasourceid',
                         'Department', 'Division', 'EmployeeID', 'Encrypted', 'Firstname',
                         'Fullname', 'ID', 'LanID', 'Lastname', 'Lastsynctime', 'Masked',
                         'Mergeuniquecode', 'Skipencryption', 'Status', 'Timezoneoffset',
                         'Title', 'Uniquecode', 'UserID', 'Workemail', 'Violator']
    violation_readable, violation_outputs = parse_data_arr(violation_events,
                                                           fields_to_include=fields_to_include)
    headers = ['EventID', 'Eventtime', 'Message', 'Policyname', 'Accountname']
    human_readable = tableToMarkdown(name="Violation data:", t=violation_readable,
                                     headers=headers, removeNull=True)
    entry_context = {'Securonix.ViolationData(val.Uniquecode === obj.Uniquecode)': violation_outputs}

    return human_readable, entry_context, violation_data
c7548b7a86bb63855ee5c9fc7ec602ffa39a608b
28,305
def train_add_test(func=lambda a, b: a+b, results_dir=None, reg_weight=5e-2, learning_rate=1e-2,
                   n_epochs=10001):
    """Addition of two MNIST digits with a symbolic regression network.
    Withhold sums > 15 for test data"""
    tf.reset_default_graph()

    # Symbolic regression network to combine the conv net outputs
    PRIMITIVE_FUNCS = [
        *[functions.Constant()] * 2,
        *[functions.Identity()] * 4,
        *[functions.Square()] * 4,
        *[functions.Sin()] * 2,
        *[functions.Exp()] * 2,
        *[functions.Sigmoid()] * 2,
        # *[functions.Product()] * 2,
    ]
    sr_net = symbolic_network.SymbolicNet(2, funcs=PRIMITIVE_FUNCS)  # Symbolic regression network
    # Overall architecture
    sym_digit_network = SymbolicDigit(sr_net=sr_net, normalize=normalize)
    # Set up regularization term and training
    epoch = tf.placeholder_with_default(0.0, [])
    penalty = tf.sin(np.pi / n_epochs / 1.1 * epoch) ** 2 * regularization.l12_smooth(sr_net.get_weights())
    penalty = reg_weight * penalty
    sym_digit_network.set_training(reg=penalty)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # Take up variable amount of memory on GPU
    sess = tf.Session(config=config)

    batch = batch_generator(batch_size=100)

    def train_fun(y):
        return y < 15

    def test_fun(y):
        return np.logical_not(train_fun(y))

    # Train, and restart training if loss goes to NaN
    loss_i = np.nan
    while np.isnan(loss_i):
        sess.run(tf.global_variables_initializer())
        loss_i = sym_digit_network.train(sess, n_epochs, batch, func, epoch, lr_val=learning_rate,
                                         train_fun=train_fun)
        if np.isnan(loss_i):
            continue

        # Freezing weights
        sr_net_masked = symbolic_network.MaskedSymbolicNet(sess, sr_net, threshold=0.01)
        sym_digit_network = SymbolicDigitMasked(sym_digit_network, sr_net_masked, normalize=normalize)
        sym_digit_network.set_training()

        loss_i = sym_digit_network.train(sess, n_epochs, batch, func, lr_val=learning_rate/10,
                                         train_fun=train_fun)

    # Print out human-readable equation (with regularization)
    weights = sess.run(sr_net.get_weights())
    expr = pretty_print.network(weights, PRIMITIVE_FUNCS, ["z1", "z2"])
    expr = normalize(expr)
    print(expr)

    # Calculate accuracy on test dataset
    acc_train, error_train = sym_digit_network.calc_accuracy(X_train, y_train, func, sess)
    acc_train1, error_train1 = sym_digit_network.calc_accuracy(X_train, y_train, func, sess, filter_fun=train_fun)
    acc_train2, error_train2 = sym_digit_network.calc_accuracy(X_train, y_train, func, sess, filter_fun=test_fun)
    acc_test, error_test = sym_digit_network.calc_accuracy(X_test, y_test, func, sess)
    acc_test1, error_test1 = sym_digit_network.calc_accuracy(X_test, y_test, func, sess, filter_fun=train_fun)
    acc_test2, error_test2 = sym_digit_network.calc_accuracy(X_test, y_test, func, sess, filter_fun=test_fun)
    result_str = "Train digits overall accuracy: %.3f\ttrain sum accuracy: %.3f\t test sum accuracy: %.3f\n" \
                 "Train digits overall error: %.3f\ttrain sum error: %.3f\t test sum error: %.3f\n" \
                 "Test digits overall accuracy: %.3f\ttrain sum accuracy: %.3f\t test sum accuracy: %.3f\n" \
                 "Test digits overall error: %.3f\ttrain sum error: %.3f\t test sum error: %.3f\n" % \
                 (acc_train, acc_train1, acc_train2, error_train, error_train1, error_train2,
                  acc_test, acc_test1, acc_test2, error_test, error_test1, error_test2)
    print(result_str)

    sym_digit_network.save_result(sess, results_dir, expr, result_str)
9705cfb8cc8a321c16eb6f93dda1878e73e9328f
28,306
def walk_graph(csr_matrix, labels, walk_length=40, num_walks=1, n_jobs=1): """Perform random walks on adjacency matrix. Args: csr_matrix: adjacency matrix. labels: list of node labels where index align with CSR matrix walk_length: maximum length of random walk (default=40) num_walks: number of walks to do for each node n_jobs: number of cores to use (default=1) Returns: np.ndarray: list of random walks """ normalized = normalize_csr_matrix(csr_matrix) results = (Parallel(n_jobs=n_jobs, max_nbytes=None) (delayed(walk_random, has_shareable_memory) (normalized, labels, walk_length) for _ in range(num_walks))) walks, freqs = zip(*results) random_walks = np.concatenate(walks) word_freqs = np.sum(freqs, axis=0) return random_walks, dict(zip(labels, word_freqs))
4a317aecbc88998469420575346c38da30f8bc90
28,307
def mutate_split(population, config): """ Splitting a non-zero dose (> 0.25Gy) into 2 doses. population - next population, array [population_size, element_size]. """ interval_in_indices = int(2 * config['time_interval_hours']) mutation_config = config['mutations']['mutate_split'] min_dose = config['step_value'] max_dose = config['max_dose_value'] population = np.asarray(population) for i, genome in enumerate(population): if np.random.uniform() < mutation_config['mut_prob']: non_zero_dose_indices = np.nonzero(genome)[0] if non_zero_dose_indices.size: gene_idx = np.random.choice(non_zero_dose_indices) k = genome[gene_idx] / min_dose split = np.random.randint(0, k) d1 = split * min_dose d2 = genome[gene_idx] - d1 for _ in range(len(population)): new_gene_idx = np.random.randint(len(genome)) if genome[new_gene_idx] + d1 <= max_dose: genome[new_gene_idx] = genome[new_gene_idx] + d1 break for _ in range(len(population)): new_gene_idx = np.random.randint(len(genome)) if genome[new_gene_idx] + d2 <= max_dose: genome[new_gene_idx] = genome[new_gene_idx] + d2 break genome[gene_idx] = 0 population[i] = refine_genome_around_cross_point_to_time_constraint( genome=population[i], interval_in_indices=interval_in_indices, config=config) return population.tolist()
db737191d5f7c1852410d6f1ad779e8ca58c658a
28,308
def _copy_df(df): """ Copy a DataFrame """ return df.copy() if df is not None else None
263bf1cf9cbdae371ea3e4685b4638e8a5714d7f
28,309
def findPossi(bo): """ Find all possibilities for all fields and add them to a list.""" possis = [] for row,rowVal in enumerate(bo): for col,colVal in enumerate(rowVal): localpossi=newPossiFinder(bo, col, row) if bo[row][col]==0: # Here ujson.loads(ujson.dumps()) is used because it is much faster than copy.deepcopy() to make a copy of a list. possis.append(ujson.loads(ujson.dumps([localpossi,rowcoltoNum(row,col)]))) possis.sort(key=getLen) t.possibls = possis return possis
c504ca243f631af135ae64f97b9f46b2cdb7d789
28,310
def modernforms_exception_handler(func): """Decorate Modern Forms calls to handle Modern Forms exceptions. A decorator that wraps the passed in function, catches Modern Forms errors, and handles the availability of the device in the data coordinator. """ async def handler(self, *args, **kwargs): try: await func(self, *args, **kwargs) self.coordinator.async_update_listeners() except ModernFormsConnectionError as error: _LOGGER.error("Error communicating with API: %s", error) self.coordinator.last_update_success = False self.coordinator.async_update_listeners() except ModernFormsError as error: _LOGGER.error("Invalid response from API: %s", error) return handler
c486173ef34f4c89fb3138cad989472f01d7bb7c
28,311
import numpy as np


def reads_per_insertion(tnpergene_list, readpergene_list, lines):
    """It computes the reads per insertion following the formula: reads/(insertions-1)
    if the number of insertions is at least 5; otherwise the reads per insertion will be 0.

    Parameters
    ----------
    tnpergene_list : list
        A list with all insertions
    readpergene_list : list
        A list of the reads
    lines : int
        Number of genes mapped to in the reference genome

    Returns
    -------
    list
        A list containing all the reads per insertions per gene.
    """
    readperinspergene_list = [np.nan] * lines  # `lines` is an int, so no len()

    for i in range(len(tnpergene_list)):
        if not tnpergene_list[i] < 5:
            readperinspergene_list[i] = readpergene_list[i] / (tnpergene_list[i] - 1)
        else:
            readperinspergene_list[i] = 0

    return readperinspergene_list
c5a3f06298d2e782d60b20d561d9d5f65a369dcd
28,312
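A tiny worked example of the threshold and the (insertions - 1) denominator.

tn = [10, 3, 6]          # insertions per gene
reads = [90, 30, 50]     # reads per gene
print(reads_per_insertion(tn, reads, 3))
# [10.0, 0, 10.0] -- genes with fewer than 5 insertions get 0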
import math


def getCurrDegreeSize(currDegree, spatialDim):
    """ Computes the number of polynomials of the current spatial dimension """
    # np.math was just an alias for the math module (removed in NumPy 2.0);
    # this is the binomial coefficient C(currDegree + spatialDim - 1, currDegree)
    return math.factorial(currDegree + spatialDim - 1) / (
        math.factorial(currDegree) * math.factorial(spatialDim - 1))
754440fde04f7fe30e336cf4d7c5efb75dd1aaac
28,313
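The factorial ratio is the binomial coefficient C(d + s - 1, d); a quick cross-check for degree 2 in 3 dimensions:

from math import comb

assert getCurrDegreeSize(2, 3) == comb(4, 2) == 6   # x^2, y^2, z^2, xy, xz, yz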
def split_last_dimension(x, n): """Reshape x so that the last dimension becomes two dimensions. The first of these two dimensions is n. Args: x: a Tensor with shape [..., m] n: an integer. Returns: a Tensor with shape [..., n, m/n] """ x_shape = shape_list(x) m = x_shape[-1] if isinstance(m, int) and isinstance(n, int): assert m % n == 0 return tf.reshape(x, x_shape[:-1] + [n, m // n])
c1f26106e0d11a5722191a52c86f90b9559d32dc
28,314
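An eager-mode check, with a minimal stand-in for the shape_list helper the function assumes (hypothetical; the real helper presumably also handles dynamic shapes).

import tensorflow as tf

def shape_list(x):
    # minimal stand-in: static shapes only
    return x.shape.as_list()

x = tf.reshape(tf.range(12), [2, 6])
y = split_last_dimension(x, 3)
assert y.shape.as_list() == [2, 3, 2]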
def get_column_dtype(column, pd_or_sqla, index=False): """ Take a column (sqlalchemy table.Column or df.Series), return its dtype in Pandas or SQLA If it doesn't match anything else, return String Args: column: pd.Series or SQLA.table.column pd_or_sqla: either 'pd' or 'sqla': which kind of type to return index: if True, column type cannot be boolean Returns: Type or None if pd_or_sqla == 'sqla': one of {Integer, Float, Boolean, DateTime, String, or None (for all-NaN column)} if pd_or_sqla == 'pd': one of {np.int64, np.float64, np.datetime64, np.bool_, np.str_} """ if isinstance(column, sqa.Column): dtype = _get_type_from_db_col(column) elif isinstance(column, (pd.Series, pd.Index)): dtype = _get_type_from_df_col(column, index=index) else: raise ValueError(f'get_column_datatype takes a column; got {type(column)}') if dtype is None: return None elif pd_or_sqla == 'sqla': return dtype elif pd_or_sqla == 'pd': return _sqa_type2pandas_type(dtype, index=index) else: raise ValueError(f'Select pd_or_sqla must equal either "pd" or "sqla"')
c466405c66b24d48cc37920df2876e876d1d6885
28,316
def _stdlibs(tut): """Given a target, return the list of its standard rust libraries.""" libs = [ lib.static_library for li in tut[CcInfo].linking_context.linker_inputs.to_list() for lib in li.libraries ] stdlibs = [lib for lib in libs if (tut.label.name not in lib.basename)] return stdlibs
8098406876684911df5c52413780305bea2d12a7
28,317
def _chebyshev(wcs_dict): """Returns a chebyshev model of the wavelength solution. Constructs a Chebyshev1D mathematical model Parameters ---------- wcs_dict : dict Dictionary containing all the wcs information decoded from the header and necessary for constructing the Chebyshev1D model. Returns ------- `~astropy.modeling.Model` """ model = models.Chebyshev1D(degree=wcs_dict['order'] - 1, domain=[wcs_dict['pmin'], wcs_dict['pmax']], ) new_params = [wcs_dict['fpar'][i] for i in range(wcs_dict['order'])] model.parameters = new_params return model
3d30fde977351a4e43a0940696c8fc988400ccda
28,318
def autosolve(equation):
    """
    Automatically solve an easy maths problem.

    :type equation: string
    :param equation: The equation to calculate.

    >>> autosolve("300 + 600")
    900
    """
    tokens = equation.split(" ")

    def to_number(token):
        # Try an integer first, fall back to a decimal
        try:
            return int(token)
        except ValueError:
            return float(token)

    num1 = to_number(tokens[0])
    num2 = to_number(tokens[2])
    operator = tokens[1].lower()

    if operator in ["+", "plus", "add"]:
        return num1 + num2
    elif operator in ["-", "minus", "subtract"]:
        return num1 - num2
    elif operator in ["*", "times", "multiply"]:
        return num1 * num2
    elif operator in ["/", "divide", "quotient"]:
        return num1 / num2
    elif operator in ["%", "remainder", "rem"]:
        return num1 % num2

    raise ValueError("Invalid operation provided.")
a4db1dedffdccc44d7747c4743f4f2eaf8dbd81a
28,319
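The word forms of each operator are accepted too:

assert autosolve("300 + 600") == 900
assert autosolve("10 divide 4") == 2.5
assert autosolve("7 rem 3") == 1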
from bs4 import BeautifulSoup
import urllib3

# the stdlib `http` module has no `request` callable; the call signature below
# matches urllib3's PoolManager, which is assumed to be the intended client
http = urllib3.PoolManager()


def request_champion(champion_name: str) -> BeautifulSoup:
    """
    Get http request to website with all statistics about
    a champion with html format.
    """
    request = http.request(
        'GET',
        f'https://www.leaguespy.gg/league-of-legends/champion/{champion_name}/stats',
        None,
        HEADERS
    )
    return BeautifulSoup(request.data, 'lxml')
f0b5a0b1eb6cceec6c7e1c8c8cd1078a5f2505c3
28,320