content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def get_patron_pid(record):
    """Resolve the patron PID for a migrated circulation record.

    Looks the user up by its legacy borrower id; when the user has been
    deleted, falls back to the configured anonymous patron class.
    """
    user = get_user_by_legacy_id(record["id_crcBORROWER"])
    if user:
        pid = user.pid
    else:
        # User was deleted: substitute the AnonymousUser's id instead.
        pid = current_app.config["ILS_PATRON_ANONYMOUS_CLASS"]().id
    return str(pid)
e2ba9102052c0ce84f7fc04fdc5be97a6050634c
28,200
import PySide.QtGui as QtGui
import PyQt5.QtWidgets as QtWidgets


def get_QGroupBox():
    """QGroupBox getter.

    Prefers the PySide binding, falling back to PyQt5's widget.
    """
    try:
        return QtGui.QGroupBox
    # BUG FIX: an attribute access on an already-imported module raises
    # AttributeError, never ImportError, so the PyQt5 fallback was
    # unreachable.  Catch both to make the fallback effective.
    # NOTE(review): the module-level imports above still fail at import
    # time if PySide is absent -- consider moving them into this function.
    except (ImportError, AttributeError):
        return QtWidgets.QGroupBox
9c0333c9c8a4fafd1c4f4f5739546aaa19a6e321
28,201
import collections


def update_dict(orig_dict: dict, new_dict: collections.abc.Mapping) -> dict:
    """Recursively merge ``new_dict`` into ``orig_dict`` in place.

    Mimics ``dict.update()``, but descends into nested mappings instead
    of overwriting them wholesale.

    Ref: https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth

    Args:
        orig_dict: The dictionary receiving the values (mutated).
        new_dict: The mapping providing the values.

    Returns:
        The mutated ``orig_dict``.
    """
    for key, incoming in new_dict.items():
        if isinstance(incoming, collections.abc.Mapping):
            # Merge recursively, creating the nested dict when absent.
            orig_dict[key] = update_dict(orig_dict.get(key, {}), incoming)
        else:
            orig_dict[key] = incoming
    return orig_dict
e176f4cf293be67aece6e9811fe75de679bb1e94
28,202
from typing import Sequence
from typing import Tuple
import itertools

# NOTE(review): `np` (numpy) and the `Array` alias are used below but not
# imported in this snippet -- presumably provided elsewhere in the module.


def simulate_data(covariates: int, scales: Sequence[int], levels: Sequence[int], singletons: float, state: np.random.RandomState) -> Tuple[Array, Array, Array]:
    """Simulate IDs and data matrices.

    Builds a fully-crossed panel of fixed-effect IDs (one column per
    facet in `scales`/`levels`), randomly permutes each ID column,
    replaces a share of each column with unique negative "singleton"
    IDs, and generates y = X.sum + fixed effects + normal noise.

    Args:
        covariates: number of covariate columns in X.
        scales: per-facet repetition counts for each ID level.
        levels: per-facet number of distinct ID levels.
        singletons: target share of observations (spread across the M ID
            columns) replaced with singleton IDs.
        state: numpy RandomState driving all randomness.

    Returns:
        Tuple of (ids, X, y): the N x M ID matrix, the N x covariates
        design matrix, and the N x 1 outcome.
    """
    # simulate fixed effects: the cartesian product over facets yields one
    # row per observation; `fe` holds the matching per-facet effect draws.
    ids = np.array(list(itertools.product(*(np.repeat(np.arange(l), s) for s, l in zip(scales, levels)))))
    fe = np.array(list(itertools.product(*(np.repeat(state.normal(size=l), s) for s, l in zip(scales, levels)))))

    # count dimensions
    N, M = ids.shape

    # shuffle the IDs: scatter the column's original values to permuted row
    # positions (the .copy() avoids reading partially-overwritten data).
    for index in range(M):
        indices = np.arange(N)
        state.shuffle(indices)
        ids[indices, index] = ids.copy()[:, index]

    # shuffle and replace shares of the data with singletons; negative IDs
    # mark singletons so they cannot collide with the real (>= 0) IDs.
    indices = np.arange(N)
    for index in range(M):
        state.shuffle(indices)
        singleton_indices = indices[:int(singletons * N / M)]
        ids[indices, index] = ids.copy()[:, index]
        ids[singleton_indices, index] = -np.arange(singleton_indices.size)

    # simulate remaining data
    error = state.normal(size=(N, 1))
    X = state.normal(size=(N, covariates))
    y = X.sum(axis=1, keepdims=True) + fe.sum(axis=1, keepdims=True) + error
    return ids, X, y
c5fbddde4045b2367975b28bcfc4ded032427505
28,203
def create_strike_ingest_job_message(ingest_id, strike_id):
    """Creates a message to create the ingest job for a strike

    :param ingest_id: ID of the ingest
    :type ingest_id: int
    :param strike_id: The ID of the strike
    :type strike_id: int
    :returns: the configured CreateIngest message
    """
    msg = CreateIngest()
    msg.ingest_id = ingest_id
    msg.strike_id = strike_id
    msg.create_ingest_type = STRIKE_JOB_TYPE
    return msg
47e96f6fa53cc934572852cfcaf8ed9455408dec
28,204
def create_data(nfac0,               # [Number of facet 0 elements -- rows/persons]
                nfac1,               # [Number of facet 1 elements -- columns/items]
                ndim,                # [Number of dimensions to create]
                seed=None,           # [<None, int, {'Fac0':<None,int,array>, 'Fac1':<None,int,array>}> => generates coordinates]
                facmetric=[4, -2],   # [[m,b] => rand() * m + b, to set range of facet coordinate values]
                noise=None,          # [<None, noise, {'Rows':<noise,{...}>, 'Cols':<noise,{...}>}> => add error to rows/cols]
                validchars=None,     # [<None, ['All',[valid chars]], ['Cols',{col:[...], ...}]>]
                mean_sd=None,        # [<None, ['All',[Mean,SD]], ['Cols',{col:[Mean,SD], ...}]>]
                p_nan=0.0,           # [Proportion of cells to make missing at random]
                nanval=-999.,        # [Numeric code for designating missing values]
                condcoord=None,      # [<None, 'Orthonormal'>]  (fixed: duplicated fused comment removed)
                nheaders4rows=1,     # [Number of header column labels to put before each row]
                nheaders4cols=1,     # [Number of header row labels to put before each column]
                extra_headers=0,     # [<0, int, {label:proportion, ...}> => extra header labels, random or in blocks]
                input_array=None,    # [<None, data array, {'fac0coord':..., 'fac1coord':...}>]
                apply_zeros=None,    # [<None, [row, {'sub':[0,1,1], ...}]> => zero out coordinate dims per item group]
                output_as='Damon',   # [<'Damon','datadict','array','textfile','dataframe','Damon_textfile','datadict_textfile','array_textfile'>]
                outfile=None,        # [<None, 'my_data.csv'> => output file/path when output_as includes 'textfile']
                delimiter=None,      # [<None, field delimiter of output file, e.g. ',' or '\t'>]
                bankf0=None,         # [<None, [<'All', list of F0 (row) entities>]> => pickle a row-coordinate bank]
                bankf1=None,         # [<None, [<'All', list of F1 (col) entities>]> => pickle a column-coordinate bank]
                verbose=True,        # [<None, True> => print useful information and messages]
                condcoord_=None      # [<None, condcoord args> deprecated, only for backward compatibility]
                ):
    """Create simulated model and noisy data objects analyzable by Damon.

    Returns
    -------
    A Python dict of outputs, each formatted per ``output_as``:

        {'data':      simulated "observed" data, with noise and missing cells,
         'model':     simulated "true" values, no noise,
         'anskey':    answer key when data are alpha (multiple-choice style),
         'fac0coord': theoretical row coordinates,
         'fac1coord': theoretical column coordinates}

    Comments
    --------
    Compliant with the ALS decomposition, each cell is the dot product
    of its row and column coordinates plus optional random noise:

        data[r, c] = Row[r] . Col[c] + noise[r, c]

    Cells can also be made missing at random (``p_nan``/``nanval``).
    ``validchars`` and ``mean_sd`` transform the continuous model values
    into the target metric -- dichotomous, ordinal, sigmoid, ratio, or
    nominal "multiple-choice" responses with a random answer key.
    ``seed`` fixes or supplies the generating coordinates;
    ``facmetric=[m, b]`` rescales them (``b == 0`` flags ratio/count data
    and triggers a log transform).  ``input_array`` imports externally
    built data or coordinates, overriding nfac0/nfac1/ndim.
    ``apply_zeros`` zeroes out coordinate dimensions per item subgroup to
    simulate dimensionally distinct subspaces.  ``bankf0``/``bankf1``
    pickle row/column coordinate "banks" ('bankf0', 'bankf1') in the
    current working directory for testing anchored analyses.  Header
    parameters (``nheaders4rows``, ``nheaders4cols``, ``extra_headers``)
    control row/column labeling; ``output_as``/``outfile``/``delimiter``
    control the output container and optional text file.

    NOTE: all keys are forced to strings ('S60') so that methods such as
    equate(), which append new construct identifiers, do not fail on
    integer keys.
    """
    # For backward compatibility: condcoord_ is the deprecated spelling.
    if condcoord_ is not None:
        condcoord = condcoord_

    # Single-argument print() is valid on both Python 2 and 3 (the old
    # `print 'x'` statements were Python-2-only syntax).
    if verbose is True:
        print('create_data() is working...\n')

    # Run utility.  NOTE: locals() snapshots the complete argument dict at
    # this point -- do not introduce temporaries above this call, or they
    # will be passed to _create_data() as spurious parameters.
    create_data_out = dmn.utils._create_data(locals())

    if verbose is True:
        print('\ncreate_data() is done.')
        print('Contains:\n{0}\n'.format(create_data_out.keys()))

    return create_data_out
d21b3a9c3172087021ba92caa74b4be95cac993c
28,205
def load_database() -> pd.DataFrame:
    """Read the heat-pump database CSV into a DataFrame.

    Returns
    -------
    pd.DataFrame
        Content of ``hplib_database.csv`` located in the working directory.
    """
    database_path = cwd() + r'/hplib_database.csv'
    return pd.read_csv(database_path)
5bc5ec4e8376493a9d7da9a8782c2e0f4fb8223d
28,206
import re


def ensure_windows_file_path_format_encoding_as_url(path: str) -> str:
    """Normalize a Windows file path into URL form.

    Relevant for Windows where a file path name should be
    "file://path/to/file.html" instead of "file://path\\to\\file.html".
    A leading ``file:`` scheme with a drive letter is canonicalized to
    the ``file:///C:/...`` form before URL-encoding.
    """
    output = path.replace("\\", "/")  # Raw replace backslash with slash.
    # Handle exception for "file:///C:/path/to/file.html" declaration in URLs:
    if re.match(r"^file:/+[A-Za-z]:", output, re.IGNORECASE):
        # BUG FIX: re.sub's fourth positional argument is `count`, not
        # `flags`; the original passed re.IGNORECASE (== 2) there, which
        # limited the substitution count instead of ignoring case.
        output = re.sub(r"^file:/+", "file:///", output, flags=re.IGNORECASE)
    return encode_path_as_url(output)
84e429c5d8e4b0fc2fd8976b140694f0f7c1c04e
28,207
import logging


def init_logger(log_level=logging.WARNING):
    """Initialize and return the module logger.

    Args:
        log_level (int): logging level. May be (in order of increasing
            verboseness):
            logging.CRITICAL or 50
            logging.ERROR or 40
            logging.WARNING or 30
            logging.INFO or 20
            logging.DEBUG or 10

    Returns:
        logging.Logger: the configured logger.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(log_level)
    # BUG FIX: only attach a StreamHandler once; the original added a new
    # handler on every call, duplicating each emitted record.
    if not logger.handlers:
        ch = logging.StreamHandler()
        ch.setLevel(log_level)
        logger.addHandler(ch)
    return logger
7a00817378c39a6a2d13becb7ccec111d9252b45
28,208
def index():
    """Render the WebUI index page.

    Returns:
        rendered HTML page
    """
    message_data = load_messages().message_contents
    return render_template("index.html", data=message_data, encoded_data=message_data)
c92515fc49aacded4e9242955f6b2d5ea125897a
28,209
def psnr(x, pred_x, max_val=255):
    """Mean PSNR between a batch of images and their reconstructions."""
    per_image = tf.image.psnr(x, pred_x, max_val=max_val)
    return tf.reduce_mean(per_image)
7b5d44917c88d7644e5c1694a6e6d5873c0db952
28,210
def across_series_nearest_neighbors(Ts, Ts_idx, subseq_idx, m):
    """Per time series, find the subsequence closest to a query.

    The query is ``Ts[Ts_idx][subseq_idx : subseq_idx + m]``; each time
    series in *Ts* contributes its best-matching subsequence.

    Parameters
    ----------
    Ts : list
        Time series to search.
    Ts_idx : int
        Index of the series containing the query subsequence.
    subseq_idx : int
        Start index of the query subsequence within ``Ts[Ts_idx]``.
    m : int
        Subsequence window size.

    Returns
    -------
    nns_radii : ndarray
        Nearest-neighbor distances, one per series.
    nns_subseq_idx : ndarray
        Nearest-neighbor start indices, one per series.
    """
    n_series = len(Ts)
    query = Ts[Ts_idx][subseq_idx : subseq_idx + m]
    nns_radii = np.empty(n_series, dtype=np.float64)
    nns_subseq_idx = np.empty(n_series, dtype=np.int64)
    for series_id, T in enumerate(Ts):
        profile = distance_profile(query, T, len(query))
        best = np.argmin(profile)
        nns_subseq_idx[series_id] = best
        nns_radii[series_id] = profile[best]
    return nns_radii, nns_subseq_idx
4cf3f8162b33f3b313f07160bab7e8042ce08f2b
28,211
def get_boundary_from_response(response):
    """Extract the multipart boundary token from a response header.

    :param response: response whose headers contain the boundary
    :return: a binary string of the boundary
    """
    # Only the first 'content-type' value is read (duplicate keys allowed).
    content = response.headers.pop('content-type')[0]
    marker = b'boundary='
    b_start = content.find(marker)
    # Boundary runs until the next ';' or, failing that, to the end.
    b_end = content[b_start:].find(b';')
    value_start = b_start + len(marker)
    if b_end == -1:
        return content[value_start:]
    return content[value_start:b_start + b_end]
66a0112598b2210cca1a2210f6af963dfee641f7
28,212
import logging
import json


def get_message(message):
    """Decode the JSON 'data' payload of a pub/sub style message dict.

    Example message shape::

        {
            'pattern': None,
            'type': 'subscribe',
            'channel': 'my-second-channel',
            'data': 1L,
        }
    """
    if not message:
        return
    logging.info('MSG: %s', message)
    payload = message.get('data', {})
    return json.loads(payload)
2e79ed94fbfc3fba122e8bd8663e33b124d4d2b6
28,213
def parent_node(selector):
    """Return the parent node of the element matched by *selector*.

    Requires an active browser instance.
    """
    # Guard: a browser session must already be open.
    if not get_instance():
        raise Exception("You need to start a browser first with open_browser()")
    return parent_node_g(get_instance(), selector)
0d6136aa5262a4b4482166108715b3323328983b
28,214
def getPairCategory(rollSorted):
    """Map a roll's descending list of face frequencies to its hand name."""
    # Pad with zeros so short frequency lists can be unpacked safely.
    padded = tuple(rollSorted[:3]) + (0, 0, 0)
    first, second, third = padded[0], padded[1], padded[2]
    if first == 6:
        return "six-of-a-kind"
    if first == 5:
        return "five-of-a-kind"
    if first == 4:
        return "four-two full house" if second == 2 else "four-of-a-kind"
    if first == 3:
        if second == 3:
            return "double threes-of-a-kind"
        if second == 2:
            return "three-two full house"
        return "three-of-a-kind"
    if first == 2:
        if second == 2:
            return "three pairs" if third == 2 else "two pairs"
        return "one pair"
    return "high card"
1c48abd8d0c1a27a50ce587857852a95e8949e74
28,215
def allocz(size):
    """Return a list of *size* zeros."""
    return [0] * size
21670a20ea045ee7f2cf0780a011f89f917b7180
28,216
def uint_to_little_endian_bytearray(number, size):
    """Convert an unsigned integer to a little-endian list of byte values.

    Arguments:
    number -- the number to convert
    size -- the length of the target byte list

    Raises ValueError when *number* does not fit into *size* bytes.
    """
    if number >= 1 << (8 * size):
        raise ValueError("Integer overflow")
    return [(number >> (8 * i)) & 0xFF for i in range(size)]
bd3314fedf0accbc0d15b1bb146f54f52cb3bce1
28,217
import re


def to_alu_hlu_map(input_str):
    """Parse naviseccli HLU/ALU table output into an ``alu -> hlu`` map.

    Sample input::

        HLU Number     ALU Number
        ----------     ----------
          0               12
          1               23

    ALU stands for array LUN number, HLU stands for host LUN number.

    :param input_str: raw input from naviseccli
    :return: dict mapping ALU (int) to HLU (int)
    """
    mapping = {}
    if input_str is None:
        return mapping
    pair_pattern = re.compile(r'(\d+)\s*(\d+)')
    for raw_line in input_str.split('\n'):
        matched = pair_pattern.search(raw_line.strip())
        if matched is None:
            continue
        hlu, alu = matched.group(1), matched.group(2)
        mapping[int(alu)] = int(hlu)
    return mapping
8e211b7efa3f8dd23c042f046d881daf987062bc
28,218
def debug(func):
    """Decorator that logs a function's inputs and outputs.

    Inputs are logged via `decorate.debug.debug_input` in the format::

        'Datetime' | [CALL] 'function' | args=(args,) | kwargs={kwargs:values}

    Outputs are logged via `decorate.debug.debug_output` in the format::

        'Datetime' | [RETURN] 'function' | output='output'

    Args:
        func (Callable): Function to decorate

    Returns:
        Callable: Wrapped function
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        debug_input(func, deco=False, input_args=args, input_kwargs=kwargs)
        output = func(*args, **kwargs)
        debug_output(func, deco=False, output=output)
        return output
    return wrapper
c6d7b84f401a469afafaeda64c4be2a34542da67
28,219
import copy


def randomize(jet):
    """Rebuild *jet*'s binary tree by merging its leaves in random order.

    The leaves of the input jet are kept; internal nodes are re-created
    by repeatedly merging two randomly chosen subtrees until one root
    remains. A parent's content row is the sum of its children's rows.

    :param jet: dict with ``tree`` (N x 2 child indices, -1 marks a leaf)
        and ``content`` (per-node feature rows)
    :return: a deep copy of *jet* with randomized ``tree``, rebuilt
        ``content`` and updated ``root_id``
    """
    jet = copy.deepcopy(jet)
    leaves = np.where(jet["tree"][:, 0] == -1)[0]
    nodes = [n for n in leaves]
    content = [jet["content"][n] for n in nodes]
    # BUG FIX: `range(...)` is immutable in Python 3 and has no append();
    # materialize it as a list so new internal node ids can be added.
    nodes = list(range(len(nodes)))
    tree = [[-1, -1] for n in nodes]
    pool = [n for n in nodes]
    next_id = len(nodes)
    while len(pool) >= 2:
        # Draw two distinct subtrees at random and merge them.
        i = np.random.randint(len(pool))
        left = pool[i]
        del pool[i]
        j = np.random.randint(len(pool))
        right = pool[j]
        del pool[j]
        nodes.append(next_id)
        content.append(content[left] + content[right])
        tree.append([left, right])
        pool.append(next_id)
        next_id += 1
    jet["content"] = np.array(content)
    jet["tree"] = np.array(tree).astype(int)
    jet["root_id"] = len(jet["tree"]) - 1
    return jet
d4e8d12f8701d140e965e773e9a2542b133d8535
28,220
import subprocess


def run_command(command):
    """Run *command* through the shell and return its stdout.

    On failure, a (stdout, stderr, returncode) tuple is returned instead.
    """
    try:
        completed = subprocess.run(
            command,
            capture_output=True,
            text=True,
            check=True,
            shell=True,
        )
    except subprocess.CalledProcessError as exc:
        return exc.output, exc.stderr, exc.returncode
    return completed.stdout
1807e42893c7f6ba6bdc52849bb69221a9d4be89
28,221
from typing import Optional


def get_vt_retrohunt_files(vt_key: str, r_id: str, limit: Optional[int] = 100):
    """Fetch file objects matched by a VirusTotal retrohunt job.

    :param vt_key: VirusTotal API key
    :param r_id: retrohunt job identifier
    :param limit: maximum number of matching files to request
    :return: list of matching file objects (empty list on error)
    """
    url = (
        "https://www.virustotal.com/api/v3/intelligence/retrohunt_jobs/"
        f"{r_id}/matching_files?limit={limit}"
    )
    data = vt_request(api_key=vt_key, url=url)
    if "error" in data:
        print_err(f"[ERR] Error occured during receiving notifications: {data['error']}")
        return []
    return data["data"]
f29cdd1db8fc1b0559a49422df24a16e4708b493
28,222
import math def proj_make_3dinput_v2(project, angle = 15, start_slice = [0,0,0], crop_slice = [0.75, 0.625, 0.625]): """ This function unprojects 2d data into 3d voxel at different angles/views and do crop. :param project: 2d image input :param angle: the angle of different view. set max(1) as 0. :param start_slice: start slice of three dimension :param crop_slice: crop ratio of three dimension :return pred_proj: 3d output """ angle1 = angle h = project.shape[0] w = project.shape[1] l = project.shape[1] if angle <= 45: label = project l1 = round((1.0/math.tan(math.radians(angle)))*l) L = round((w**2+l1**2)**0.5*angle/45) p = round((L-l)/2) project = np.pad(project,((0,0),(p,p)),'constant', constant_values=(0,0)) pred_proj = cv2.resize(project,(l1+w-1, h)) # crop s1 = round(start_slice[1]*w) s2 = round(start_slice[2]*l1) pred_proj = pred_proj[round(start_slice[0]*h):round((start_slice[0]+crop_slice[0])*h),:] input3d = np.zeros((round(crop_slice[0]*h), round(crop_slice[1]*w), round(crop_slice[2]*l1)), dtype=np.float32) for i in range(round(crop_slice[1]*w+crop_slice[2]*l1-1)): relen = input3d.diagonal(round(i-crop_slice[1]*w+1),1,2).shape[1] row, col = np.diag_indices(relen) if i < (input3d.shape[1]-1): input3d[:,row-(i-input3d.shape[1]+1), col] = np.expand_dims(pred_proj[:,i+w-s1+s2-input3d.shape[1]],1).repeat(relen, axis=1) elif i >= (input3d.shape[1]-1): input3d[:,row, col+(i-input3d.shape[1]+1)] = np.expand_dims(pred_proj[:,i+w-s1+s2-input3d.shape[1]],1).repeat(relen, axis=1) input3d_itk = sitk.GetImageFromArray(input3d) input3d_itk = resize_image_itk(input3d_itk, (round(crop_slice[2]*l), round(crop_slice[1]*w), round(crop_slice[0]*h)),resamplemethod=sitk.sitkLinear) input3d = sitk.GetArrayFromImage(input3d_itk) elif (angle > 45) & (angle < 90): label = project angle = 90-angle w1 = round((1.0/math.tan(math.radians(angle)))*l) L = round((w1**2+l**2)**0.5*angle/45) p = round((L-l)/2) project = np.pad(project,((0,0),(p,p)),'constant', constant_values=(0,0)) 
pred_proj = cv2.resize(project,(l+w1-1, h)) # crop s1 = round(start_slice[1]*w1) s2 = round(start_slice[2]*l) pred_proj = pred_proj[round(start_slice[0]*h):round((start_slice[0]+crop_slice[0])*h),:] input3d = np.zeros((round(crop_slice[0]*h), round(crop_slice[1]*w1), round(crop_slice[2]*l)), dtype=np.float32) for i in range(round(crop_slice[1]*w1+crop_slice[2]*l-1)): relen = input3d.diagonal(round(i-crop_slice[1]*w1+1),1,2).shape[1] row, col = np.diag_indices(relen) if i < (input3d.shape[1]-1): input3d[:,row-(i-input3d.shape[1]+1), col] = np.expand_dims(pred_proj[:,i+w1-s1+s2-input3d.shape[1]],1).repeat(relen, axis=1) elif i >= (input3d.shape[1]-1): input3d[:,row, col+(i-input3d.shape[1]+1)] = np.expand_dims(pred_proj[:,i+w1-s1+s2-input3d.shape[1]],1).repeat(relen, axis=1) input3d_itk = sitk.GetImageFromArray(input3d) input3d_itk = resize_image_itk(input3d_itk, (round(crop_slice[2]*l), round(crop_slice[1]*w), round(crop_slice[0]*h)),resamplemethod=sitk.sitkLinear) input3d = sitk.GetArrayFromImage(input3d_itk) elif angle == 90: label = project project = np.flip(project, 1) pred_proj = project[round(start_slice[0]*h):round((start_slice[0]+crop_slice[0])*h), round(start_slice[1]*w):round((start_slice[1]+crop_slice[1])*w)] input3d = np.expand_dims(pred_proj, 2).repeat(pred_proj.shape[1], axis=2) elif (angle > 90) & (angle <= 135): label = project angle = angle - 90 project = np.flip(project, 1) w1 = round((1.0/math.tan(math.radians(angle)))*l) L = round((w1**2+l**2)**0.5*angle/45) p = round((L-l)/2) project = np.pad(project,((0,0),(p,p)),'constant', constant_values=(0,0)) pred_proj = cv2.resize(project,(l+w1-1, h)) # crop start_slice[1] = 1 - (start_slice[1] + crop_slice[1]) s1 = round(start_slice[1]*w1) s2 = round(start_slice[2]*l) pred_proj = pred_proj[round(start_slice[0]*h):round((start_slice[0]+crop_slice[0])*h),:] input3d = np.zeros((round(crop_slice[0]*h), round(crop_slice[1]*w1), round(crop_slice[2]*l)), dtype=np.float32) for i in 
range(round(crop_slice[1]*w1+crop_slice[2]*l-1)): relen = input3d.diagonal(round(i-crop_slice[1]*w1+1),1,2).shape[1] row, col = np.diag_indices(relen) if i < (input3d.shape[1]-1): input3d[:,row-(i-input3d.shape[1]+1), col] = np.expand_dims(pred_proj[:,i+w1-s1+s2-input3d.shape[1]],1).repeat(relen, axis=1) elif i >= (input3d.shape[1]-1): input3d[:,row, col+(i-input3d.shape[1]+1)] = np.expand_dims(pred_proj[:,i+w1-s1+s2-input3d.shape[1]],1).repeat(relen, axis=1) input3d = np.flip(input3d, 1) input3d_itk = sitk.GetImageFromArray(input3d) input3d_itk = resize_image_itk(input3d_itk, (round(crop_slice[2]*l), round(crop_slice[1]*w), round(crop_slice[0]*h)),resamplemethod=sitk.sitkLinear) input3d = sitk.GetArrayFromImage(input3d_itk) start_slice[1] = 1 - (start_slice[1] + crop_slice[1]) elif (angle > 135) & (angle < 180): label = project angle = 180 - angle project = np.flip(project, 1) l1 = round((1.0/math.tan(math.radians(angle)))*l) L = round((w**2+l1**2)**0.5*angle/45) p = round((L-l)/2) project = np.pad(project,((0,0),(p,p)),'constant', constant_values=(0,0)) pred_proj = cv2.resize(project,(l1+w-1, h)) # crop start_slice[1] = 1 - (start_slice[1] + crop_slice[1]) s1 = round(start_slice[1]*w) s2 = round(start_slice[2]*l1) pred_proj = pred_proj[round(start_slice[0]*h):round((start_slice[0]+crop_slice[0])*h),:] input3d = np.zeros((round(crop_slice[0]*h), round(crop_slice[1]*w), round(crop_slice[2]*l1)), dtype=np.float32) for i in range(round(crop_slice[1]*w+crop_slice[2]*l1-1)): relen = input3d.diagonal(round(i-crop_slice[1]*w+1),1,2).shape[1] row, col = np.diag_indices(relen) if i < (input3d.shape[1]-1): input3d[:,row-(i-input3d.shape[1]+1), col] = np.expand_dims(pred_proj[:,i+w-s1+s2-input3d.shape[1]],1).repeat(relen, axis=1) elif i >= (input3d.shape[1]-1): input3d[:,row, col+(i-input3d.shape[1]+1)] = np.expand_dims(pred_proj[:,i+w-s1+s2-input3d.shape[1]],1).repeat(relen, axis=1) input3d = np.flip(input3d, 1) input3d_itk = sitk.GetImageFromArray(input3d) input3d_itk = 
resize_image_itk(input3d_itk, (round(crop_slice[2]*l), round(crop_slice[1]*w), round(crop_slice[0]*h)),resamplemethod=sitk.sitkLinear) input3d = sitk.GetArrayFromImage(input3d_itk) start_slice[1] = 1 - (start_slice[1] + crop_slice[1]) elif angle == 180: label = project project = np.flip(project, 1) pred_proj = project[round(start_slice[0]*h):round((start_slice[0]+crop_slice[0])*h), round(start_slice[2]*l):round((start_slice[2]+crop_slice[2])*l)] input3d = np.expand_dims(pred_proj, 1).repeat(pred_proj.shape[1], axis=1) return input3d
3867cd03cc9c7833a29c9f5d4d078b5caabe2b73
28,223
from ch07.digraph_search import topological_sort as ts


def topological_sort(digraph):
    """Delegate to the chapter-7 topological sort implementation."""
    return ts(digraph)
bb16ca7c44adf37893d47cfacd7d81ae0d646af6
28,224
def merge_scores(scores_test, scores_val):
    """Merge test and validation scores into ``final/...`` entries.

    ``test/<name>`` entries are averaged with the matching ``val/<name>``
    entry; the formatted string mirrors the original style (percent or
    plain float). All other entries are copied through unchanged.
    """
    merged = {}
    for key, stats in scores_test.items():
        metric = key.split("/")[1]
        final_key = "final/" + metric
        if not key.startswith("test/"):
            merged[final_key] = stats
            continue
        val_stats = scores_val["val/" + metric]
        value = 0.5 * (stats["value"] + val_stats["value"])
        if stats["string"].endswith("%"):
            value_str = f"{value:05.2%}"
        else:
            value_str = f"{value:.6}"
        merged[final_key] = {"value": value, "string": value_str}
    return merged
be0dded69367e7554c0cc2632946d46954a3cc15
28,225
import os


def extract_data_frame_from_file(data_path, filename):
    """Load a CSV file (first row skipped) into a DataFrame.

    :param data_path: path of the source file directory
    :param filename: source file name
    :return: pandas DataFrame with the file content
    """
    full_path = os.path.join(data_path, filename)
    return pd.read_csv(full_path, skiprows=1, sep=SEPARATOR)
68ab293638b40488181c908822ca50eda55731c6
28,226
def validateDate(data, name, published):
    """Verify that *published* is a parseable date string.

    Raises InvalidField when parsing fails; otherwise returns the
    original string unchanged.
    """
    if not parse_datetime(published):
        raise InvalidField(name, published)
    return published
d8a46a491f54cc571f2560a816d76c93a8af79cf
28,227
def debug_error_message(msg):
    """Return *msg* when config.DEBUG is truthy, otherwise None.

    Returning None makes Werkzeug fall back to its generic error message.

    :param msg: the error message to expose in debug mode

    .. versionadded: 0.0.9
    """
    return msg if getattr(config, "DEBUG", False) else None
7f58f62d5891f7eacc1a6326b2c64ea1ce5862fe
28,228
def join(
    group_id: int,
    db: Session = Depends(get_db),
    user: UserModel = Depends(get_active_user),
):
    """Add the current user to the group identified by *group_id*."""
    membership = service.join(db, group_id=group_id, user_id=user.id)
    return membership
cc3d329b36d04047030e8af4e2c0d0511605aa49
28,229
def calc_correlation(cs, lab):
    """Spearman's rank correlation between scores and labels.

    :param cs: predicted scores
    :param lab: ground-truth labels
    :return: Spearman's rho (p-value is discarded)
    """
    rho, _pval = spearmanr(cs, lab)
    return rho
a1768394ab94d8833cbbbd6eb32d927285bac4b8
28,230
def normalize(df):
    """Scale each column of *df* linearly into [-1, 1].

    Columns with zero range produce NaNs during scaling and are filled
    with 0.

    Parameters:
        df (pd df) : input df

    Returns:
        result (pd df) : output df
    """
    scaled = df.copy()
    for column in df.columns:
        col_min = df[column].min()
        col_max = df[column].max()
        scaled[column] = 2 * (df[column] - col_min) / (col_max - col_min) - 1
    return scaled.fillna(0)
2fc05fc9ef7642ac4b84cb6ed567ec64c1da0836
28,231
def cbv_decorator(decorator):
    """Adapt a function-view decorator for class-based views.

    Usage:

        @cbv_decorator(login_required)
        class MyClassBasedView(View):
            pass
    """
    def _decorator(cls):
        # Wrap dispatch() so the function decorator runs on every request.
        cls.dispatch = method_decorator(decorator)(cls.dispatch)
        return cls
    return _decorator
9811cc05bcf31cb5145cc0a4f089e40433446023
28,232
def _shift_anchors(anchors, direction): """Shift anchors to the specified direction """ new_anchors = deepcopy(anchors) if direction == 'center': pass elif direction == 'top': heights = new_anchors[:,3] - new_anchors[:,1] + 1 heights = heights[:,np.newaxis] new_anchors[:,[1,3]] = new_anchors[:,[1,3]] - heights/2 elif direction == 'bottom': heights = new_anchors[:,3] - new_anchors[:,1] + 1 heights = heights[:,np.newaxis] new_anchors[:,[1,3]] = new_anchors[:,[1,3]] + heights/2 elif direction == 'right': widths = new_anchors[:,2] - new_anchors[:,0] + 1 widths = widths[:,np.newaxis] new_anchors[:,[0,2]] = new_anchors[:,[0,2]] + widths/2 elif direction == 'left': widths = new_anchors[:,2] - new_anchors[:,0] + 1 widths = widths[:,np.newaxis] new_anchors[:,[0,2]] = new_anchors[:,[0,2]] - widths/2 return new_anchors
fbf48649c846bbb7275cb2a773f637f9bc7023f3
28,233
import itertools


def enumerate_all_features(dim: int, num_values: int) -> chex.Array:
    """Return every categorical feature vector of length *dim*.

    Each entry takes values in [0, num_values); the result has shape
    [num_values ** dim, dim] and dtype int32.
    """
    all_combos = list(itertools.product(range(num_values), repeat=dim))
    features = jnp.array(all_combos)
    chex.assert_shape(features, [num_values ** dim, dim])
    return features.astype(jnp.int32)
049178afae402a3c73fa3a547afc8be1640efd62
28,234
def convert_pad_shape(pad_shape):
    """Flatten a reversed list of pad pairs into arguments for F.pad."""
    flattened = []
    for pair in reversed(pad_shape):
        flattened.extend(pair)
    return flattened
39e77b3931f29f3ab95a75662187e09df545364f
28,235
def PotentialWalk(pos, tree, softening=0, no=-1, theta=0.7):
    """Returns the gravitational potential at position x by performing the Barnes-Hut
    treewalk using the provided octree instance

    Arguments:
    pos - (3,) array containing position of interest
    tree - octree object storing the tree structure

    Keyword arguments:
    softening - softening radius of the particle at which the force is being evaluated
    - we use the greater of the target and source softenings when evaluating the
    softened potential
    no - index of the top-level node whose field is being summed - defaults to the
    global top-level node, can use a subnode in principle for e.g. parallelization
    theta - cell opening angle used to control force accuracy; smaller is slower
    (runtime ~ theta^-3) but more accurate. (default 0.7 gives ~1% accuracy)
    """
    if no < 0: no = tree.NumParticles # we default to the top-level node index
    phi = 0
    dx = np.empty(3,dtype=np.float64)
    # Iterative treewalk: `no` follows NextBranch/FirstSubnode links until
    # the walk falls off the tree (no == -1).
    while no > -1:
        # Squared distance from the query point to the current node.
        r = 0
        for k in range(3):
            dx[k] = tree.Coordinates[no,k] - pos[k]
            r += dx[k]*dx[k]
        r = sqrt(r)
        # Use the larger of target and source softenings.
        h = max(tree.Softenings[no],softening)
        if no < tree.NumParticles: # if we're looking at a leaf/particle
            if r>0: # by default we neglect the self-potential
                if r < h:
                    # Softened interaction inside the softening radius.
                    phi += tree.Masses[no] * PotentialKernel(r,h)
                else:
                    phi -= tree.Masses[no] / r
            no = tree.NextBranch[no]
        elif r > max(tree.Sizes[no]/theta + tree.Deltas[no], h+tree.Sizes[no]*0.6+tree.Deltas[no]): # if we satisfy the criteria for accepting the monopole
            phi -= tree.Masses[no]/r
            if tree.HasQuads:
                phi -= 0.5 * np.dot(np.dot(dx,tree.Quadrupoles[no]),dx)/(r*r*r*r*r) # Potential from the quadrupole moment
            no = tree.NextBranch[no]
        else: # open the node
            no = tree.FirstSubnode[no]
    return phi
6b03d5075df752bd5c7986da76ce8fc7c28ce10c
28,236
import re


def GetProjectUserEmail(git_repo):
    """Return the committer email configured for *git_repo*, or None."""
    ident = RunGit(git_repo, ['var', 'GIT_COMMITTER_IDENT']).output
    match = re.search(r'<([^>]*)>', ident.strip())
    if match:
        return match.group(1)
    return None
2749a7797b4ef9c2d7532f31e8b8594f0cc9c174
28,237
from typing import Tuple


def decode_features(features_ext, resource_attr_range: Tuple[int, int]):
    """Split extended-config features into plain features and resources.

    The last column of *features_ext* encodes a resource value scaled
    into [0, 1]; it is mapped back onto the integer range
    [r_min, r_max].

    :param features_ext: matrix of features from extended configs
    :param resource_attr_range: (r_min, r_max)
    :return: (features, resources)
    """
    r_min, r_max = resource_attr_range
    features = features_ext[:, :-1]
    encoded = _flatvec(features_ext[:, -1])
    lower = r_min - 0.5 + EPS
    width = r_max - r_min + 1 - 2 * EPS
    decoded = anp.clip(anp.round(encoded * width + lower), r_min, r_max)
    return features, [int(r) for r in decoded]
ffb709a8ef7f7da91a7b4b99c80b2f32ff66b66e
28,238
def get_event():
    """Return the event information of a group.

    Reads ``room_id`` from the request JSON and responds with the event
    fields, or empty strings when the room has no event.
    """
    incoming = request.get_json()
    event = Event.get_event_with_room_id(incoming['room_id'])
    if event:
        results = {
            'event_name': event.name,
            'location': event.location,
            'start_time': event.start_time,
            'end_time': event.end_time,
            'description': event.description,
        }
    else:
        results = {
            'event_name': "",
            'location': "",
            'start_time': "",
            'end_time': "",
            'description': "",
        }
    return jsonify(results=results)
d6f9c581b563d3231ef9d40de557eee323025e4b
28,239
import os


def copy_decompress(source_fn):
    """Generate bash code to copy/decompress a file into $TMPDIR.

    :param source_fn: path of the source file
    :return: (command, new_filename) tuple; the command decompresses or
        copies *source_fn* into $TMPDIR and then cd's there.
    """
    new_fn = os.path.basename(source_fn)
    lowered = source_fn.lower()
    if lowered.endswith(".gz"):
        # BUG FIX: str.strip(".gz") removes any of the characters
        # '.', 'g', 'z' from BOTH ends (e.g. "zig.gz" -> "i"); slice the
        # suffix off instead. Also, gunzip writes to stdout with -c, not -C.
        new_fn = new_fn[:-len(".gz")]
        cmd = "gunzip -c {source_fn} > $TMPDIR/{new_fn}"
    elif lowered.endswith(".bz2"):
        new_fn = new_fn[:-len(".bz2")]
        cmd = "bzip2 -dc {source_fn} > $TMPDIR/{new_fn}"
    elif lowered.endswith(".dsrc"):
        new_fn = new_fn[:-len(".dsrc")]
        cmd = "dsrc d {source_fn} $TMPDIR/{new_fn}"
    else:
        cmd = "cp {source_fn} $TMPDIR/{new_fn}"
    cmd = "\n".join([cmd, "cd $TMPDIR"])
    return cmd.format(source_fn=source_fn, new_fn=new_fn), new_fn
7b65e66f240e2e86b70c963e7f0d90381656210e
28,240
def test(model, X, y):
    """Return the MSE between *model*'s predictions on X and y.

    Parameters
    ----------
    X: Test dataset with k features
    y: Ground truth of X with k features
    """
    predictions = model.predict(X).flatten()
    return mse(predictions, y.flatten())
5e27125def948478b98eac997009de29a44a64a9
28,241
def solver(objectives):
    """Returns a solver for the given objective(s).

    Either a single objective or a list of objectives can be provided.
    The result is either an IKSolver or a GeneralizedIKSolver
    corresponding to the given objective(s) (see
    klampt.robotsim.IKSolver and klampt.robotsim.GeneralizedIKSolver).
    In rare cases, a list of IKSolver's is returned if the objectives
    span different robots; they should be solved independently for
    efficiency.

    (The objectives should be a result from the :func:`objective`
    function. Beware that if you are making your own goals by calling
    the IKObjective constructors from the robotsim module, the .robot
    member of these goals must be set).
    """
    if hasattr(objectives, '__iter__'):
        generalized = []
        robs = dict()
        # Group plain IK objectives by robot; collect generalized ones.
        for obj in objectives:
            if isinstance(obj, IKObjective):
                # BUG FIX: dict has no getdefault(); setdefault() is the
                # method that inserts-and-returns, so objectives are
                # actually grouped per robot.
                robs.setdefault(obj.robot, []).append(obj)
            elif isinstance(obj, GeneralizedIKObjective):
                generalized.append(obj)
            else:
                raise TypeError("Objective is of wrong type")
        if len(generalized) != 0:
            # Need a generalized solver spanning the whole world.
            if generalized[0].isObj1:
                world = WorldModel(generalized[0].obj1.world)
            else:
                world = WorldModel(generalized[0].link1.world)
            s = GeneralizedIKSolver(world)
            for obj in generalized:
                s.add(obj)
            for (r, objs) in robs.items():
                for obj in objs:
                    s.add(GeneralizedIKObjective(r, obj))
            return s
        else:
            res = []
            # BUG FIX: iterating a dict yields keys only; the original
            # `for (r, objs) in robs:` could never unpack. Use items().
            for (r, objs) in robs.items():
                s = IKSolver(r)
                for obj in objs:
                    s.add(obj)
                res.append(s)
            if len(res) == 1:
                return res[0]
            return res
    else:
        if isinstance(objectives, IKObjective):
            s = IKSolver(objectives.robot)
            s.add(objectives)
            return s
        elif isinstance(objectives, GeneralizedIKObjective):
            if objectives.isObj1:
                world = WorldModel(objectives.obj1.world)
            else:
                world = WorldModel(objectives.link1.world)
            s = GeneralizedIKSolver(world)
            s.add(objectives)
            return s
        else:
            raise TypeError("Objective is of wrong type")
685bed37629c57a7041f3cdcee0d98c3c93b1d77
28,242
def model_fn_builder(config: NeatConfig): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) is_training = (mode == tf.estimator.ModeKeys.TRAIN) batch_size = get_shape_list(features['actions/action_id'], expected_rank=1)[0] hidden_size = config.model['hidden_size'] # activation_fn = tf.nn.tanh if config.model.get('activation', 'tanh') == 'tanh' else tf.identity scp_model = StateChangePredictModel(config, is_training=is_training, object_types=features['objects/object_types'], ) encoded_h = scp_model.encode_affordances(features['objects/object_states']) encoded_h_pre = tf.gather(encoded_h, [0, 2], axis=1) encoded_h_post_gt = tf.gather(encoded_h, [1, 3], axis=1) action_embed = scp_model.encode_action(features['actions/action_id'], action_args=features['actions/action_args']) encoded_h_post_pred = scp_model.apply_action_mlp(action_embed, encoded_h_pre) ############################################################# # Now construct a decoder # [batch_size, 3, #objs, hidden_size] -> [batch_size, 3 * objs, hidden_size] all_encoded_h = tf.concat([ encoded_h_pre, # [0, 2] encoded_h_post_gt, # [1, 3] encoded_h_post_pred, # [1, 3] ], 1) gt_affordances_decoder = tf.gather(features['objects/object_states'], [0, 2, 1, 3, 1, 3], axis=1) isvalid_by_type = tf.cast(tf.gather(features['objects/is_valid'], [0, 2, 1, 3, 1, 3], axis=1), dtype=tf.float32) if mode == tf.estimator.ModeKeys.PREDICT: predictions = scp_model.sample(all_encoded_h) predictions.update(**features) return tf.contrib.tpu.TPUEstimatorSpec(mode=tf.estimator.ModeKeys.PREDICT, predictions=predictions) affordance_pred_by_type = scp_model.decode_affordances_when_gt_is_provided(all_encoded_h, gt_affordances_decoder) ###################### # For losses # action_logits = 
action_result['action_logits'] ############################################ # if params.get('demomode', False): # action_logits['affordances_pred'] = affordance_pred_by_type[:, 4:] # for k in action_logits: # action_logits[k] = tf.nn.softmax(action_logits[k], axis=-1) # return action_logits losses, norms = scp_model.compute_losses( object_states=features['objects/object_states'], isvalid_by_type_o1o2=isvalid_by_type[:, :2], encoded_h_pre=encoded_h_pre, encoded_h_post_gt=encoded_h_post_gt, encoded_h_post_pred=encoded_h_post_pred, affordance_pred_by_type=affordance_pred_by_type, gt_affordances_decoder=gt_affordances_decoder, isvalid_by_type=isvalid_by_type) # losses['action_success'] = sequence_xe_loss(action_logits['action_success'], features['actions/action_success']) loss = tf.add_n([x for x in losses.values()]) for k, v in norms.items(): losses[f'norms/{k}'] = v loss += 0.1 * norms['hidden_state_diff_l2'] loss += 0.1 * norms['hidden_state_diff_l1'] if is_training: tvars = [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'global_step' not in x.name] else: tvars = tf.trainable_variables() # ckpt_to_assignment_map = {} # initialized_variable_names = {} # init_checkpoint = config.model.get('init_checkpoint', None) # if init_checkpoint: # regular_assignment_map, regular_initialized_variable_names = get_assignment_map_from_checkpoint( # tvars, init_checkpoint=init_checkpoint # ) # # # If you need to disable loading certain variables, comment something like this in # # regular_assignment_map = {k: v for k, v in regular_assignment_map.items() if # # all([x not in k for x in ('temporal_predict', # # 'roi_language_predict', # # 'roi_pool/pool_c5', # # 'aux_roi', # # 'second_fpn', # # 'img_mask', # # 'roi_pool/box_feats_proj/kernel')])} # # ckpt_to_assignment_map['regular'] = regular_assignment_map # initialized_variable_names.update(regular_initialized_variable_names) # # def scaffold_fn(): # """Loads pretrained model through scaffold function.""" # # ORDER 
BY PRIORITY # return tf.train.Scaffold() tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" # if var.name in initialized_variable_names: # init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) train_op, train_metrics = optimization.build_optimizer_from_config( loss=loss, optimizer_config=config.optimizer, device_config=config.device, ) train_metrics.update(losses) # for k, v in affordance_loss_metrics.items(): # train_metrics[f'affordance_metrics/{k}'] = v host_call = construct_host_call(scalars_to_log=train_metrics, model_dir=config.device['output_dir'], iterations_per_loop=config.device.get('iterations_per_loop', 1000)) return tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=loss, train_op=train_op, eval_metrics=None, # scaffold_fn=scaffold_fn, host_call=host_call) return model_fn
572ceb93e9907e1bb960e3c66f34c14e8bacce7c
28,243
def solve_case(rectangle, tree, io_obj):
    """ Main program to recursively fill a rectangle with pentominos """
    # _solve_rect_with_x is curried: it is applied in turn to the board,
    # the search tree, the board's half-dimensions/parities and the I/O
    # object, yielding a solver mapped over each candidate centre point
    # for the X pentomino.
    # NOTE(review): the four-element list appears to be
    # [rows//2, rows%2, cols//2, cols%2] — confirm against
    # _solve_rect_with_x before relying on that reading.
    return tuple(map(_solve_rect_with_x(rectangle)(tree)([
        len(rectangle) // 2, len(rectangle) % 2,
        len(rectangle[0]) // 2, len(rectangle[0]) % 2])(io_obj),
        tuple(_get_x_center_pts(len(rectangle) // 2 + len(rectangle) % 2,
                                len(rectangle[0]) // 2 + len(rectangle[0]) % 2))))
4afa8b6a1e723e5900d14ec252a131086b1b15f1
28,244
def keras_weights_to_caffemodel(keras_model): """ Only Implement the conv layer and fc layer :param keras_model: :return: """ net = caffe.Net() layers = keras_model.layers for layer in layers: if type(layer) == keras.layers.Convolution2D: w, b = layer.get_weights() w = convert_filter(w) param = caffe.Layer_param(layer.name, 'Convolution') net.add_layer_with_data(param, [w, b]) if type(layer) == keras.layers.Dense: w, b = layer.get_weights() w = convert_fc(w) param = caffe.Layer_param(layer.name, 'InnerProduct') net.add_layer_with_data(param, [w, b]) return net
088c5040e3c3cba962d4fee7caee74761f9dbb21
28,245
def get_db(): """ :return: """ db = getattr(g, "_database", None) if db is None: db = g._database = connect() db.row_factory = make_dicts return db
d462dd6d82ce1c5f13ab5acfc3f613c4d9bdba33
28,246
import scipy def interpolate(X, Y, Z, x_coords, y_coords, factor=4, method='cubic'): """ :return: Interpolated X, Y, Z coordinate tuples, given by a factor, and a method in `scipy.interpolate.griddata` """ X_f, Y_f, Z_f = X.flatten(), Y.flatten(), Z.flatten() x_inter = scale(x_coords, factor=factor).reshape(1, -1) y_inter = scale(y_coords, factor=factor).reshape(-1, 1) X_, Y_ = np.meshgrid(x_inter, y_inter) Z_ = scipy.interpolate.griddata((X_f, Y_f), Z_f, (x_inter, y_inter), method=method) return X_, Y_, Z_
6520fb7a9e1c7b25a3030d97ad9a43253fd7b976
28,247
def weighted_rmsd(x, y, weights=None, dim=None, apply_nan_mask=True): """ Compute weighted root-mean-square-deviation between two `xarray.DataArray` objects. Parameters ---------- x, y : `xarray.DataArray` objects xarray objects for which to compute `weighted_rmsd`. weights : array_like, optional weights to use. By default, weights=`None` dim : str or sequence of str, optional Dimension(s) over which to apply `weighted rmsd` By default weighted rmsd is applied over all dimensions. apply_nan_mask : bool, default: True Returns ------- weighted_root_mean_square deviation : float If `weights` is None, returns root mean square deviation using equal weights for all data points. """ if weights is None: warn('Computing root-mean-square-deviation using equal weights for all data points') weights, op_over_dims = _get_weights_and_dims( x, weights=weights, dim=dim, apply_nan_mask=apply_nan_mask ) # If the mask is applied in previous operation, # disable it for subseqent operations to speed up computation if apply_nan_mask: apply_nan_mask_flag = False else: apply_nan_mask_flag = True dev = (x - y) ** 2 dev_mean = weighted_mean( dev, weights=weights, dim=op_over_dims, apply_nan_mask=apply_nan_mask_flag ) return np.sqrt(dev_mean)
da975ff48dcdee1a7f2c44a58d171d7c4579e553
28,248
def _default_value(argument, default): """Returns ``default`` if ``argument`` is ``None``""" if argument is None: return default else: return argument
52eed8ddaf3c52adba69044cc462fc11279670c5
28,249
def setup(opp, config): """Set up the w800rf32 component.""" # Declare the Handle event def handle_receive(event): """Handle received messages from w800rf32 gateway.""" # Log event if not event.device: return _LOGGER.debug("Receive W800rf32 event in handle_receive") # Get device_type from device_id in opp.data device_id = event.device.lower() signal = W800RF32_DEVICE.format(device_id) dispatcher_send(opp, signal, event) # device --> /dev/ttyUSB0 device = config[DOMAIN][CONF_DEVICE] w800_object = w800.Connect(device, None) def _start_w800rf32(event): w800_object.event_callback = handle_receive opp.bus.listen_once(EVENT_OPENPEERPOWER_START, _start_w800rf32) def _shutdown_w800rf32(event): """Close connection with w800rf32.""" w800_object.close_connection() opp.bus.listen_once(EVENT_OPENPEERPOWER_STOP, _shutdown_w800rf32) opp.data[DATA_W800RF32] = w800_object return True
9604e79c23baf155d36568cd32f86507747e78c3
28,250
def all_feature_functions(): """ Returns all feature functions from the function module :rtype list[callable] :returns List of feature functions """ exclude = ['n_gram_frequency', 'term_frequency'] functions = [] for name in dir(features): feature_function = getattr(features, name) if callable(feature_function) and feature_function.__name__ not in exclude: functions.append(feature_function) return functions
3e2fb73def3791a64d1e3868bf5c7188d593e3cf
28,251
def unsupplied_buses(net, mg=None, slacks=None, respect_switches=True): """ Finds buses, that are not connected to an external grid. INPUT: **net** (pandapowerNet) - variable that contains a pandapower network OPTIONAL: **mg** (NetworkX graph) - NetworkX Graph or MultiGraph that represents a pandapower network. **in_service_only** (boolean, False) - Defines whether only in service buses should be included in unsupplied_buses. **slacks** (set, None) - buses which are considered as root / slack buses. If None, all existing slack buses are considered. **respect_switches** (boolean, True) - Fixes how to consider switches - only in case of no given mg. OUTPUT: **ub** (set) - unsupplied buses EXAMPLE: import pandapower.topology as top top.unsupplied_buses(net) """ mg = mg or create_nxgraph(net, respect_switches=respect_switches) if slacks is None: slacks = set(net.ext_grid[net.ext_grid.in_service].bus.values) | set( net.gen[net.gen.in_service & net.gen.slack].bus.values) not_supplied = set() for cc in nx.connected_components(mg): if not set(cc) & slacks: not_supplied.update(set(cc)) return not_supplied
e8f7da1735ab56ad5a4ffd9b4ef27088b7e89fd1
28,252
import os import time def last_check(): """Return the date of the last check""" cache = cache_file() if cache: return os.path.getmtime(cache_file()) # Fallback return time.time()
31badaf58b708a6318f20d8d6aad18e86aedbec4
28,253
def is_constant_type(expression_type): """Returns True if expression_type is inhabited by a single value.""" return (expression_type.integer.modulus == "infinity" or expression_type.boolean.HasField("value") or expression_type.enumeration.HasField("value"))
66a3237971299df3c7370f039d87a8b5f4ae2be5
28,254
def compute_tuning_improvement_sds_5_4(): """Compute average improvement during tuning, in sds""" data = _get_tuning_results_df() delta = data['delta'].dropna() result = delta.mean() fn = OUTPUT_DIR.joinpath('5_4_tuning_improvement_sds.txt') with fn.open('w') as f: f.write( '{} standard deviations of improvement during tuning' .format(result)) return result
b1adc3c2c7f2dd22217ec4be63d8e6c49d5827bc
28,255
def make_grid(batch_imgs, n_rows): """Makes grid of images.""" batch_imgs = np.array(batch_imgs) assert len(batch_imgs.shape) == 4, f'Invalid shape {batch_imgs.shape}' batchsize, height, width, channels = batch_imgs.shape n_cols = (batchsize + n_rows - 1) // n_rows grid = np.zeros((n_rows * height, n_cols * width, channels)) for i, img in enumerate(batch_imgs): y = i // n_cols x = i % n_cols grid[y*height:(y+1)*height, x*width:(x+1)*width, :] = img if channels == 1: grid = np.concatenate([grid, grid, grid], axis=-1) # Upsample if low res to avoid visualization artifacts. if height <= 32: upsample_factor = 2 grid = grid.repeat(upsample_factor, axis=0).repeat(upsample_factor, axis=1) return grid
6da67c75407df6ff1a4b85e5a2c0aa6992a6ffe6
28,256
import random def getTypes(parasites): """Take parasites and assign a type to them -- like in pokemon! Then return the list of parasites.""" bugsOut = [] for i in range(len(parasites)): bugsOut.append([i+1, parasites[i], random.choice(["fire", "water", "grass"])]) return bugsOut
905466f92349d97f8fe7c3b7c446327cd52216df
28,257
def jsonUsers(request): """Export user list to JSON""" user_list = list(CustomUser.objects.values()) return JsonResponse(user_list, safe=False)
625669fd38730b9fd759812b11904e5aef3bf76e
28,258
def farthest_point_sample(points, num_points=1024): """ Input: points: a point set, in the format of NxM, where N is the number of points, and M is the point dimension num_points: required number of sampled points """ def compute_dist(centroid, points): return np.sum((centroid - points) ** 2, axis=1) farthest_pts = np.zeros((num_points, points.shape[1])) farthest_pts[0] = points[np.random.randint(len(points))] # Random choose one point as starting point distances = compute_dist(farthest_pts[0], points) for idx in range(1, num_points): farthest_pts[idx] = points[np.argmax(distances)] distances = np.minimum(distances, compute_dist(farthest_pts[idx], points)) return farthest_pts.astype(np.float32)
a96ace38c6d2a18cc247e2131fc095eeccca1a84
28,259
def resolve_shx_font_name(font_name: str) -> str: """ Map SHX font names to TTF file names. e.g. 'TXT' -> 'txt_____.ttf' """ # Map SHX fonts to True Type Fonts: font_upper = font_name.upper() if font_upper in SHX_FONTS: font_name = SHX_FONTS[font_upper] return font_name
013e944160f7fc71849e3e7f6869620a1fd6a328
28,260
def select_event_by_name(session, event_name): """ Get an event by name Parameters ---------- session : database connexion session event_name : str name of the RAMP event Returns ------- `Event` instance """ event = session.query(Event).filter(Event.name == event_name).one() return event
779bf47b812fdc920ff4359f4766a63689ced195
28,261
import torch def attention_mask_creator(input_ids): """Provide the attention mask list of lists: 0 only for [PAD] tokens (index 0) Returns torch tensor""" attention_masks = [] for sent in input_ids: segments_ids = [int(t > 0) for t in sent] attention_masks.append(segments_ids) return torch.tensor(attention_masks)
06a5880069cdc88ea33fe987bf4ac77aceef13eb
28,262
def extension_from_parameters(): """Construct string for saving model with annotation of parameters""" ext = '' ext += '.A={}'.format(ACTIVATION) ext += '.B={}'.format(BATCH_SIZE) ext += '.D={}'.format(DROP) ext += '.E={}'.format(NB_EPOCH) if FEATURE_SUBSAMPLE: ext += '.F={}'.format(FEATURE_SUBSAMPLE) for i, n in enumerate(LAYERS): if n: ext += '.L{}={}'.format(i+1, n) ext += '.P={}'.format(PENALTY) return ext
009bd3dd0b105cbbd060ced37776e011487be415
28,263
import time
import six
# NOTE(review): `S` must be the pysynphot module (S.ArraySpectrum,
# S.Observation are used below); the snippet's original `from re import S`
# imports the regex DOTALL flag and cannot work — confirm intended import
# (typically `import pysynphot as S`).
from re import S


def gen_image_coeff(filter_or_bp, pupil=None, mask=None, module='A',
                    coeff=None, coeff_hdr=None, sp_norm=None, nwaves=None,
                    fov_pix=11, oversample=4, return_oversample=False,
                    use_sp_waveset=False, **kwargs):
    """Generate PSF

    Create an image (direct, coronagraphic, grism, or DHS) based on a set of
    instrument parameters and PSF coefficients. The image is noiseless and
    doesn't take into account any non-linearity or saturation effects, but is
    convolved with the instrument throughput. Pixel values are in counts/sec.
    The result is effectively an idealized slope image.

    If no spectral dispersers (grisms or DHS), then this returns a single
    image or list of images if sp_norm is a list of spectra.

    Parameters
    ----------
    filter_or_bp : str, :mod:`pysynphot.obsbandpass`
        Either the name of a filter or a Pysynphot bandpass.
    pupil : str, None
        NIRCam pupil elements such as grisms or lyot stops.
    mask : str, None
        Specify the coronagraphic occulter (spots or bar).
    module : str
        Module 'A' or 'B'.
    sp_norm : :mod:`pysynphot.spectrum`
        A normalized Pysynphot spectrum to generate image. If not specified,
        the default is flat in phot lam (equal number of photons per spectral
        bin). The default is normalized to produce 1 count/sec within that
        bandpass, assuming the telescope collecting area. Coronagraphic PSFs
        will further decrease this flux.
    coeff : ndarray
        A cube of polynomial coefficients for generating PSFs. This is
        generally oversampled with a shape (fov_pix*oversamp, fov_pix*oversamp, deg).
        If not set, this will be calculated using the :func:`gen_psf_coeff` function.
    coeff_hdr : FITS header
        Header information saved while generating coefficients.
    nwaves : int
        Option to specify the number of evenly spaced wavelength bins to
        generate and sum over to make final PSF. Useful for wide band filters
        with large PSFs over continuum source.
    use_sp_waveset : bool
        Set this option to use `sp_norm` waveset instead of bandpass waveset.
        Useful if user inputs a high-resolution spectrum with line emissions,
        so may wants to keep a grism PSF (for instance) at native resolution
        rather than blurred with the bandpass waveset. TODO: Test.
    fov_pix : int
        Number of detector pixels in the image coefficient and PSF.
    oversample : int
        Factor of oversampling of detector pixels.
    return_oversample: bool
        If True, then also returns the oversampled version of the PSF.

    Keyword Args
    ------------
    grism_order : int
        Grism spectral order (default=1).
    npsf : int
        Number of evenly-spaced (with wavelength) monochromatic PSFs to
        generate with webbPSF. If not specified, then the default is to
        produce 20 PSFs/um. The wavelength range is determined by
        choosing those wavelengths where throughput is >0.001.
    ndeg : int
        Polynomial degree for PSF fitting.
    ND_acq : bool
        ND acquisition square in coronagraphic mask.
    """
    is_grism = ((pupil is not None) and ('GRISM' in pupil))
    is_dhs = ((pupil is not None) and ('DHS' in pupil))
    if is_dhs:
        raise NotImplementedError('DHS has yet to be fully included')

    t0 = time.time()
    # Get filter throughput and create bandpass
    if isinstance(filter_or_bp, six.string_types):
        filter = filter_or_bp
        bp = read_filter(filter, pupil=pupil, mask=mask, module=module, **kwargs)
    else:
        bp = filter_or_bp
        filter = bp.name

    # Coefficients and their header travel together: both or neither.
    if (coeff is not None) and (coeff_hdr is not None):
        fov_pix = coeff_hdr['FOVPIX']
        oversample = coeff_hdr['OSAMP']
        module = coeff_hdr['MODULE']
    elif (coeff is None) and (coeff_hdr is not None):
        raise AttributeError("`coeff_hdr` parameter set, but `coeff` is None")
    elif ((coeff is not None) and (coeff_hdr is None)):
        raise AttributeError("`coeff` parameter set, but `coeff_hdr` is None")
    else:
        coeff, coeff_hdr = gen_psf_coeff(bp, pupil=pupil, mask=mask, module=module,
                                         fov_pix=fov_pix, oversample=oversample, **kwargs)

    t1 = time.time()

    waveset = np.copy(bp.wave)
    if nwaves is not None:
        # Evenly spaced waves
        waveset = np.linspace(waveset.min(), waveset.max(), nwaves)
    elif not (is_grism or is_dhs):
        # For generating the PSF, let's save some time and memory by not using
        # every single wavelength in the bandpass.
        # Do NOT do this for dispersed modes.
        binsize = 1
        if coeff.shape[-1] > 2000:
            binsize = 7
        elif coeff.shape[-1] > 1000:
            binsize = 5
        elif coeff.shape[-1] > 700:
            binsize = 3

        if binsize > 1:
            excess = waveset.size % binsize
            waveset = waveset[:waveset.size-excess]
            waveset = waveset.reshape(-1, binsize)  # Reshape
            waveset = waveset[:, binsize//2]        # Use the middle values
            # Keep the bandpass endpoints so w1/w2 stay unchanged.
            waveset = np.concatenate(([bp.wave[0]], waveset, [bp.wave[-1]]))

    wgood = waveset / 1e4
    w1 = wgood.min()
    w2 = wgood.max()
    wrange = w2 - w1
    t2 = time.time()

    # Flat spectrum with equal photon flux in each spectral bin
    if sp_norm is None:
        sp_flat = S.ArraySpectrum(waveset, 0*waveset + 10.)
        sp_flat.name = 'Flat spectrum in flam'

        # Bandpass unit response is the flux (in flam) of a star that
        # produces a response of one count per second in that bandpass
        sp_norm = sp_flat.renorm(bp.unit_response(), 'flam', bp)

    # Make sp_norm a list of spectral objects if it already isn't
    if not isinstance(sp_norm, list):
        sp_norm = [sp_norm]
    nspec = len(sp_norm)

    t3 = time.time()

    # Set up an observation of the spectrum using the specified bandpass
    if use_sp_waveset:
        if nspec > 1:
            raise AttributeError("Only 1 spectrum allowed when use_sp_waveset=True.")
        # Modify waveset if use_sp_waveset=True
        obs_list = []
        for sp in sp_norm:
            # Select only wavelengths within bandpass.
            # BUG FIX: the original used Python `and` between two NumPy
            # boolean arrays, which raises "truth value is ambiguous";
            # elementwise `&` is required here.
            waveset = sp.wave
            waveset = waveset[(waveset >= w1*1e4) & (waveset <= w2*1e4)]
            obs_list.append(S.Observation(sp, bp, binset=waveset))
        # Update wgood
        wgood = waveset / 1e4
        w1 = wgood.min()
        w2 = wgood.max()
        wrange = w2 - w1
    else:
        # Use the bandpass wavelength set to bin the fluxes
        obs_list = [S.Observation(sp, bp, binset=waveset) for sp in sp_norm]

    # Convert to count rate
    for obs in obs_list:
        obs.convert('counts')

    t4 = time.time()

    # Create a PSF for each wgood wavelength
    use_legendre = True if coeff_hdr['LEGNDR'] else False
    lxmap = [coeff_hdr['WAVE1'], coeff_hdr['WAVE2']]
    psf_fit = jl_poly(wgood, coeff, dim_reorder=False,
                      use_legendre=use_legendre, lxmap=lxmap)
    # Just in case weird coeff gives negative values
    # psf_fit[psf_fit<=0] = np.min(psf_fit[psf_fit>0]) / 10
    t5 = time.time()

    # Multiply each monochromatic PSFs by the binned e/sec at each wavelength
    # Array broadcasting: [nx,ny,nwave] x [1,1,nwave]
    # Do this for each spectrum/observation
    if nspec == 1:
        psf_fit *= obs_list[0].binflux.reshape([-1, 1, 1])
        psf_list = [psf_fit]
    else:
        psf_list = [psf_fit*obs.binflux.reshape([-1, 1, 1]) for obs in obs_list]
    del psf_fit

    # The number of pixels to span spatially
    fov_pix = int(fov_pix)
    oversample = int(oversample)
    fov_pix_over = int(fov_pix * oversample)

    t6 = time.time()

    # Grism spectroscopy
    if is_grism:
        # spectral resolution in um/pixel
        # res is in pixels per um and dw is inverse
        grism_order = kwargs['grism_order'] if ('grism_order' in kwargs.keys()) else 1
        res, dw = grism_res(pupil, module, grism_order)

        # Number of real pixels that spectra will span
        npix_spec = int(wrange // dw + 1 + fov_pix)
        npix_spec_over = int(npix_spec * oversample)

        spec_list = []
        spec_list_over = []
        for psf_fit in psf_list:
            # If GRISM90 (along columns) rotate image by 90 deg CW
            if 'GRISM90' in pupil:
                psf_fit = np.rot90(psf_fit, k=1)
            elif module == 'B':
                # Flip right to left to disperse in correct orientation
                psf_fit = psf_fit[:, :, ::-1]

            # Create oversampled spectral image
            spec_over = np.zeros([fov_pix_over, npix_spec_over])

            # Place each PSF at its dispersed location
            for i, w in enumerate(wgood):
                # Separate shift into an integer and fractional shift
                delx = oversample * (w-w1) / dw  # Number of oversampled pixels to shift
                intx = int(delx)
                fracx = delx - intx
                if fracx < 0:
                    fracx = fracx + 1
                    intx = intx - 1

                # Linear interpolation between the two neighbouring columns
                # stands in for a sub-pixel fshift().
                # spec_over[:,intx:intx+fov_pix_over] += fshift(psf_fit[i], fracx)
                im = psf_fit[i]
                spec_over[:, intx:intx+fov_pix_over] += im*(1.-fracx) + np.roll(im, 1, axis=1)*fracx

            spec_over[spec_over < __epsilon] = 0  # __epsilon

            # Rotate spectrum to its V2/V3 coordinates
            spec_bin = poppy.utils.krebin(spec_over, (fov_pix, npix_spec))
            if 'GRISM90' in pupil:
                # Rotate image 90 deg CCW
                spec_over = np.rot90(spec_over, k=-1)
                spec_bin = np.rot90(spec_bin, k=-1)
            elif module == 'B':
                # Flip right to left for sci coords
                spec_over = spec_over[:, ::-1]
                spec_bin = spec_bin[:, ::-1]

            # Rebin oversampled spectral image to real pixels
            spec_list.append(spec_bin)
            spec_list_over.append(spec_over)

        # Wavelength solutions
        dw_over = dw/oversample
        w1_spec = w1 - dw_over*fov_pix_over/2
        wspec_over = np.arange(npix_spec_over)*dw_over + w1_spec
        wspec = wspec_over.reshape((npix_spec, -1)).mean(axis=1)
        if ('GRISM0' in pupil) and (module == 'B'):
            # Flip for sci coords
            wspec = wspec[::-1]

        if nspec == 1:
            spec_list = spec_list[0]
            spec_list_over = spec_list_over[0]

        # Return list of wavelengths for each horizontal pixel
        # as well as spectral image
        t7 = time.time()
        _log.debug('jl_poly: {:.2f} sec; binflux: {:.2f} sec; disperse: {:.2f} sec'.format(t5-t4, t6-t5, t7-t6))

        if return_oversample:
            return (wspec, spec_list), (wspec_over, spec_list_over)
        else:
            return (wspec, spec_list)

    # DHS spectroscopy
    elif is_dhs:
        raise NotImplementedError('DHS has yet to be fully included')

    # Imaging
    else:
        # Create source image slopes (no noise)
        data_list = []
        data_list_over = []
        for psf_fit in psf_list:
            data_over = psf_fit.sum(axis=0)
            # Floor tiny/zero pixels to a small positive value for log display.
            data_over[data_over <= __epsilon] = data_over[data_over > __epsilon].min() / 10
            data_list_over.append(data_over)
            data_list.append(poppy.utils.krebin(data_over, (fov_pix, fov_pix)))

        if nspec == 1:
            data_list = data_list[0]
            data_list_over = data_list_over[0]

        t7 = time.time()
        _log.debug('jl_poly: {:.2f} sec; binflux: {:.2f} sec; PSF sum: {:.2f} sec'.format(t5-t4, t6-t5, t7-t6))

        if return_oversample:
            return data_list, data_list_over
        else:
            return data_list
28,264
def rotateBoard90(b): """b is a 64-bit score4 board consists of 4 layers Return: a 90 degree rotated board as follow (looking from above) C D E F 0 4 8 C 8 9 A B ==> 1 5 9 D 4 5 6 7 2 6 A E 0 1 2 3 3 7 B F """ return rotateLayer90(b & 0xFFFF) \ | rotateLayer90(b >> 16 & 0xFFFF) << 16 \ | rotateLayer90(b >> 32 & 0xFFFF) << 32 \ | rotateLayer90(b >> 48 & 0xFFFF) << 48
d4069e8f7953abdcf56fc9bc713da69e33f1fdbe
28,265
def register_preset_path(path): """Add filepath to registered presets :param path: the directory of the preset file(s) :type path: str :return: """ if path in _registered_paths: return log.warning("Path already registered: %s", path) _registered_paths.append(path) return path
231d151c0b01e13312539ad592faa9f01acdce42
28,266
def set_pause_orchestration( self, ne_pk_list: list[str], ) -> bool: """Set appliances to pause orchestration .. list-table:: :header-rows: 1 * - Swagger Section - Method - Endpoint * - pauseOrchestration - POST - /pauseOrchestration :param ne_pk_list: List of appliances in the format of integer.NE e.g. ``["3.NE","5.NE"]`` :type ne_pk_list: list[str] :return: Returns True/False based on successful call :rtype: bool """ return self._post( "/pauseOrchestration", data=ne_pk_list, expected_status=[204], return_type="bool", )
72ebb32a6bc1bef1faf09f646dfad90a9b29da32
28,267
def unwrap(value: str, wrap_char: str) -> str: """Unwraps a given string from a character or string. :param value: the string to be unwrapped :param wrap_char: the character or string used to unwrap :return: unwrapped string or the original string if it is not quoted properly with the wrap character or string :raise IllegalArgumentError: if either parameter is not a string """ check_argument_type(value, "value", str) check_argument_type(wrap_char, "wrap_char", str) if is_not_blank(value) and is_not_blank(wrap_char): if value[0] == wrap_char and value[-1] == wrap_char: return value[1:-1] if ( value[0 : len(wrap_char)] == wrap_char and value[-len(wrap_char) :] == wrap_char ): return value[len(wrap_char) : -len(wrap_char)] return value
d1c1aceb0c92e0ccda26f6444eea5056c56d5d44
28,268
def round_to_memory_units(memory_bytes, round_up): """Round bytes to the nearest memory unit.""" return from_memory_units(to_memory_units(memory_bytes, round_up))
9402592d20832dd3149e8b9bc14115711aeee51a
28,269
import os import zipfile from datetime import datetime import time def get_modification_time(input_dir_or_zip): """Get time of most recent content modification as seconds since the epoch.""" path = 'routes.txt' if os.path.isdir(input_dir_or_zip): return int(os.stat(os.path.join(input_dir_or_zip, path)).st_mtime) else: with zipfile.ZipFile(input_dir_or_zip) as zip_file: modified_dt = datetime.datetime(*zip_file.getinfo(path).date_time) return int(time.mktime(modified_dt.timetuple()))
53ff7763897e046d36db28b668fe6c33607fe69b
28,270
import gettext import types def clone(translation): """ Clones the given translation, creating an independent copy. """ clone = gettext.GNUTranslations() clone._catalog = translation._catalog.copy() if hasattr(translation, 'plural'): clone.plural = types.FunctionType( translation.plural.__code__, translation.plural.__globals__, translation.plural.__name__, translation.plural.__defaults__, translation.plural.__closure__ ) return clone
269756db03954d7b6539e941fe07db532e81b17d
28,271
def get_general_channel(): """Returns just the general channel of the workspace""" channels = get_all_channels() for channel in channels: if (channel['is_general']): return channel
f350c87a57dc54580729a29b456e25d5b0c6797f
28,272
def nonrigid_rotations(spc_mod_dct_i): """ Determine if the rotational partition function for a certain species should be calculated according to some non-rigid model. This determination solely relies on whether has specified the use of a non-rigid model for the species. :param spc_mod_dct_i: species partition function models :type spc_mod_dct_i: dict[str: str] :rtype: bool """ rot_model = spc_mod_dct_i['rot']['mod'] return bool(rot_model == 'vpt2')
5ef94d4dc1b267ffab6bb654d58aae592b69d367
28,273
def general_standing(): """ It gives the general standing based on the current matchday. Note that it depends on the parameters that are imported at the beginning of the notebook, specifically Results, hence in order to refresh it needs to be run after Results is created from the utilities script. This is called by other functions. """ posts,_ = fetch_standing('pti') dict_out={} for dic in posts: dict_out[dic['team'].lower()] = dic['position'] return dict_out
2a5f48a34209ff9568b9c4afb447df4eac944391
28,274
def pixwt(xc, yc, r, x, y): """ ; --------------------------------------------------------------------------- ; FUNCTION Pixwt( xc, yc, r, x, y ) ; ; Compute the fraction of a unit pixel that is interior to a circle. ; The circle has a radius r and is centered at (xc, yc). The center of ; the unit pixel (length of sides = 1) is at (x, y). ; --------------------------------------------------------------------------- """ return intarea(xc, yc, r, x-0.5, x+0.5, y-0.5, y+0.5)
35e8937456fa1a4c8ca251ff55449001c6f64859
28,275
def predict(net, inputs, use_GPU=False, in_type='numpy'): """Make predictions using a well-trained network. Parameters ---------- inputs : numpy array or torch tensor The inputs of the network. use_GPU : bool If True, calculate using GPU, otherwise, calculate using CPU. in_type : str The data type of the inputs, it can be 'numpy' or 'torch'. """ if use_GPU: net = net.cuda() if in_type=='numpy': inputs = dp.numpy2cuda(inputs) elif in_type=='torch': inputs = dp.torch2cuda(inputs) else: if in_type=='numpy': inputs = dp.numpy2torch(inputs) net = net.eval() #this works for the batch normalization layers pred = net(Variable(inputs)) if use_GPU: pred = dp.cuda2numpy(pred.data) else: pred = dp.torch2numpy(pred.data) return pred
4a07a8171024fe50f56a94a416f4745f8db01759
28,276
import os def tag_from_ci_env_vars(ci_name, pull_request_var, branch_var, commit_var): """ Checks if the CI environmental variables to check for a pull request, commit id and band commit branch are present. :return: String with the CI build information, or None if the CI environmental variables could not be found. """ pull_request = os.environ.get(pull_request_var) branch = os.environ.get(branch_var) commit = os.environ.get(commit_var) if pull_request and pull_request != "false": try: int(pull_request) print(script_tab + "Pull request valid '%s' variable found: %s" % (ci_name, pull_request)) return "pull_%s" % pull_request except ValueError: print(script_tab + "The pull request environmental variable " + "'%s' value '%s' from %s is not a valid number." % (pull_request_var, pull_request, ci_name)) if branch and commit: print(script_tab + "Branch and commit valid '%s' variables found: %s %s" % (ci_name, branch, commit)) # We only return first 10 digits from the commit ID (normal length 40) commit = "%s" % commit return "%s_%s" % (branch, commit[:10]) print(script_tab + "The environmental variables for %s " % ci_name + "were deemed invalid:\n" + script_tab + "\t%s: %s\n" % (pull_request_var, pull_request) + script_tab + "\t%s: %s\n" % (branch_var, branch) + script_tab + "\t%s: %s" % (commit_var, commit)) return None
73a4f5dda860edf1748b5abf833c73914075d0da
28,277
import os import requests def load_model() -> modellib.MaskRCNN: """ This function loads the segmentation model and returns it. The weights are downloaded if necessary. Returns: modellib.MaskRCNN: MRCNN model with trained weights """ # Define directory with trained model weights root_dir = os.path.split(__file__)[0] model_path = os.path.join(root_dir, "mask_rcnn_molecule.h5") # Download trained weights if needed if not os.path.exists(model_path): print("Downloading model weights...") url = 'https://storage.googleapis.com/mrcnn-weights/mask_rcnn_molecule.h5' req = requests.get(url, allow_redirects=True) with open(model_path, 'wb') as model_file: model_file.write(req.content) print("Successfully downloaded the segmentation model weights!") # Create model object in inference mode. model = modellib.MaskRCNN(mode="inference", model_dir=".", config=InferenceConfig()) # Load weights model.load_weights(model_path, by_name=True) return model
bbbe02b268bce94df0761b9a5ae952282183e734
28,278
def svn_mergeinfo_catalog_merge(*args): """ svn_mergeinfo_catalog_merge(svn_mergeinfo_catalog_t mergeinfo_catalog, svn_mergeinfo_catalog_t changes_catalog, apr_pool_t result_pool, apr_pool_t scratch_pool) -> svn_error_t """ return _core.svn_mergeinfo_catalog_merge(*args)
dd79ece86fab697b519f4ec2b5b294e085701364
28,279
import numpy def normalize_const(v): """ Normalize a numpy array of floats or doubles. """ return v / numpy.linalg.norm(v)
927ad9d2d94735263ac10a445f4f7fe4b3150c95
28,280
import re


def _decode_block_str(block_str):
    """ Decode block definition string

    Gets a list of block arg (dicts) through a string notation of arguments.
    E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip

    All args can exist in any order with the exception of the leading string which
    is assumed to indicate the block type.

    leading string - block type (
      ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct)
    r - number of repeat blocks,
    k - kernel size,
    s - strides (1-9),
    e - expansion ratio,
    c - output channels,
    se - squeeze/excitation ratio
    n - activation fn ('re', 'r6', 'hs', or 'sw')
    Args:
        block_str: a string representation of block arguments.
    Returns:
        A tuple ``(block_args, num_repeat)``: the decoded argument dict for
        one block and how many times the block should be repeated.
    Raises:
        ValueError: if the string def not properly specified (TODO)
    """
    assert isinstance(block_str, str)
    ops = block_str.split('_')
    block_type = ops[0]  # take the block type off the front
    ops = ops[1:]
    options = {}
    noskip = False
    for op in ops:
        # string options being checked on individual basis, combine if they grow
        if op == 'noskip':
            noskip = True
        elif op.startswith('n'):
            # activation fn
            key = op[0]
            v = op[1:]
            if v == 're':
                value = get_act_layer('relu')
            elif v == 'r6':
                value = get_act_layer('relu6')
            elif v == 'hs':
                value = get_act_layer('hard_swish')
            elif v == 'sw':
                value = get_act_layer('swish')
            else:
                # Unknown activation token: ignore it entirely.
                continue
            options[key] = value
        else:
            # all numeric options: split into leading key letters and the
            # numeric remainder, e.g. 'k3' -> ('k', '3')
            splits = re.split(r'(\d.*)', op)
            if len(splits) >= 2:
                key, value = splits[:2]
                options[key] = value

    # if act_layer is None, the model default (passed to model init) will be used
    act_layer = options['n'] if 'n' in options else None
    exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1
    pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1
    fake_in_chs = int(options['fc']) if 'fc' in options else 0  # FIXME hack to deal with in_chs issue in TPU def

    num_repeat = int(options['r'])
    # each type of block has different valid arguments, fill accordingly
    if block_type == 'ir':
        block_args = dict(
            block_type=block_type,
            dw_kernel_size=_parse_ksize(options['k']),
            exp_kernel_size=exp_kernel_size,
            pw_kernel_size=pw_kernel_size,
            out_chs=int(options['c']),
            exp_ratio=float(options['e']),
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=int(options['s']),
            act_layer=act_layer,
            noskip=noskip,
        )
        if 'cc' in options:
            # 'cc' selects a CondConv variant with this many experts.
            block_args['num_experts'] = int(options['cc'])
    elif block_type == 'ds' or block_type == 'dsa':
        block_args = dict(
            block_type=block_type,
            dw_kernel_size=_parse_ksize(options['k']),
            pw_kernel_size=pw_kernel_size,
            out_chs=int(options['c']),
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=int(options['s']),
            act_layer=act_layer,
            # 'dsa' = depthwise-separable with pointwise activation, no skip.
            pw_act=block_type == 'dsa',
            noskip=block_type == 'dsa' or noskip,
        )
    elif block_type == 'er':
        block_args = dict(
            block_type=block_type,
            exp_kernel_size=_parse_ksize(options['k']),
            pw_kernel_size=pw_kernel_size,
            out_chs=int(options['c']),
            exp_ratio=float(options['e']),
            fake_in_chs=fake_in_chs,
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=int(options['s']),
            act_layer=act_layer,
            noskip=noskip,
        )
    elif block_type == 'cn':
        block_args = dict(
            block_type=block_type,
            kernel_size=int(options['k']),
            out_chs=int(options['c']),
            stride=int(options['s']),
            act_layer=act_layer,
        )
    else:
        assert False, 'Unknown block type (%s)' % block_type

    return block_args, num_repeat
271b8c11ea100a7b86cd4ee958ed13d8c19ab3ac
28,281
import collections def make_lc_resolver(type_: type[_T], /) -> collections.Callable[..., collections.Awaitable[_T]]: """Make an injected callback which resolves a LazyConstant. Notes ----- * This is internally used by `inject_lc`. * For this to work, a `LazyConstant` must've been set as a type dependency for the passed `type_`. Parameters ---------- type_ : type[_T] The type of the constant to resolve. Returns ------- collections.abc.Callable[..., collections.abc.Awaitable[_T]] An injected callback used to resolve the LazyConstant. """ async def resolve( # LazyConstant gets type arguments at runtime constant: LazyConstant[_T] = injecting.inject(type=LazyConstant[type_]), ctx: injecting.AbstractInjectionContext = injecting.inject(type=injecting.AbstractInjectionContext), ) -> _T: """Resolve a lazy constant.""" if (value := constant.get_value()) is not None: return value async with constant.acquire(): if (value := constant.get_value()) is not None: return value result = await constant.callback.resolve(ctx) constant.set_value(result) return result return resolve
30190cfb0f74eab96bafabfc55ef63d18ea25a0f
28,282
import hashlib def get_md5(string): """ Get md5 according to the string """ byte_string = string.encode("utf-8") md5 = hashlib.md5() md5.update(byte_string) result = md5.hexdigest() return result
968b8f8ec28720e4ed4d020093f815b6af33eea7
28,283
def clean_docs_and_uniquify(docs): """ normalize docs and uniquify the doc :param docs: :return: """ docs = [normalize(t) for t in docs if isinstance(t, str)] docs = dedupe(docs) return docs
6a267c1b5b6744cf28b2b6c73e6cf49f25f95727
28,284
from typing import Any import torch def shard_init_helper_(init_method, tensor: Tensor, **kwargs: Any) -> None: """ Helper function to initialize shard parameters. """ if hasattr(tensor, _PARALLEL_DIM): local_rank = get_rank() group = get_group() world_size = get_world_size() parallel_dim = getattr(tensor, _PARALLEL_DIM) tensor_shape = list(map(int, tensor.shape)) tensor_size = len(tensor_shape) # handle both weight and bias col_dim = tensor_size - 1 row_dim = 0 if parallel_dim == 0: tensor_shape[col_dim] *= world_size if tensor_size == 2 else 1 elif parallel_dim == 1 or parallel_dim == -1: tensor_shape[row_dim] *= world_size elif parallel_dim == None: pass else: raise ValueError data = torch.empty( tensor_shape, dtype=torch.float, requires_grad=False ).cuda(local_rank) init_method(data, **kwargs) dist.broadcast(data, src=0) if parallel_dim == 0: data = scatter(data, dim=col_dim) if tensor_size == 2 else data elif parallel_dim == 1 or parallel_dim == -1: data = scatter(data, dim=row_dim) elif parallel_dim == None: pass else: raise ValueError() tensor.data.copy_(data) del data else: return init_method(tensor, **kwargs)
df692b2766ed49d3358c72450aa5d4159915e4b5
28,285
def segment_zentf_tiling(image2d, model, tilesize=1024, classlabel=1, overlap_factor=1):
    """Segment a singe [X, Y] 2D image using a pretrained segmentation model from the ZEN.
    The out will be a binary mask from the prediction of ZEN czmodel which
    is a TF.SavedModel with metainformation.

    Before the segmentation via the network will be applied the image2d
    will be tiled in order to match the tile size to the required batch tile size
    of the used network. Default is (1024, 1024)

    :param image2d: image to be segmented
    :type image2d: NumPy.Array
    :param model: trained TF2 model used for segmentation
    :type model: TF.SavedModel
    :param tilesize: required tile size for the segmentation model, defaults to 1024
    :type tilesize: int, optional
    :param classlabel: Index for the class one is interested in, defaults to 1
    :type classlabel: int, optional
    :param overlap_factor: overlap_factor of 2 = stride between each tile is only
    tile_shape/overlap_factor and therefore
    overlap_factor = 1 means no overlap, defaults to 1
    :type overlap_factor: int, optional
    :return: binary - binary mask of the specified class
    :rtype: Numpy.Array
    """

    # create tile image using MightMosaic; 'reflect' pads the borders by mirroring
    image2d_tiled = MightyMosaic.from_array(image2d, (tilesize, tilesize),
                                            overlap_factor=overlap_factor,
                                            fill_mode='reflect')

    print('image2d_tiled shape : ', image2d_tiled.shape)

    # get number of tiles
    num_tiles = image2d_tiled.shape[0] * image2d_tiled.shape[1]
    print('Number of Tiles: ', num_tiles)

    # create array for the binary results
    # NOTE(review): this is an alias, not a copy — the loop below overwrites the
    # input mosaic in place. Each tile is read before it is overwritten, so the
    # result is correct, but image2d_tiled is clobbered as a side effect.
    binary_tiled = image2d_tiled

    ct = 0
    for n1 in range(image2d_tiled.shape[0]):
        for n2 in range(image2d_tiled.shape[1]):
            ct += 1
            print('Processing Tile : ', ct, ' Size : ', image2d_tiled[n1, n2, :, :].shape)

            # extract a tile
            tile = image2d_tiled[n1, n2, :, :]

            # get the binary from the prediction for a single tile
            binary_tile = segment_zentf(tile, model, classlabel=classlabel)

            # casts the result into the output array
            binary_tiled[n1, n2, :, :] = binary_tile

    # create fused binary (blending overlapping tiles) and convert to int
    binary = binary_tiled.get_fusion().astype(int)

    return binary
7f88124b86bfe6bde147aeedd9385ea7b19063e3
28,286
import os def setup_train_and_sub_df(path): """ Sets up the training and sample submission DataFrame. Args: path (str): Base diretory where train.csv and sample_submission.csv are located Returns: tuple of: train (pd.DataFrame): The prepared training dataframe with the extra columns: im_id & label sub (pd.DataFrame): The prepared sample submission dataframe with the same extra columns as train id_mask_count (pd.DataFrame): The dataframe prepared for splitting """ # Reading the in the .csvs train = pd.read_csv(os.path.join(path, "train.csv")) sub = pd.read_csv(os.path.join(path, "sample_submission.csv")) # setting the dataframe for training/inference train["label"] = train["ImageId_ClassId"].apply(lambda x: x.split("_")[1]) train["im_id"] = train["ImageId_ClassId"].apply(lambda x: x.split("_")[0]) sub["label"] = sub["ImageId_ClassId"].apply(lambda x: x.split("_")[1]) sub["im_id"] = sub["ImageId_ClassId"].apply(lambda x: x.split("_")[0]) id_mask_count = train.loc[train["EncodedPixels"].isnull() == False, "ImageId_ClassId"].apply(lambda x: x.split("_")[0]).value_counts().\ reset_index().rename(columns={"index": "im_id", "ImageId_ClassId": "count"}) return (train, sub, id_mask_count)
25dae06b76aa395c46d126d44129f7f75fc4a622
28,287
def key2cas(key): """ Find the CAS Registry Number of a chemical substance using an IUPAC InChIKey :param key - a valid InChIKey """ if _validkey(key): hits = query('InChIKey=' + key, True) if hits: if len(hits) == 1: return hits[0]['rn'] else: # check hits for smallest molar mass compound, i.e., not polymer minmm = 100000 minrn = '' for i, hit in enumerate(hits): mm = detail(hit['rn'], 'molecularMass') if mm != '': if float(mm) < minmm: minmm = float(mm) minrn = hit['rn'] return minrn else: return '' else: return ''
d0919e2e6c1b6b149409e2b6333bcd3129c53379
28,288
import time import re def connect_server(server, username, startpage, sleep_func=time.sleep, tracktype='recenttracks'): """ Connect to server and get a XML page.""" if server == "libre.fm": baseurl = 'http://alpha.libre.fm/2.0/?' urlvars = dict(method='user.get%s' % tracktype, api_key=('lastexport.py-%s' % __version__).ljust(32, '-'), user=username, page=startpage, limit=200) elif server == "last.fm": baseurl = 'http://ws.audioscrobbler.com/2.0/?' urlvars = dict(method='user.get%s' % tracktype, api_key='e38cc7822bd7476fe4083e36ee69748e', user=username, page=startpage, limit=50) else: if server[:7] != 'http://': server = 'http://%s' % server baseurl = server + '/2.0/?' urlvars = dict(method='user.get%s' % tracktype, api_key=('lastexport.py-%s' % __version__).ljust(32, '-'), user=username, page=startpage, limit=200) url = baseurl + urlencode(urlvars, quote_via=quote_plus) for interval in (1, 5, 10, 62, 240): try: f = urlopen(url) break except Exception as e: last_exc = e print('Exception occured, retrying in %ds: %s' % (interval, e)) sleep_func(interval) else: print('Failed to open page %s' % urlvars['page']) raise last_exc response = f.read() f.close() #bad hack to fix bad xml response = re.sub('\xef\xbf\xbe', '', str(response)) return response
694bfa6b1ace0ed51338983b4901f06c78f44afd
28,289
def eigenvalue_nonunitary_diamondnorm(A, B, mxBasis): """ Eigenvalue nonunitary diamond distance between A and B """ d2 = A.shape[0] evA = _np.linalg.eigvals(A) evB = _np.linalg.eigvals(B) return (d2 - 1.0) / d2 * _np.max(_tools.minweight_match(evA, evB, lambda x, y: abs(abs(x) - abs(y)), return_pairs=False))
97716d88829e4bf0beef914f38bd40d24c1fc32a
28,290
def str_to_int(value): """Convert str to int if possible Args: value(str): string to convert Returns: int: converted value. str otherwise """ try: return int(value) except ValueError: return value
30bd55fc34abffa67c79117b0941ba7e6388efeb
28,291
import json def json_response(data): """this function is used for ajax def route(request): return json_response(t.json()) """ header = 'HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n' body = json.dumps(data, ensure_ascii=False, indent=8) r = header + '\r\n' + body return r.encode(encoding='utf-8')
8678a8f62fab10d9120d354889387b6d70cddea9
28,292
# NOTE(review): operator.sub/add do not subtract/add point lists element-wise;
# in the original package these are presumably the geometry module's own
# vector helpers — confirm against the file's real import block.
from operator import sub
from operator import add


def linePointXY(l, p, inside=True, distance=False, params=False):
    """Return the point on line ``l`` closest to point ``p`` (XY plane).

    For a point ``p`` and a line ``l`` that lie in the same XY plane,
    compute the point on ``l`` that is closest to ``p``, and return that
    point. If ``inside`` is true, then return the closest distance point
    between the point and the line segment. If ``distance`` is true,
    return the closest distance, not the point. If ``params`` is true,
    return the sampling parameter value of the closest point.

    Returns ``False`` for a degenerate zero-length line; raises
    ``ValueError`` for incompatible flags or non-planar input.
    """
    a = l[0]
    b = l[1]

    # check for degenerate case of zero-length line
    abdist = dist(a, b)
    if abdist < epsilon:
        # raise ValueError('zero-length line passed to linePointXY')
        print('zero-length line passed to linePointXY')
        return False

    if distance and params:
        raise ValueError('incompatible distance and params parameters passed to linePointXY')

    x0 = p[0]
    y0 = p[1]
    z0 = p[2]
    x1 = a[0]
    y1 = a[1]
    z1 = a[2]
    x2 = b[0]
    y2 = b[1]
    z2 = b[2]

    ## check to see if all three points lie in the same x,y plane
    if not isXYPlanar([p, a, b]):
        raise ValueError('non-XY points in linePointXY call')
        # (removed unreachable `return false` — dead code after the raise,
        # and `false` is an undefined name)

    # if abs(z1-z0) > epsilon or abs(z2-z0) > epsilon:
    #     return False

    # perpendicular distance from p to the infinite line through a,b
    linedist = abs(((y2 - y1) * x0 - (x2 - x1) * y0 + x2 * y1 - y2 * x1) / abdist)

    ## this is the fast case:
    if not inside and distance:
        return linedist

    ## find out where the intersection between the original line and a
    ## line defined by the point and an orthogonal direction vector
    ## is. We do this by constructing two direction vectors
    ## orthogonal to the orgiginal line scaled by the line distance,
    ## and adding them to the point in question. Assuming that the
    ## line distance is not zero, only one of these constructed points
    ## will fall on the line

    ## compute unit direction vector for original line
    ## (renamed from `dir`, which shadowed the builtin)
    direction = sub(b, a)
    direction = scale3(direction, 1.0 / mag(direction))

    ## compute two orthogonal direction vectors of length linedist
    ordir1 = scale3(orthoXY(direction), linedist)
    ordir2 = scale3(ordir1, -1.0)

    ## there are two possible intersection points
    pi1 = add(p, ordir1)
    pi2 = add(p, ordir2)

    ## compute distances
    d1pa = dist(a, pi1)
    d1pb = dist(pi1, b)
    d1 = d1pa + d1pb    # "triangle" with pi1

    d2pa = dist(a, pi2)
    d2pb = dist(pi2, b)
    d2 = d2pa + d2pb    # "triangle" with pi2

    ## the shortest "triangle" distance will signal the point that
    ## is actually on the line, even if that point falls outside
    ## the a,b line interval
    if params or not inside:    # if we don't care about being inside the
                                # line segment
        if d1 <= d2:
            if distance:
                return d1
            elif params:
                return d1pb / abdist
            else:
                return pi1
        else:
            if distance:
                return d2
            elif params:
                return d2pb / abdist
            else:
                return pi2

    ## if the closest point on the line to point p lies between
    ## the endpoints of the line, then either d1 or d2 will equal
    ## abdist. IF neither do, then we know that the closest point lies
    ## outside the endpoints
    if abs(d1 - abdist) < epsilon:
        if distance:
            return linedist
        else:
            return pi1

    if abs(d2 - abdist) < epsilon:
        if distance:
            return linedist
        else:
            return pi2

    ## closest point is outside the interval. That means that the
    ## distance from point p to whichever endpoint is smaller is the
    ## closest distance
    d3 = dist(a, p)
    d4 = dist(b, p)

    if d3 < d4:
        if distance:
            return d3
        else:
            return a
    else:
        if distance:
            return d4
        else:
            return b
b91f1497ba2dffb72caa4cbf4a6f43972ee3853e
28,293
def config_port_type(dut, interface, stp_type="rpvst", port_type="edge", no_form=False, cli_type="klish"): """ API to config/unconfig the port type in RPVST :param dut: :param port_type: :param no_form: :return: """ commands = list() command = "spanning-tree port type {}".format(port_type) if not no_form else "no spanning-tree port type" interface_details = utils.get_interface_number_from_name(interface) if not interface_details: st.log("Interface details not found {}".format(interface_details)) return False commands.append("interface {} {}".format(interface_details.get("type"), interface_details.get("number"))) commands.append(command) commands.append('exit') st.config(dut, commands, type=cli_type) return True
a0a8672b3fea945a57367236debd1a420e270185
28,294
from os import path def compute_output_pattern(mask_path, crop_output): """ Computes the output pattern of the region cropped (without the source file prefix) Args: mask_path: path to the masks crop_output: If True the output is cropped, and the descriptor CropRoi must exist Returns: the output pattern """ mask_filename = path.basename(mask_path) template_id = mask_filename.split("_")[0].split("-")[1] mask_descriptors = mask_filename.split("_")[1:-2:] roi_id = mask_filename.split("_")[-2].split("-")[1] if "desc-Crop" not in mask_descriptors and crop_output: mask_descriptors = ["desc-CropRoi"] + mask_descriptors elif "desc-Crop" in mask_descriptors: mask_descriptors = [ descriptor for descriptor in mask_descriptors if descriptor != "desc-Crop" ] if crop_output: mask_descriptors = ["desc-CropRoi"] + mask_descriptors else: mask_descriptors = ["desc-CropImage"] + mask_descriptors mask_pattern = "_".join(mask_descriptors) output_pattern = f"space-{template_id}_{mask_pattern}_roi-{roi_id}" return output_pattern
2f8fa42b7c3efeb753eed9545beec87638a9dfdc
28,295
def check_strand(strand): """ Check the strand format. Return error message if the format is not as expected. """ if (strand != '-' and strand != '+'): return "Strand is not in the expected format (+ or -)"
9c2e720069ad8dcc8f867a37925f6e27e91dcb3f
28,296
def to_halfpi(rin, za): # match with a shunt input l net, rin > za.real """ """ ra, xa = za.real, za.imag xd = np.sqrt(ra * (rin - ra)) if np.iscomplex(xd): raise ValueError x2 = np.array([-xa - xd, -xa + xd]) x1 = -(ra**2 + (x2 + xa)**2) / (x2 + xa) return np.transpose([x1 * 1j, x2 * 1j]).tolist()
210e4bb008cd58323fab1ab66ad6ef84456c569b
28,297
from pathlib import Path import yaml def load_config_or_exit(workdir="."): """Loads the challenge configuration file from the current directory, or prints a message and exits the script if it doesn't exist. Returns: dict: The config """ path = Path(workdir) if (path / "challenge.yml").exists(): path = path / "challenge.yml" elif (path / "challenge.yaml").exists(): path = path / "challenge.yaml" else: print(f"{CRITICAL}Could not find a challenge.yml file in this directory.") exit(1) with path.open() as f: raw_config = f.read() config = yaml.safe_load(raw_config) return config
e58a7725422d3ae053ab56ae81ab98a7d13be0b5
28,298
def checkOnes(x, y): """ Checks if any of the factors in y = 1 """ _ = BranchingValues() _.x = 1 for i in _range(len(y)): if _if(y[i][0] == 1): _.x = 0 _endif() if _if(y[i][1] == 1): _.x = 0 _endif() _endfor() return _.x
fec6b2aeae13750ec1b37d4b5d187d6836ab5aaa
28,299