Columns: content (string, lengths 35 to 762k); sha1 (string, length 40); id (int64, 0 to 3.66M)
import argparse def handle_program_options(): """ Uses the built-in argparse module to handle command-line options for the program. :return: The gathered command-line options specified by the user :rtype: argparse.Namespace """ parser = argparse.ArgumentParser(description="Convert Sanger-sequencing \ derived data files for use with the \ metagenomics analysis program QIIME, by \ extracting Sample ID information, adding\ barcodes and primers to the sequence \ data, and outputting a mapping file and\ single FASTA-formatted sequence file \ formed by concatenating all input data.") parser.add_argument('-i', '--input_dir', required=True, help="The directory containing sequence data files. \ Assumes all data files are placed in this \ directory. For files organized within folders by\ sample, use -s in addition.") parser.add_argument('-m', '--map_file', default='map.txt', help="QIIME-formatted mapping file linking Sample IDs \ with barcodes and primers.") parser.add_argument('-o', '--output', default='output.fasta', metavar='OUTPUT_FILE', help="Single file containing all sequence data found \ in input_dir, FASTA-formatted with barcode and \ primer prepended to sequence. If the -q option \ is passed, any quality data will also be output \ to a single file of the same name with a .qual \ extension.") parser.add_argument('-b', '--barcode_length', type=int, default=12, help="Length of the generated barcode sequences. \ Default is 12 (QIIME default), minimum is 8.") parser.add_argument('-q', '--qual', action='store_true', default=False, help="Instruct the program to look for quality \ input files") parser.add_argument('-u', '--utf16', action='store_true', default=False, help="UTF-16 encoded input files") parser.add_argument('-t', '--treatment', help="Inserts an additional column into the mapping \ file specifying some treatment or other variable\ that separates the current set of sequences \ from any other set of sequences. For example:\ -t DiseaseState=healthy") # data input options sidGroup = parser.add_mutually_exclusive_group(required=True) sidGroup.add_argument('-d', '--identifier_pattern', action=ValidateIDPattern, nargs=2, metavar=('SEPARATOR', 'FIELD_NUMBER'), help="Indicates how to extract the Sample ID from \ the description line. Specify two things: \ 1. Field separator, 2. Field number of Sample \ ID (1 or greater). If the separator is a space \ or tab, use \s or \\t respectively. \ Example: >ka-SampleID-2091, use -i - 2, \ indicating - is the separator and the Sample ID\ is field #2.") sidGroup.add_argument('-f', '--filename_sample_id', action='store_true', default=False, help='Specify that the program should use\ the name of each fasta file as the Sample ID for use\ in the mapping file. This is meant to be used when \ all sequence data for a sample is stored in a single\ file.') return parser.parse_args()
288a21889d97bb01622c1dd113646f99f7a73559
3,647,200
def start_v_imp(model, lval: str, rval: str): """ Calculate starting value for parameter in data given data in model. For Imputer -- just copies values from original data. Parameters ---------- model : Model Model instance. lval : str L-value name. rval : str R-value name. Returns ------- float Starting value. """ mx = model.mod.mx_v rows, cols = model.mod.names_v i, j = rows.index(lval), cols.index(rval) v = mx[i, j] return v
113266955e5115b0eb43d32feaa506a2b0c93e14
3,647,201
def get_image_features(X, y, appearance_dim=32): """Return features for every object in the array. Args: X (np.array): a 3D numpy array of raw data of shape (x, y, c). y (np.array): a 3D numpy array of integer labels of shape (x, y, 1). appearance_dim (int): The resized shape of the appearance feature. Returns: dict: A dictionary of feature names to np.arrays of shape (n, c) or (n, x, y, c) where n is the number of objects. """ appearance_dim = int(appearance_dim) # each feature will be ordered based on the label. # labels are also stored and can be fetched by index. num_labels = len(np.unique(y)) - 1 labels = np.zeros((num_labels,), dtype='int32') centroids = np.zeros((num_labels, 2), dtype='float32') morphologies = np.zeros((num_labels, 3), dtype='float32') appearances = np.zeros((num_labels, appearance_dim, appearance_dim, X.shape[-1]), dtype='float32') # iterate over all objects in y props = regionprops(y[..., 0], cache=False) for i, prop in enumerate(props): # Get label labels[i] = prop.label # Get centroid centroid = np.array(prop.centroid) centroids[i] = centroid # Get morphology morphology = np.array([ prop.area, prop.perimeter, prop.eccentricity ]) morphologies[i] = morphology # Get appearance minr, minc, maxr, maxc = prop.bbox appearance = np.copy(X[minr:maxr, minc:maxc, :]) resize_shape = (appearance_dim, appearance_dim) appearance = resize(appearance, resize_shape) appearances[i] = appearance # Get adjacency matrix # distance = cdist(centroids, centroids, metric='euclidean') < distance_threshold # adj_matrix = distance.astype('float32') return { 'appearances': appearances, 'centroids': centroids, 'labels': labels, 'morphologies': morphologies, # 'adj_matrix': adj_matrix, }
fa5cb730227b20b54b8d25270550c9dae9fc1348
3,647,202
def deactivate_text(shell: dict, env_vars: dict) -> str: """Returns the formatted text to write to the deactivation script based on the passed dictionaries.""" lines = [shell["shebang"]] for k in env_vars.keys(): lines.append(shell["deactivate"].format(k)) return "\n".join(lines)
0a75134a55bf9cd8eceb311c48a5547ad373593d
3,647,203
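A minimal usage sketch for the deactivate_text snippet above; the shell and env_vars dicts here are hypothetical stand-ins for whatever the surrounding tool actually passes in, and the function is assumed to be in scope.
shell = {"shebang": "#!/bin/sh", "deactivate": "unset {}"}
env_vars = {"FOO": "1", "BAR": "2"}
print(deactivate_text(shell, env_vars))
# #!/bin/sh
# unset FOO
# unset BAR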
from typing import get_origin def is_dict(etype) -> bool: """ Determine whether etype is a Dict """ return get_origin(etype) is dict or etype is dict
a65af54bf6b24c94906765c895c899b18bf5c1eb
3,647,204
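Assuming the is_dict helper above is in scope, it accepts both the typing alias and the plain builtin:
from typing import Dict
print(is_dict(Dict[str, int]))  # True  (get_origin(Dict[str, int]) is dict)
print(is_dict(dict))            # True  (falls through to the `etype is dict` check)
print(is_dict(list))            # False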
import scipy def t_plot_parameters(thickness_curve, section, loading, molar_mass, liquid_density): """Calculates the parameters from a linear section of the t-plot.""" slope, intercept, corr_coef, p, stderr = scipy.stats.linregress( thickness_curve[section], loading[section]) # Check if slope is good if slope * (max(thickness_curve) / max(loading)) < 3: adsorbed_volume = intercept * molar_mass / liquid_density area = slope * molar_mass / liquid_density * 1000 result_dict = { 'section': section, 'slope': slope, 'intercept': intercept, 'corr_coef': corr_coef, 'adsorbed_volume': adsorbed_volume, 'area': area, } return result_dict return None
46d2f65cac5a424b2054359dc8b083d3a2138cc6
3,647,205
import requests def get_data(stock, start_date): """Fetch a maximum of the 100 most recent records for a given stock starting at the start_date. Args: stock (string): Stock Ticker start_date (int): UNIX date time """ # Build the query string request_url = f"https://api.pushshift.io/reddit/search/comment/?q={stock}&sort=asc&size=100&after={start_date}" # get the query and convert to json result_json = requests.get(request_url).json() return result_json
aafdc913d80346e82a21767cdb7b5e40f2376857
3,647,206
def depart_people(state, goal): """Departs all passengers that can depart on this floor""" departures = [] for departure in state.destin.items(): passenger = departure[0] if passenger in goal.served and goal.served[passenger]: floor = departure[1] if state.lift_at == floor and state.boarded[passenger] and not state.served[passenger]: departures.append(('depart', passenger, state.lift_at)) return departures
f3a18ad9a6f884a57d0be1d0e27b3dfeeb95d736
3,647,207
def get_topic_for_subscribe(): """ return the topic string used to subscribe for receiving future responses from DPS """ return _get_topic_base() + "res/#"
346841c7a11f569a7309b087baf0d621a63b8ae9
3,647,208
def generate_AES_key(bytes = 32): """Generates a new AES key Parameters ---------- bytes : int number of bytes in key Returns ------- key : bytes """ try: from Crypto import Random return Random.get_random_bytes(bytes) except ImportError: print('PyCrypto not installed. Reading from /dev/random instead') with open('/dev/random', 'rb') as rand: return rand.read(bytes)
4435aeea860bb3bca847156de0626c2cacde93e0
3,647,209
def remove_deploy_networkIPv6_configuration(user, networkipv6, equipment_list): """Loads template for removing Network IPv6 equipment configuration, creates file and apply config. Args: NetworkIPv6 object Equipamento objects list Returns: List with status of equipments output """ data = dict() # lock network id to prevent multiple requests to same id with distributedlock(LOCK_NETWORK_IPV6 % networkipv6.id): with distributedlock(LOCK_VLAN % networkipv6.vlan.id): if networkipv6.active == 0: data['output'] = 'Network already not active. Nothing to do.' return data # load dict with all equipment attributes dict_ips = get_dict_v6_to_use_in_configuration_deploy( user, networkipv6, equipment_list) status_deploy = dict() # TODO implement threads for equipment in equipment_list: # generate config file file_to_deploy = _generate_config_file( dict_ips, equipment, TEMPLATE_NETWORKv6_DEACTIVATE) # deploy config file in equipments lockvar = LOCK_EQUIPMENT_DEPLOY_CONFIG_NETWORK_SCRIPT % ( equipment.id) status_deploy[equipment.id] = deploy_config_in_equipment_synchronous( file_to_deploy, equipment, lockvar) networkipv6.deactivate(user) transaction.commit() if networkipv6.vlan.ativada == 1: # if there are no other networks active in vlan, remove int # vlan if not _has_active_network_in_vlan(networkipv6.vlan): # remove int vlan for equipment in equipment_list: if equipment.maintenance is not True: status_deploy[ equipment.id] += _remove_svi(equipment, networkipv6.vlan.num_vlan) networkipv6.vlan.remove(user) return status_deploy
eb0d33cbc4b3963388a768f9ce0a950c3b66cbe0
3,647,210
import os import argparse def extant_file(x): """ 'Type' for argparse - checks that file exists but does not open. Parameters ---------- x : str Candidate file path Returns ------- str Validated path """ if not os.path.isfile(x): # ArgumentTypeError gives a rejection message of the form: # error: argument input: <passed error message> if os.path.exists(x): raise argparse.ArgumentTypeError("{0} is not a file".format(x)) else: raise argparse.ArgumentTypeError("{0} does not exist".format(x)) return str(x)
3b54d821d020c00e566460b95daeb037406becb9
3,647,211
def can_pay_with_two_coins(denoms, amount): """ (list of int, int) -> bool Return True if and only if it is possible to form amount, which is a number of cents, using exactly two coins, which can be of any of the denominations in denoms. >>> can_pay_with_two_coins([1, 5, 10, 25], 35) True >>> can_pay_with_two_coins([1, 5, 10, 25], 20) True >>> can_pay_with_two_coins([1, 5, 10, 25], 12) False """ for i in range(len(denoms)): for j in range(len(denoms)): if denoms[i] + denoms[j] == amount: return True return False
c0634d22095480d1a010f763d646453dc08d4476
3,647,212
def make_column_kernelizer(*transformers, **kwargs): """Construct a ColumnKernelizer from the given transformers. This is a shorthand for the ColumnKernelizer constructor; it does not require, and does not permit, naming the transformers. Instead, they will be given names automatically based on their types. It also does not allow weighting with ``transformer_weights``. Parameters ---------- *transformers : tuples Tuples of the form (transformer, columns) specifying the transformer objects to be applied to subsets of the data. transformer : {'drop', 'passthrough'} or estimator Estimator must support ``fit`` and ``transform``. Special-cased strings 'drop' and 'passthrough' are accepted as well, to indicate to drop the columns or to pass them through untransformed, respectively. If the transformer does not return a kernel (as informed by the attribute kernelizer=True), a linear kernelizer is applied after the transformer. columns : str, array-like of str, int, array-like of int, slice, \ array-like of bool or callable Indexes the data on its second axis. Integers are interpreted as positional columns, while strings can reference DataFrame columns by name. A scalar string or int should be used where ``transformer`` expects X to be a 1d array-like (vector), otherwise a 2d array will be passed to the transformer. A callable is passed the input data `X` and can return any of the above. To select multiple columns by name or dtype, you can use :obj:`make_column_selector`. remainder : {'drop', 'passthrough'} or estimator, default='drop' By default, only the specified columns in `transformers` are transformed and combined in the output, and the non-specified columns are dropped. (default of ``'drop'``). By specifying ``remainder='passthrough'``, all remaining columns that were not specified in `transformers` will be automatically passed through. This subset of columns is concatenated with the output of the transformers. By setting ``remainder`` to be an estimator, the remaining non-specified columns will use the ``remainder`` estimator. The estimator must support ``fit`` and ``transform``. n_jobs : int, default=None Number of jobs to run in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. n_jobs does not work with with GPU backends. verbose : bool, default=False If True, the time elapsed while fitting each transformer will be printed as it is completed. Returns ------- column_kernelizer : ColumnKernelizer See also -------- himalaya.kernel_ridge.ColumnKernelizer : Class that allows combining the outputs of multiple transformer objects used on column subsets of the data into a single feature space. Examples -------- >>> import numpy as np >>> from himalaya.kernel_ridge import make_column_kernelizer >>> from himalaya.kernel_ridge import Kernelizer >>> ck = make_column_kernelizer( ... (Kernelizer(kernel="linear"), [0, 1, 2]), ... (Kernelizer(kernel="polynomial"), slice(3, 5))) >>> X = np.array([[0., 1., 2., 2., 3.], [0., 2., 0., 0., 3.], [0., 0., 1., 0., 3.], ... [1., 1., 0., 1., 2.]]) >>> # Kernelize separately the first three columns and the last two >>> # columns, creating two kernels of shape (n_samples, n_samples). 
>>> ck.fit_transform(X).shape (2, 4, 4) """ # transformer_weights keyword is not passed through because the user # would need to know the automatically generated names of the transformers n_jobs = kwargs.pop('n_jobs', None) remainder = kwargs.pop('remainder', 'drop') verbose = kwargs.pop('verbose', False) if kwargs: raise TypeError('Unknown keyword arguments: "{}"'.format( list(kwargs.keys())[0])) transformer_list = _get_transformer_list(transformers) return ColumnKernelizer(transformer_list, n_jobs=n_jobs, remainder=remainder, verbose=verbose)
cfddec675782a6e70d1921372961abbb7853fa09
3,647,213
def plugin_info(): """ Returns information about the plugin. Args: Returns: dict: plugin information Raises: """ return { 'name': 'PT100 Poll Plugin', 'version': '1.9.2', 'mode': 'poll', 'type': 'south', 'interface': '1.0', 'config': _DEFAULT_CONFIG }
f6d54b5ff64013ae17364db604cf1cb6b5204aba
3,647,214
def get_engine(isolation_level=None): """ Creates an engine with the given isolation level. """ # creates a shallow copy with the given isolation level if not isolation_level: return _get_base_engine() else: return _get_base_engine().execution_options(isolation_level=isolation_level)
32e055b2a4a1d0e7ecbc591218bb61c721113a09
3,647,215
from selenium.webdriver import PhantomJS def phantomjs_driver(capabilities, driver_path, port): """ Overrides default `phantomjs_driver` driver from pytest-selenium. Default implementation uses ephemeral ports just as our tests but it doesn't provide any way to configure them, for this reason we basically recreate the driver fixture using port fixture. """ kwargs = {} if capabilities: kwargs['desired_capabilities'] = capabilities if driver_path is not None: kwargs['executable_path'] = driver_path kwargs['port'] = port.get() return PhantomJS(**kwargs)
5c6453f4d753cd765fa7c9fff47b61c6c6efac04
3,647,216
import numpy def to_TH1x( fName, fTitle, data, fEntries, fTsumw, fTsumw2, fTsumwx, fTsumwx2, fSumw2, fXaxis, fYaxis=None, fZaxis=None, fNcells=None, fBarOffset=0, fBarWidth=1000, fMaximum=-1111.0, fMinimum=-1111.0, fNormFactor=0.0, fContour=None, fOption="", fFunctions=None, fBufferSize=0, fBuffer=None, fBinStatErrOpt=0, fStatOverflows=2, fLineColor=602, fLineStyle=1, fLineWidth=1, fFillColor=0, fFillStyle=1001, fMarkerColor=1, fMarkerStyle=1, fMarkerSize=1.0, ): """ Args: fName (None or str): Temporary name, will be overwritten by the writing process because Uproot's write syntax is ``file[name] = histogram``. fTitle (str): Real title of the histogram. data (numpy.ndarray or :doc:`uproot.models.TArray.Model_TArray`): Bin contents with first bin as underflow, last bin as overflow. The dtype of this array determines the return type of this function (TH1C, TH1D, TH1F, TH1I, or TH1S). fEntries (float): Number of entries. (https://root.cern.ch/doc/master/classTH1.html) fTsumw (float): Total Sum of weights. fTsumw2 (float): Total Sum of squares of weights. fTsumwx (float): Total Sum of weight*X. fTsumwx2 (float): Total Sum of weight*X*X. fSumw2 (numpy.ndarray of numpy.float64 or :doc:`uproot.models.TArray.Model_TArrayD`): Array of sum of squares of weights. fXaxis (:doc:`uproot.models.TH.Model_TAxis_v10`): Use :doc:`uproot.writing.identify.to_TAxis` with ``fName="xaxis"`` and ``fTitle=""``. fYaxis (None or :doc:`uproot.models.TH.Model_TAxis_v10`): None generates a default for 1D histograms. fZaxis (None or :doc:`uproot.models.TH.Model_TAxis_v10`): None generates a default for 1D and 2D histograms. fNcells (None or int): Number of bins(1D), cells (2D) +U/Overflows. Computed from ``data`` if None. fBarOffset (int): (1000*offset) for bar charts or legos fBarWidth (int): (1000*width) for bar charts or legos fMaximum (float): Maximum value for plotting. fMinimum (float): Minimum value for plotting. fNormFactor (float): Normalization factor. fContour (None or numpy.ndarray of numpy.float64 or :doc:`uproot.models.TArray.Model_TArrayD`): Array to display contour levels. None generates an empty array. fOption (str or :doc:`uproot.models.TString.Model_TString`): Histogram options. fFunctions (None, list, or :doc:`uproot.models.TList.Model_TList`): ->Pointer to list of functions (fits and user). None generates an empty list. fBufferSize (None or int): fBuffer size. Computed from ``fBuffer`` if None. fBuffer (None or numpy.ndarray of numpy.float64): Buffer of entries accumulated before automatically choosing the binning. (Irrelevant for serialization?) None generates an empty array. fBinStatErrOpt (int): Option for bin statistical errors. fStatOverflows (int): Per object flag to use under/overflows in statistics. fLineColor (int): Line color. (https://root.cern.ch/doc/master/classTAttLine.html) fLineStyle (int): Line style. fLineWidth (int): Line width. fFillColor (int): Fill area color. (https://root.cern.ch/doc/master/classTAttFill.html) fFillStyle (int): Fill area style. fMarkerColor (int): Marker color. (https://root.cern.ch/doc/master/classTAttMarker.html) fMarkerStyle (int): Marker style. fMarkerSize (float): Marker size. This function is for developers to create TH1* objects that can be written to ROOT files, to implement conversion routines. The choice of TH1C, TH1D, TH1F, TH1I, or TH1S depends on the dtype of the ``data`` array. 
""" tobject = uproot.models.TObject.Model_TObject.empty() tobject._members["@fUniqueID"] = 0 tobject._members["@fBits"] = 0 tnamed = uproot.models.TNamed.Model_TNamed.empty() tnamed._deeply_writable = True tnamed._bases.append(tobject) tnamed._members["fName"] = fName tnamed._members["fTitle"] = fTitle tattline = uproot.models.TAtt.Model_TAttLine_v2.empty() tattline._deeply_writable = True tattline._members["fLineColor"] = fLineColor tattline._members["fLineStyle"] = fLineStyle tattline._members["fLineWidth"] = fLineWidth tattfill = uproot.models.TAtt.Model_TAttFill_v2.empty() tattfill._deeply_writable = True tattfill._members["fFillColor"] = fFillColor tattfill._members["fFillStyle"] = fFillStyle tattmarker = uproot.models.TAtt.Model_TAttMarker_v2.empty() tattmarker._deeply_writable = True tattmarker._members["fMarkerColor"] = fMarkerColor tattmarker._members["fMarkerStyle"] = fMarkerStyle tattmarker._members["fMarkerSize"] = fMarkerSize th1 = uproot.models.TH.Model_TH1_v8.empty() th1._bases.append(tnamed) th1._bases.append(tattline) th1._bases.append(tattfill) th1._bases.append(tattmarker) if fYaxis is None: fYaxis = to_TAxis(fName="yaxis", fTitle="", fNbins=1, fXmin=0.0, fXmax=1.0) if fZaxis is None: fZaxis = to_TAxis(fName="zaxis", fTitle="", fNbins=1, fXmin=0.0, fXmax=1.0) if fContour is None: fContour = numpy.array([], dtype=numpy.float64) if fFunctions is None: fFunctions = [] if fBuffer is None: fBuffer = numpy.array([], dtype=numpy.float64) if isinstance(data, uproot.models.TArray.Model_TArray): tarray_data = data else: tarray_data = to_TArray(data) if isinstance(fSumw2, uproot.models.TArray.Model_TArray): tarray_fSumw2 = fSumw2 else: tarray_fSumw2 = to_TArray(fSumw2) if not isinstance(tarray_fSumw2, uproot.models.TArray.Model_TArrayD): raise TypeError("fSumw2 must be an array of float64 (TArrayD)") if isinstance(fContour, uproot.models.TArray.Model_TArray): tarray_fContour = fContour else: tarray_fContour = to_TArray(fContour) if not isinstance(tarray_fContour, uproot.models.TArray.Model_TArrayD): raise TypeError("fContour must be an array of float64 (TArrayD)") if isinstance(fOption, uproot.models.TString.Model_TString): tstring_fOption = fOption else: tstring_fOption = to_TString(fOption) if isinstance(fFunctions, uproot.models.TList.Model_TList): tlist_fFunctions = fFunctions else: tlist_fFunctions = to_TList(fFunctions, name="") # FIXME: require all list items to be the appropriate class (TFunction?) 
th1._members["fNcells"] = len(tarray_data) if fNcells is None else fNcells th1._members["fXaxis"] = fXaxis th1._members["fYaxis"] = fYaxis th1._members["fZaxis"] = fZaxis th1._members["fBarOffset"] = fBarOffset th1._members["fBarWidth"] = fBarWidth th1._members["fEntries"] = fEntries th1._members["fTsumw"] = fTsumw th1._members["fTsumw2"] = fTsumw2 th1._members["fTsumwx"] = fTsumwx th1._members["fTsumwx2"] = fTsumwx2 th1._members["fMaximum"] = fMaximum th1._members["fMinimum"] = fMinimum th1._members["fNormFactor"] = fNormFactor th1._members["fContour"] = tarray_fContour th1._members["fSumw2"] = tarray_fSumw2 th1._members["fOption"] = tstring_fOption th1._members["fFunctions"] = tlist_fFunctions th1._members["fBufferSize"] = len(fBuffer) if fBufferSize is None else fBufferSize th1._members["fBuffer"] = fBuffer th1._members["fBinStatErrOpt"] = fBinStatErrOpt th1._members["fStatOverflows"] = fStatOverflows th1._speedbump1 = b"\x00" th1._deeply_writable = tlist_fFunctions._deeply_writable if isinstance(tarray_data, uproot.models.TArray.Model_TArrayC): cls = uproot.models.TH.Model_TH1C_v3 elif isinstance(tarray_data, uproot.models.TArray.Model_TArrayS): cls = uproot.models.TH.Model_TH1S_v3 elif isinstance(tarray_data, uproot.models.TArray.Model_TArrayI): cls = uproot.models.TH.Model_TH1I_v3 elif isinstance(tarray_data, uproot.models.TArray.Model_TArrayF): cls = uproot.models.TH.Model_TH1F_v3 elif isinstance(tarray_data, uproot.models.TArray.Model_TArrayD): cls = uproot.models.TH.Model_TH1D_v3 else: raise TypeError( "no TH1* subclasses correspond to {0}".format(tarray_data.classname) ) th1x = cls.empty() th1x._bases.append(th1) th1x._bases.append(tarray_data) th1x._deeply_writable = th1._deeply_writable return th1x
c268c6f7ea7e875bb1049940cac45bdec48afdcb
3,647,217
import argparse import textwrap def parse_args(): """ Parse and validate the command line arguments, and set the defaults. """ parser = argparse.ArgumentParser( formatter_class=argparse.RawTextHelpFormatter, description='Utility commands for handle.net EPIC persistent identifiers') parser.add_argument('command', metavar='COMMAND', choices=['handle', 'handles', 'count', 'download', 'rewrite-aliases'], help=textwrap.dedent('''\ command to run: - `handle`: retrieve details for the given POSTFIX; this may be the same output as the public endpoint `https://hdl.handle.net/api/handles/<prefix>/<postfix>?pretty` - `handles`: retrieve details for the given postfixes taken from a file - `count`: count existing handles on the server, including special postfixes such as `ADMIN`, `CONTACT`, `EPIC_HEALTHCHECK` and `USER01` - `download`: create file with existing handles, each line holding `1-based-counter; prefix/postfix` - `rewrite-aliases`: rewrite handles based on a file, each line holding `postfix; postfix` where both should already exist as a handle, and where the first will become an alias for the latter ''')) parser.add_argument('postfix', metavar='POSTFIX', nargs='?', help='optional postfix, for a single full handle `<prefix>/<postfix>`') parser.add_argument('-p', '--prefix', required=True, help='prefix, like `21.12102`, required') parser.add_argument('-i', '--index', required=True, help='user index, like `312`, required') parser.add_argument('--server', default='https://epic-pid.storage.surfsara.nl:8001', help='base PID server URL, default `https://epic-pid.storage.surfsara.nl:8001`, to which, ' 'e.g., `/api/sessions` and `/api/handles` are appended') parser.add_argument('--certfile', help='certificate file, default `<prefix>_USER01_<index>_certificate_only.pem`') parser.add_argument('--keyfile', help='private key file, default `<prefix>_USER01_<index>_privkey.pem`') parser.add_argument('-f', '--file', metavar='INPUT', help='semicolon-separated input file, default `<command>.csv`') parser.add_argument('-o', '--output', help='semicolon-separated output file, default `<command>-<yyyymmdd>.csv`') parser.add_argument('--start', type=int, help='zero-based start row from input file (default 1, hence ignoring the header), or start ' 'page when downloading handles (default 0)') parser.add_argument('--count', default=3, type=int, help='number of rows to process or pages to download, default 3') parser.add_argument('--size', metavar='PAGESIZE', default=10000, type=int, help='page size when downloading paginated data, default 10,000') parser.add_argument('--throttle', metavar='SECONDS', default=10, type=float, help='number of seconds between requests, default 10') parser.add_argument('-l', '--log', help='log file, default `<command>-<yyyymmdd>.log`') parser.add_argument('-q', '--quiet', help='reduce output on terminal to be the same as the log', action='store_true') args = parser.parse_args() args.certfile = args.certfile or f'{args.prefix}_USER01_{args.index}_certificate_only.pem' args.keyfile = args.keyfile or f'{args.prefix}_USER01_{args.index}_privkey.pem' args.file = args.file or f'{args.command}.csv' args.output = args.output or f'{args.command}-{date.today().strftime("%Y%m%d")}.csv' args.log = args.log or f'{args.command}-{date.today().strftime("%Y%m%d")}.log' # For `rewrite-aliases` default to 1, skipping the CSV header args.start = args.start if args.start is not None else 1 if args.command == 'rewrite-aliases' else 0 return args
e639098ac7e629a4a78c3c1242bcc9f785186b7c
3,647,218
from time import time def parse(address, addr_spec_only=False, strict=False, metrics=False): """ Given a string, returns a scalar object representing a single full mailbox (display name and addr-spec), addr-spec, or a url. If parsing the entire string fails and strict is not set to True, fall back to trying to parse the last word only and assume everything else is the display name. Returns an Address object and optionally metrics on processing time if requested. Examples: >>> address.parse('John Smith <john@smith.com') John Smith <john@smith.com> >>> print address.parse('John <john@smith.com>', addr_spec_only=True) None >>> print address.parse('john@smith.com', addr_spec_only=True) 'john@smith.com' >>> address.parse('http://host.com/post?q') http://host.com/post?q >>> print address.parse('foo') None """ mtimes = {'parsing': 0} if addr_spec_only: parser = addr_spec_parser else: parser = mailbox_or_url_parser # normalize inputs to bytestrings if isinstance(address, unicode): address = address.encode('utf-8') # sanity checks if not address: return None, mtimes if len(address) > MAX_ADDRESS_LENGTH: _log.warning('address exceeds maximum length of %s', MAX_ADDRESS_LENGTH) return None, mtimes bstart = time() try: parse_rs = parser.parse(address.strip(), lexer=lexer.clone()) addr_obj = _lift_parse_result(parse_rs) except (LexError, YaccError, SyntaxError): addr_obj = None if addr_obj is None and not strict: addr_parts = address.split(' ') addr_spec = addr_parts[-1] if len(addr_spec) < len(address): try: parse_rs = parser.parse(addr_spec, lexer=lexer.clone()) addr_obj = _lift_parse_result(parse_rs) if addr_obj: addr_obj._display_name = ' '.join(addr_parts[:-1]) if isinstance(addr_obj._display_name, str): addr_obj._display_name = addr_obj._display_name.decode('utf-8') except (LexError, YaccError, SyntaxError): addr_obj = None mtimes['parsing'] = time() - bstart return addr_obj, mtimes
b6516d530892a7db405b816987598ce53a0dc776
3,647,219
import requests def unfreeze_file(user, data): """ unfreeze a file. :return: status code, response data """ r = requests.post('%s/unfreeze' % URL, json=data, auth=(user, PASS), verify=False) return r.status_code, r.json()
4ee59dd44f42685a02907dec766dc8026f939da2
3,647,220
def prompt_url(q): """ :param q: The prompt to display to the user :return: The user's normalized input. We ensure there is a URL scheme, a domain, a "/" path, and no trailing elements. :rtype: str """ return prompt(q, _url_coerce_fn)
dfe810a4552c880d71efabffb2f9167bfce0ad8a
3,647,221
def eval_mnl_logsums(choosers, spec, locals_d, trace_label=None): """ like eval_nl except return logsums instead of making choices Returns ------- logsums : pandas.Series Index will be that of `choosers`, values will be logsum across spec column values """ trace_label = tracing.extend_trace_label(trace_label, 'mnl') check_for_variability = tracing.check_for_variability() print("running eval_mnl_logsums") expression_values = eval_variables(spec.index, choosers, locals_d) if check_for_variability: _check_for_variability(expression_values, trace_label) # utility values utilities = compute_utilities(expression_values, spec) # logsum is log of exponentiated utilities summed across # columns of each chooser row utils_arr = utilities.as_matrix().astype('float') logsums = np.log(np.exp(utils_arr).sum(axis=1)) logsums = pd.Series(logsums, index=choosers.index) if trace_label: # add logsum to utilities for tracing utilities['logsum'] = logsums tracing.trace_df(choosers, '%s.choosers' % trace_label) tracing.trace_df(utilities, '%s.utilities' % trace_label, column_labels=['alternative', 'utility']) tracing.trace_df(logsums, '%s.logsums' % trace_label, column_labels=['alternative', 'logsum']) tracing.trace_df( expression_values, '%s.expression_values' % trace_label, column_labels=['expression', None]) return logsums
d9b00c2f5f436a0825cbe3bdd60c6b2257c769b3
3,647,222
def find_zeroed_indices(adjusted, original): """Find the indices of the values present in ``original`` but missing in ``adjusted``. Parameters ---------- adjusted: np.array original: array_like Returns ------- Tuple[np.ndarray] Indices of the values present in ``original`` but missing in ``adjusted``. """ if sp.issparse(original): i, j, v = sp.find(original) # Use hash maps to figure out which indices have been lost in the original original_indices = set(zip(i, j)) adjusted_indices = set(zip(*np.where(~adjusted.mask))) zeroed_indices = original_indices - adjusted_indices # Convert our hash map of coords into the standard numpy indices format indices = list(zip(*zeroed_indices)) indices = tuple(map(np.array, indices)) return indices else: original = np.ma.masked_array(original, mask=original <= 0) return np.where(adjusted.mask & ~original.mask)
c01b91ec8be0d1bc22aad9042328a451b7424996
3,647,223
def inventory_update(arr1, arr2): """Add the inventory from arr2 to arr1. If an item exists in both arr1 and arr2, then the quantity of the item is updated in arr1. If an item exists in only arr2, then the item is added to arr1. If an item only exists in arr1, then that item remains unaffected. Arguments: arr1: the destination inventory arr2: the inventory to add to the destination inventory Returns: a combined inventory """ # Set longer to the longer of the two arrays longer = arr2 if len(longer) > len(arr1): temp = arr1 arr1 = longer longer = temp # Since longer is potentially modified, set it # to a copy of itself. longer = longer.copy() # Iterate over the shorter array, appending # items that don't exist in the longer array, # or updating the quantity of existing items. for tup in arr1: qty = tup[0] name = tup[1] # Funny way to get the index of an array # object based on the object's own indexed # elements. try: i = [x[1] for x in longer].index(name) except ValueError: i = -1 if i < 0: longer.append(tup) else: longer[i][0] += qty # Man, why doesn't the index function accept a # key argument? Sort on the string description # of each inventory item. longer.sort(key=lambda x: x[1]) return longer
febba1d2dac6c79fabf4e8aaad8c0fd482478b50
3,647,224
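A hedged usage example for inventory_update above (function assumed in scope); items must be mutable [quantity, name] lists rather than tuples, because matching quantities are updated in place via longer[i][0] += qty, and the shallow copy means inner lists of the second argument are also mutated.
cur = [[21, "Bowling Ball"], [2, "Dirty Sock"], [1, "Hair Pin"], [5, "Microphone"]]
new = [[2, "Hair Pin"], [3, "Half-Eaten Apple"], [67, "Bowling Ball"], [7, "Toothpaste"]]
print(inventory_update(cur, new))
# [[88, 'Bowling Ball'], [2, 'Dirty Sock'], [3, 'Hair Pin'],
#  [3, 'Half-Eaten Apple'], [5, 'Microphone'], [7, 'Toothpaste']]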
import re from bs4 import BeautifulSoup def racaty(url: str) -> str: """ Racaty direct link generator based on https://github.com/SlamDevs/slam-mirrorbot""" dl_url = '' try: link = re.findall(r'\bhttps?://.*racaty\.net\S+', url)[0] except IndexError: raise DirectDownloadLinkException("No Racaty links found\n") scraper = create_scraper() r = scraper.get(url) soup = BeautifulSoup(r.text, "lxml") op = soup.find("input", {"name": "op"})["value"] ids = soup.find("input", {"name": "id"})["value"] rpost = scraper.post(url, data = {"op": op, "id": ids}) rsoup = BeautifulSoup(rpost.text, "lxml") dl_url = rsoup.find("a", {"id": "uniqueExpirylink"})["href"].replace(" ", "%20") return dl_url
8c0df1dd9bf96fcb63be7f59db20ae6c9e4cef00
3,647,225
import logging import os import types def get_logger( name='mltk', level='INFO', console=False, log_file=None, log_file_mode='w', parent:logging.Logger=None ): """Get or create a logger, optionally adding a console and/or file handler""" logger = logging.getLogger(name) if len(logger.handlers) == 0: if parent is None: logger.propagate = False else: logger.parent = parent logger.propagate = True logger.setLevel('DEBUG') if console: add_console_logger(logger, level=level) if log_file: log_dir = os.path.dirname(log_file) if log_dir: os.makedirs(log_dir, exist_ok=True) fh = logging.FileHandler(log_file, mode=log_file_mode) fh.setLevel('DEBUG') logger.addHandler(fh) if not hasattr(logger, 'close'): def _close(cls): for handler in cls.handlers: if isinstance(handler, logging.FileHandler): handler.close() logger.close = types.MethodType(_close, logger) return logger
f4126c317ddda806354505ea7defa23686084f2a
3,647,226
from typing import Tuple from typing import List import argparse import sys def _parse_cli_args() -> Tuple[str, List[str]]: """Parses CLI args to return device name and args for unittest runner.""" parser = argparse.ArgumentParser( description="Runs a GDM + unittest reboot test on a device. All " "arguments other than the device name are passed through to " "the unittest runner.") parser.add_argument( "-d", "--device", required=True, help="GDM device name to run the test on. For example, 'device-1234'. " "The device must be shown as 'available' or 'connected' in the " "output of 'gdm devices'.") args, remaining_argv = parser.parse_known_args() return args.device, [sys.argv[0]] + remaining_argv
07b2b8c8223f789fca2099f432afede7aee3ef78
3,647,227
from typing import List def build_tree(tree, parent, counts, ordered_ids): """ Recursively splits the data, which contained in the tree object itself and is indexed by ordered_ids. Parameters ---------- tree: Tree object parent: TreeNode object The last node added to the tree, which will be the parent of the two nodes resulting from the split (if any) of this function call. counts: numpy array (int) The class counts of the samples reaching the parent node. ordered_ids: numpy array (int) The ids of the samples reaching the parent node. """ root = TreeNode(0, counts, parent, ordered_ids, False) queue = List() queue.append(root) n_nodes = 1 np.random.seed(tree.random_state) while len(queue) > 0: node = queue.pop(0) split = find_best_split(node, tree, np.random.randint(1e6)) if split is not None: node.split = split left_child = TreeNode(n_nodes, split.left_counts, node, split.left_ids, False) node.left_child = left_child queue.append(left_child) n_nodes += 1 right_child = TreeNode(n_nodes, split.right_counts, node, split.right_ids, False) node.right_child = right_child queue.append(right_child) n_nodes += 1 else: node.isleaf = True tree.depth = max(tree.depth, node.depth) return root, n_nodes
8957ef481ef6b2ba02b6e60c97165a25231d89ae
3,647,228
def get_perf_measure_by_group(aif_metric, metric_name): """Get performance measures by group.""" perf_measures = ['TPR', 'TNR', 'FPR', 'FNR', 'PPV', 'NPV', 'FDR', 'FOR', 'ACC'] func_dict = { 'selection_rate': lambda x: aif_metric.selection_rate(privileged=x), 'precision': lambda x: aif_metric.precision(privileged=x), 'recall': lambda x: aif_metric.recall(privileged=x), 'sensitivity': lambda x: aif_metric.sensitivity(privileged=x), 'specificity': lambda x: aif_metric.specificity(privileged=x), 'power': lambda x: aif_metric.power(privileged=x), 'error_rate': lambda x: aif_metric.error_rate(privileged=x), } if metric_name in perf_measures: metric_func = lambda x: aif_metric.performance_measures(privileged=x)[metric_name] elif metric_name in func_dict.keys(): metric_func = func_dict[metric_name] else: raise NotImplementedError df = pd.DataFrame({ 'Group': ['all', 'privileged', 'unprivileged'], metric_name: [metric_func(group) for group in [None, True, False]], }) return df
d4b861c882d6f5502798d211c2ab1322e19cf9b2
3,647,229
from datetime import datetime from django.http import HttpResponse def hello_world(request): """Return a greeting.""" return HttpResponse('Hello, world!{now}'.format( now=datetime.now().strftime('%b %dth, %Y : %M HttpResponses') ))
bcdf4c504d44883c7afc75c8a76ff052cd0b246d
3,647,230
import mimetypes import zlib def getfile(id, name): """ Returns a file attachment. """ mime = mimetypes.guess_type(name)[0] if mime is None: mime = "application/octet-stream" c = get_cursor() c.execute( """ select files.ticket_id as ticket_id, files.size as size, files.contents as contents, tickets.admin_only as admin_only from files join tickets on tickets.id = files.ticket_id where files.id = :id """, {"id": id}, ) row = c.fetchone() blob = zlib.decompress(row["contents"]) if not user_admin(current_user()) and row["admin_only"] == 1: return "você não tem permissão para acessar este recurso!" else: response.content_type = mime return blob
1ce8322301b33a0d6762aa545344d4c0fe38269c
3,647,231
def get_default_wavelet(): """Returns the default wavelet to be used for scaleograms""" global DEFAULT_WAVELET return DEFAULT_WAVELET
0c403b5b7a21bedbd55c0cbd6faa6a3648c3a0cc
3,647,232
def check_output(file_path: str) -> bool: """ This function checks an output file, either from geomeTRIC or from Psi4, for a successful completion keyword. Returns True if the calculation finished successfully, otherwise False. """ with open(file_path, "r") as read_file: text = read_file.read() checks = ["Converged! =D", "Psi4 exiting successfully"] return any([check in text for check in checks])
2f0dea67216aff945b1b0db74e0131022acc3019
3,647,233
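A tiny check of check_output above using a throwaway file; the file name is hypothetical and the function is assumed to be importable.
with open("example_psi4.out", "w") as fh:
    fh.write("...\nPsi4 exiting successfully\n")
print(check_output("example_psi4.out"))   # True -- contains a success keyword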
def dumps(value): """ Dumps a data structure to TOML source code. The given value must be a TOMLFile instance constructed by this module's load() or loads(). """ if not isinstance(value, TOMLFile): raise RuntimeError( 'Can only dump a TOMLFile instance loaded by load() or loads()' ) return value.dumps()
f92b906b502bc2b0ba2b8bf3840083bafce14086
3,647,234
def calc_graph(dict_graph): """ creates scatter of comfort and curves of constant relative humidity :param dict_graph: contains comfort conditions to plot, output of comfort_chart.calc_data() :type dict_graph: dict :return: traces of scatter plot of 4 comfort conditions :rtype: list of plotly.graph_objs.Scatter """ traces = [] # draw scatter of comfort conditions in building trace = go.Scatter(x=dict_graph['t_op_occupied_winter'], y=dict_graph['x_int_occupied_winter'], name='occupied hours winter', mode='markers', marker=dict(color=COLORS_TO_RGB['red'])) traces.append(trace) trace = go.Scatter(x=dict_graph['t_op_unoccupied_winter'], y=dict_graph['x_int_unoccupied_winter'], name='unoccupied hours winter', mode='markers', marker=dict(color=COLORS_TO_RGB['blue'])) traces.append(trace) trace = go.Scatter(x=dict_graph['t_op_occupied_summer'], y=dict_graph['x_int_occupied_summer'], name='occupied hours summer', mode='markers', marker=dict(color=COLORS_TO_RGB['purple'])) traces.append(trace) trace = go.Scatter(x=dict_graph['t_op_unoccupied_summer'], y=dict_graph['x_int_unoccupied_summer'], name='unoccupied hours summer', mode='markers', marker=dict(color=COLORS_TO_RGB['orange'])) traces.append(trace) return traces
19a277db0f59e2b871130099eab3b714bd5b94b9
3,647,235
import sys def index(): """Display a user's account information.""" if not current_user.is_authenticated: return redirect(url_for("account.login")) cancel_reservation_form = CancelReservationForm() if cancel_reservation_form.validate_on_submit(): cancel_id = int(cancel_reservation_form.id.data) if cancel_reservation_form.type.data == "space": print("SR ID: " + str(cancel_id), file=sys.stderr) sr = Space_Reservation.query.filter_by(id=cancel_id).first() db.session.delete(sr) db.session.commit() flash('Your Space Reservation has been cancelled') elif cancel_reservation_form.type.data == "equipment": print("ER ID: " + str(cancel_id), file=sys.stderr) er = Equipment_Reservation.query.filter_by(id=cancel_id).first() db.session.delete(er) db.session.commit() flash('Your Equipment Reservation has been cancelled') space_sql = '''SELECT sr.*, s.name AS space_name, l.name AS location_name, c.name AS campus_name FROM space_reservations sr JOIN spaces s ON s.id=sr.space_id JOIN locations l ON s.location_id = l.id JOIN campuses c ON l.campus_id = c.id WHERE sr.reserver_id=''' + str(current_user.id) + ";" sr_response = db.engine.execute(text(space_sql)) space_reservations = [] for sr in sr_response: space_reservations.append(dict(zip(sr.keys(), sr))) equipment_sql = '''SELECT er.*, e.name AS equipment_name, et.name AS equipment_type_name, l.name AS location_name, c.name AS campus_name FROM equipment_reservations er JOIN equipment e ON e.id=er.equipment_id JOIN equipment_types et ON e.equipment_type_id = et.id JOIN locations l ON e.location_id = l.id JOIN campuses c ON l.campus_id = c.id WHERE er.reserver_id=''' + str(current_user.id) + ";" er_response = db.engine.execute(text(equipment_sql)) equipment_reservations = [] for er in er_response: equipment_reservations.append(dict(zip(er.keys(), er))) return render_template('main/index.html', user=current_user, space_reservations=space_reservations, equipment_reservations=equipment_reservations, cancel_reservation_form=cancel_reservation_form)
12784a21c3229102c450dd6612bd10f45e7643bd
3,647,236
def generate_arrays(df, resize=True, img_height=50, img_width=200): """ Generates image array and labels array from a dataframe """ num_items = len(df) images = np.zeros((num_items, img_height, img_width), dtype=np.float32) labels = [0] * num_items for i in range(num_items): input_img = keras.preprocessing.image.load_img(df["img_path"][i], color_mode='grayscale') img_array = keras.preprocessing.image.img_to_array(input_img) if resize: img_array = np.resize(img_array, (img_height, img_width)) img_array = (img_array/255.).astype(np.float32) label = df["label"][i] if is_valid_captcha(label): images[i, :, :] = img_array labels[i] = label return images, np.array(labels)
2a50fd84d5b8da2845205b65cd12f61868bd421d
3,647,237
def compute_cosine_distance(Q, feats, names): """ feats and Q: L2-normalize, n*d """ dists = np.dot(Q, feats.T) # print("dists:",dists) # exit(1) idxs = np.argsort(dists)[::-1] rank_dists = dists[idxs] rank_names = [names[k] for k in idxs] return (idxs, rank_dists, rank_names)
e15007fb6fc73aab27db00d7cf283300077dd1c7
3,647,238
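A small illustration of the return values of compute_cosine_distance above (function assumed in scope); with L2-normalized rows the dot products are cosine similarities, ranked in descending order.
import numpy as np
Q = np.array([1.0, 0.0])
feats = np.array([[1.0, 0.0], [0.0, 1.0], [0.707, 0.707]])
names = ['a', 'b', 'c']
idxs, rank_dists, rank_names = compute_cosine_distance(Q, feats, names)
print(idxs, rank_dists, rank_names)  # indices [0 2 1], similarities [1.0, 0.707, 0.0], names ['a', 'c', 'b']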
def phase(ifc, inc_pt, d_in, normal, z_dir, wvl, n_in, n_out): """ apply phase shift to incoming direction, d_in, about normal """ try: d_out, dW = ifc.phase(inc_pt, d_in, normal, z_dir, wvl, n_in, n_out) return d_out, dW except ValueError: raise TraceEvanescentRayError(ifc, inc_pt, d_in, normal, n_in, n_out)
6289674f20718ed4e1e78b1a4da0fe5d4b89df75
3,647,239
from typing import Iterator def generate_udf(spec: "rikai.spark.sql.codegen.base.ModelSpec"): """Construct a UDF to run sklearn model. Parameters ---------- spec : ModelSpec the model specifications object Returns ------- A Spark Pandas UDF. """ def predict(model, X): if hasattr(model, "predict"): return model.predict(X) elif hasattr(model, "transform"): return model.transform(X) else: raise RuntimeError("predict or transform is not available") def sklearn_inference_udf( iter: Iterator[pd.Series], ) -> Iterator[pd.Series]: model = spec.load_model() for series in list(iter): X = np.vstack(series.apply(_pickler.loads).to_numpy()) y = [_pickler.dumps(pred.tolist()) for pred in predict(model, X)] yield pd.Series(y) return pandas_udf(sklearn_inference_udf, returnType=BinaryType())
ceab18240abc73c361108b859817723c08bdd0e3
3,647,240
import tensorflow as tf def ssl_loss_mean_teacher(labels_x, logits_x, logits_teacher, logits_student): """ Computes the labeled cross entropy loss and the mean teacher consistency loss. x_loss is the CE loss on the labeled data and loss_mt is the mean squared error between the teacher's and the student's softmax predictions. Args: labels_x: tensor, contains labels corresponding to logits_x of shape [batch, num_classes] logits_x: tensor, contains the logits of a batch of images of shape [batch, num_classes] logits_teacher: tensor, logits of teacher model of shape [batch, num_classes] logits_student: tensor, logits of student model of shape [batch, num_classes] Returns: Two floating point numbers, the first representing the labeled CE loss and the second holding the mean teacher MSE consistency loss. """ x_loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels_x, logits=logits_x) x_loss = tf.reduce_mean(x_loss) loss_mt = tf.reduce_mean((tf.nn.softmax(logits_teacher) - tf.nn.softmax(logits_student)) ** 2, -1) loss_mt = tf.reduce_mean(loss_mt) return x_loss, loss_mt
016192ea6cf1002a0aa8735003e76a7c2af7526c
3,647,241
from sympy import Tuple, nsimplify, sympify def _sch_el(self, *wert, **kwargs): """Element of a family (Schar); for a single parameter""" if kwargs.get('h'): print("\nElement einer Schar von Matrizen\n") print("Aufruf matrix . sch_el( wert )\n") print(" matrix Matrix") print(" wert Wert des Scharparameters") print("\nEs ist nur ein Scharparameter zugelassen\n") return schar = any([ve.is_schar for ve in self.vekt]) if not schar or len(self.sch_par) > 1: print('agla: keine Schar mit einem Parameter') return if not wert or len(wert) != 1: print('agla: einen Wert für den Scharparameter angeben') return p = Tuple(*self.sch_par)[0] wert = sympify(*wert) if not is_zahl(wert): print('agla: für den Scharparameter Zahl oder freien Parameter angeben') return try: wert = nsimplify(wert) except RecursionError: pass vekt = [] for ve in self.vekt: if p in ve.sch_par: vekt.append(ve.sch_el(wert)) else: vekt.append(ve) return Matrix(*vekt)
8e88e04ee6e4f1b4be658c120a1bc66060aafc81
3,647,242
import os def check_directory(path, read=False, write=False, execute=False): """Check that the path exists and that the current user has the requested read/write/execute permissions.""" if os.path.isdir(path) and check_mode(path, read=read, write=write, execute=execute): return True else: return False
cbfdaed4b33a47c040829404edca39ff1aed36a2
3,647,243
def scsilun_to_int(lun): """ There are two style lun number, one's decimal value is <256 and the other is full as 16 hex digit. According to T10 SAM, the full 16 hex digit should be swapped and converted into decimal. For example, SC got zlinux lun number from DS8K API, '40294018'. And it should be swapped to '40184029' and converted into decimal, 1075331113. When the lun number is '0c' and its decimal value is <256, it should be converted directly into decimal, 12. https://github.com/kubernetes/kubernetes/issues/45024 """ pretreated_scsilun = int(lun, 16) if pretreated_scsilun < 256: return pretreated_scsilun return (pretreated_scsilun >> 16 & 0xFFFF) | \ (pretreated_scsilun & 0xFFFF) << 16
2022938ccb5abbc89d5fb6f5f109d629e980c0ba
3,647,244
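A quick arithmetic check of the swap described in the docstring above, done inline and independent of the function itself:
lun = int('40294018', 16)                         # zLinux-style 16-hex-digit LUN
swapped = (lun >> 16 & 0xFFFF) | (lun & 0xFFFF) << 16
print(hex(swapped), swapped)                      # 0x40184029 1075331113
print(int('0c', 16))                              # 12 -- values below 256 are used directly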
import numpy as np def ordered_indices(src_sizes,tgt_sizes,common_seed,shuffle=True,buckets=None): """Return an ordered list of indices. Batches will be constructed based on this order.""" if shuffle: indices = np.random.RandomState(common_seed).permutation(len(src_sizes)).astype(np.int64) else: indices = np.arange(len(src_sizes), dtype=np.int64) if buckets is None: # sort by target length, then source length if tgt_sizes is not None: # first sort by number of target tokens indices = indices[ np.argsort(tgt_sizes[indices], kind="mergesort")] # stable mergesort of the (shuffled) indices by tgt_sizes gives the sorted index order return indices[np.argsort(src_sizes[indices], kind="mergesort")] # then sort by number of source tokens else: # sort by the larger of the two lengths # sort by bucketed_num_tokens, which is: # max(padded_src_len, padded_tgt_len) bucketed_num_tokens=np.array([max(src_size,tgt_size) for src_size,tgt_size in zip(src_sizes,tgt_sizes)]) return indices[ np.argsort(bucketed_num_tokens[indices], kind="mergesort") ]
469d7f963134d7df9c72be07182e7ba4e2533472
3,647,245
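Assuming numpy and the ordered_indices function above, a small example with shuffle=False shows the resulting order: the primary key is source length, with the earlier target-length sort acting as a stable tie-breaker.
import numpy as np
src_sizes = np.array([5, 2, 9, 2])
tgt_sizes = np.array([4, 3, 7, 1])
print(ordered_indices(src_sizes, tgt_sizes, common_seed=1, shuffle=False))
# [3 1 0 2] -- indices 3 and 1 tie on src length 2; tgt lengths 1 < 3 break the tie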
import io def get_predictions(single_stream, class_mapping_dict, ip, port, model_name): """Gets predictions for a single image using Tensorflow serving Arguments: single_stream (dict): A single prodigy stream class_mapping_dict (dict): with key as int and value as class name ip (str): tensorflow serving IP port (str): tensorflow serving port model_name (str): model name in tensorflow serving Returns: A tuple containing numpy arrays: (class_ids, class_names, scores, boxes) """ image_byte_stream = b64_uri_to_bytes(single_stream["image"]) encoded_image_io = io.BytesIO(image_byte_stream) image = Image.open(encoded_image_io) width, height = image.size filename = str(single_stream["meta"]["file"]) file_extension = filename.split(".")[1].lower() if file_extension == "png": image_format = b'png' elif file_extension in ("jpg", "jpeg"): image_format = b'jpg' else: log(("Only 'png', 'jpeg' or 'jpg' files are supported by ODAPI. " "Got {}. Thus treating it as `jpg` file. " "Might cause errors".format(file_extension) )) image_format = b'jpg' filename = filename.encode("utf-8") tf_example = tf.train.Example(features=tf.train.Features(feature={ 'image/height': dataset_util.int64_feature(height), 'image/width': dataset_util.int64_feature(width), 'image/filename': dataset_util.bytes_feature(filename), 'image/source_id': dataset_util.bytes_feature(filename), 'image/encoded': dataset_util.bytes_feature(image_byte_stream), 'image/format': dataset_util.bytes_feature(image_format), })) boxes, class_ids, scores = tf_odapi_client(tf_example.SerializeToString(), ip, port, model_name, "serving_default", input_name="serialized_example", timeout=300 ) class_names = np.array([class_mapping_dict[class_id] for class_id in class_ids]) return (class_ids, class_names, scores, boxes)
631c21878df03c240d32556279d9b31ebc6d723f
3,647,246
import itertools def interaction_graph(matrix): """Create a networkx graph object from a (square) matrix. Parameters ---------- matrix : numpy.ndarray | Matrix of mutual information, the information for the edges is taken from the upper matrix Returns ------- graph : networkx.Graph() The graph with MI as weighted edges and positions as nodes Raises ------ AssertionError If the matrix is not square """ # Assert if the matrix is a square matrix. assert matrix.shape[0] == matrix.shape[1], "The matrix is not square" graph = nx.Graph() positions = len(matrix) for pos1, pos2 in itertools.combinations(range(positions), 2): graph.add_edge(pos1, pos2, weight=matrix[pos1, pos2]) return graph
d1da8b6f0e269c1118f56840173e7895d5efb587
3,647,247
import os def tags_filter(osm_pbf, dst_fname, expression, overwrite=True): """Extract OSM objects based on their tags. The function reads an input .osm.pbf file and uses `osmium tags-filter` to extract the relevant objects into an output .osm.pbf file. Parameters ---------- osm_pbf : str Path to input .osm.pbf file. dst_fname : str Path to output .osm.pbf file. expression : str Osmium tags-filter expression. See `osmium tags-filter` manpage for details. overwrite : bool, optional Overwrite existing file. Returns ------- dst_fname : str Path to output .osm.pbf file. """ expression_parts = expression.split(" ") command = ["osmium", "tags-filter", osm_pbf] command += expression_parts command += ["-o", dst_fname] if overwrite: command += ["--overwrite"] logger.info(f"Running command: {' '.join(command)}") run(command, check=True, stdout=DEVNULL, stderr=DEVNULL) src_size = human_readable_size(os.path.getsize(osm_pbf)) dst_size = human_readable_size(os.path.getsize(dst_fname)) logger.info( f"Extracted {os.path.basename(dst_fname)} ({dst_size}) " f"from {os.path.basename(osm_pbf)} ({src_size})." ) return dst_fname
60fb579bcfccb80dbd66ebb50d8478b4f718b2db
3,647,248
import os def bound_n_samples_from_env(n_samples: int): """Bound number of samples from environment variable. Uses environment variable `PYPESTO_MAX_N_SAMPLES`. This is used to speed up testing, while in application it should not be used. Parameters ---------- n_samples: Number of samples desired. Returns ------- n_samples_new: The original number of samples, or the minimum with the environment variable, if exists. """ if PYPESTO_MAX_N_SAMPLES not in os.environ: return n_samples n_samples_new = min(n_samples, int(os.environ[PYPESTO_MAX_N_SAMPLES])) logger.info( f"Bounding number of samples from {n_samples} to {n_samples_new} via " f"environment variable {PYPESTO_MAX_N_SAMPLES}" ) return n_samples_new
c578a16ab698b5921604100c5f5d04574363b4b8
3,647,249
import torch def weight_inter_agg(num_relations, self_feats, neigh_feats, embed_dim, weight, alpha, n, cuda): """ Weight inter-relation aggregator Reference: https://arxiv.org/abs/2002.12307 :param num_relations: number of relations in the graph :param self_feats: batch nodes features or embeddings :param neigh_feats: intra-relation aggregated neighbor embeddings for each relation :param embed_dim: the dimension of output embedding :param weight: parameter used to transform node embeddings before inter-relation aggregation :param alpha: weight parameter for each relation used by Rio-Weight :param n: number of nodes in a batch :param cuda: whether use GPU :return: inter-relation aggregated node embeddings """ # transform batch node embedding and neighbor embedding in each relation with weight parameter center_h = weight.mm(self_feats.t()) neigh_h = weight.mm(neigh_feats.t()) # compute relation weights using softmax w = F.softmax(alpha, dim=1) # initialize the final neighbor embedding if cuda: aggregated = torch.zeros(size=(embed_dim, n)).cuda() else: aggregated = torch.zeros(size=(embed_dim, n)) # add weighted neighbor embeddings in each relation together for r in range(num_relations): aggregated += torch.mul(w[:, r].unsqueeze(1).repeat(1, n), neigh_h[:, r * n:(r + 1) * n]) # sum aggregated neighbor embedding and batch node embedding # feed them to activation function combined = F.relu(center_h + aggregated) return combined
c664bd88fbd8abf30b050ca93c264a3e5ead147b
3,647,250
def ho2ro(ho): """Axis angle pair to Rodrigues-Frank vector.""" return Rotation.ax2ro(Rotation.ho2ax(ho))
be3ce1dd6ac9e0815a4cb50ff922f0816320fcae
3,647,251
def get_ratio(old, new): # type: (unicode, unicode) -> float """Return a "similarity ratio" (in percent) representing the similarity between the two strings, where 0 means equal and anything above 0 means less than equal. """ if not all([old, new]): return VERSIONING_RATIO if IS_SPEEDUP: return Levenshtein.distance(old, new) / (len(old) / 100.0) else: return levenshtein_distance(old, new) / (len(old) / 100.0)
28648934701445c9066e88b787465ccc21aa6ba5
3,647,252
def sample2D(F, X, Y, mask=None, undef_value=0.0, outside_value=None): """Bilinear sample of a 2D field *F* : 2D array *X*, *Y* : position in grid coordinates, scalars or compatible arrays *mask* : if present must be a 2D matrix with 1 at valid and zero at non-valid points *undef_value* : value to put at undefined points *outside_value* : value to return outside the grid defaults to None, raising ValueError if any points are outside Note reversed axes, for integers i and j we have ``sample2D(F, i, j) = F[j,i]`` If jmax, imax = F.shape then inside values requires 0 <= x < imax-1, 0 <= y < jmax-1 Using bilinear interpolation """ # --- Argument checking --- # X and Y should be broadcastable to the same shape Z = np.add(X, Y) # scalar is True if both X and Y are scalars scalar = np.isscalar(Z) if np.ndim(F) != 2: raise ValueError("F must be 2D") if mask is not None: if mask.shape != F.shape: raise ValueError("Must have mask.shape == F.shape") jmax, imax = F.shape # Broadcast X and Y X0 = X + np.zeros_like(Z) Y0 = Y + np.zeros_like(Z) # Find integer I, J such that # 0 <= I <= X < I+1 <= imax-1, 0 <= J <= Y < J+1 <= jmax-1 # and local increments P and Q I = X0.astype("int") J = Y0.astype("int") P = X0 - I Q = Y0 - J outside = (X0 < 0) | (X0 >= imax - 1) | (Y0 < 0) | (Y0 >= jmax - 1) if np.any(outside): if outside_value is None: raise ValueError("point outside grid") I = np.where(outside, 0, I) J = np.where(outside, 0, J) # try: # J[outside] = 0 # I[outside] = 0 # except TypeError: # Zero-dimensional # I = np.array(0) # J = np.array(0) # Weights for bilinear interpolation W00 = (1 - P) * (1 - Q) W01 = (1 - P) * Q W10 = P * (1 - Q) W11 = P * Q SW = 1.0 # Sum of weights if mask is not None: W00 = mask[J, I] * W00 W01 = mask[J + 1, I] * W01 W10 = mask[J, I + 1] * W10 W11 = mask[J + 1, I + 1] * W11 SW = W00 + W01 + W10 + W11 SW = np.where(SW == 0, -1.0, SW) # Avoid division by zero below S = np.where( SW <= 0, undef_value, (W00 * F[J, I] + W01 * F[J + 1, I] + W10 * F[J, I + 1] + W11 * F[J + 1, I + 1]) / SW, ) # Set in outside_values if outside_value: S = np.where(outside, outside_value, S) # Scalar input gives scalar output if scalar: S = float(S) return S
746782b7712ff28f76db280e9c55977e81a370a5
3,647,253
import time def read(file=None, timeout=10, wait=0.2, threshold=32): """Return the external temperature. Keyword arguments: file -- the path to the 1-wire serial interface file timeout -- number of seconds without a reading after which to give up wait -- number of seconds to wait after a failed read before retying threshold -- log a warning if temperature exceed threshold Although the DS18B20 only measures the temperature, this method returns a two-element tuple to allow easier interchangibility with the DHT22 which returns temperature and humidity. """ if file is None: file = _discover() logger.debug('Started reading sensor at {}'.format(file)) t1 = time.time() try: temp = _read(file, timeout, wait) except (RuntimeError, FileNotFoundError) as e: logger.warn(e.args) raise t2 = time.time() if temp > threshold: logger.warning( 'temp {:.1f}C exceeds threshold {:.1f}C' \ .format(temp, threshold) ) logger.info('temp={:.1f}C'.format(temp)) logger.debug('Finished reading sensor ({:.1f}s)'.format(t2-t1)) return temp, None
93d409c6d2d019ba90bb61a5faa6d2fb761ed8a5
3,647,254
import numpy as np


def rotations_to_radians(rotations):
    """Converts rotations to radians."""
    return np.pi * 2 * rotations
15beacbccbbe6d22ac4f659aa5cf22a4e63b503e
3,647,255
import jax.numpy as jnp


def _expect_ket(oper, state):
    """Private function to calculate the expectation value of an operator
    with respect to a ket.
    """
    oper, ket = jnp.asarray(oper), jnp.asarray(state)
    return jnp.vdot(jnp.transpose(ket), jnp.dot(oper, ket))
c7b261852f0e77bda7dcb3cae53939f637e1dca7
3,647,256
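A quick check of _expect_ket against a known value: the expectation of the Pauli-Z operator in the |0> state is +1 (the operator and state below are illustrative inputs):

import jax.numpy as jnp

sigma_z = jnp.array([[1.0, 0.0], [0.0, -1.0]])
ket0 = jnp.array([1.0, 0.0])

print(_expect_ket(sigma_z, ket0))  # 1.0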
def resnet152(pretrained=False, last_stride=1, model_path=''):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        last_stride (int): Stride used in the last convolutional stage
        model_path (str): Path to the pre-trained weights
    """
    return _resnet(pretrained=pretrained, last_stride=last_stride,
                   block=Bottleneck, layers=[3, 8, 36, 3],
                   model_path=model_path, model_name='resnet152')
ad2837271ef98861dc8f2c3eae9687fc71d435b6
3,647,257
def check_wheel_move_during_closed_loop(data, wheel_gain=None, **_): """ Check that the wheel moves by approximately 35 degrees during the closed-loop period on trials where a feedback (error sound or valve) is delivered. Metric: M = abs(w_resp - w_t0) - threshold_displacement, where w_resp = position at response time, w_t0 = position at go cue time, threshold_displacement = displacement required to move 35 visual degrees Criterion: displacement < 3 visual degrees Units: degrees angle of wheel turn :param data: dict of trial data with keys ('wheel_timestamps', 'wheel_position', 'choice', 'intervals', 'goCueTrigger_times', 'response_times', 'feedback_times', 'position') :param wheel_gain: the 'STIM_GAIN' task setting """ # Get the Bpod extracted wheel data timestamps = data['wheel_timestamps'] position = data['wheel_position'] return _wheel_move_during_closed_loop(timestamps, position, data, wheel_gain, tol=3)
6b696158a086cf899cc23d207b0c6142f1f50a65
3,647,258
def next_fast_len(target: int) -> int: """ Find the next fast size of input data to `fft`, for zero-padding, etc. SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this returns the next composite of the prime factors 2, 3, and 5 which is greater than or equal to `target`. (These are also known as 5-smooth numbers, regular numbers, or Hamming numbers.) Parameters ---------- target : int Length to start searching from. Must be a positive integer. Returns ------- out : int The first 5-smooth number greater than or equal to `target`. Notes ----- .. versionadded:: 0.18.0 Examples -------- On a particular machine, an FFT of prime length takes 133 ms: >>> from scipy import fftpack >>> min_len = 10007 # prime length is worst case for speed >>> a = np.random.randn(min_len) >>> b = fftpack.fft(a) Zero-padding to the next 5-smooth length reduces computation time to 211 us, a speedup of 630 times: >>> fftpack.helper.next_fast_len(min_len) 10125 >>> b = fftpack.fft(a, 10125) Rounding up to the next power of 2 is not optimal, taking 367 us to compute, 1.7 times as long as the 5-smooth size: >>> b = fftpack.fft(a, 16384) """ hams = (8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80, 81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192, 200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384, 400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720, 729, 750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024, 1080, 1125, 1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458, 1500, 1536, 1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025, 2048, 2160, 2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700, 2880, 2916, 3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645, 3750, 3840, 3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800, 4860, 5000, 5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144, 6250, 6400, 6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776, 8000, 8100, 8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000) target = int(target) if target <= 6: return target # Quickly check if it's already a power of 2 if not (target & (target-1)): return target # Get result quickly for small sizes, since FFT itself is similarly fast. if target <= hams[-1]: return hams[bisect_left(hams, target)] match = float('inf') # Anything found will be smaller p5 = 1 while p5 < target: p35 = p5 while p35 < target: # Ceiling integer division, avoiding conversion to float # (quotient = ceil(target / p35)) quotient = -(-target // p35) # Quickly find next power of 2 >= quotient p2 = 2**((quotient - 1).bit_length()) N = p2 * p35 if N == target: return N elif N < match: match = N p35 *= 3 if p35 == target: return p35 if p35 < match: match = p35 p5 *= 5 if p5 == target: return p5 if p5 < match: match = p5 return match
e00aa69ffd425489cceef5f50b77c977e18b4a1f
3,647,259
import numpy as np


def human_format(val: int, fmt='.1f') -> str:
    """Convert a number to a short human-readable string, e.g. 1230 -> '1.2 K'."""
    units = ['', 'K', 'M', 'G']
    # clamp the unit index so values >= 10**12 do not index past the end of `units`
    base = min(int(np.floor(np.log10(val))) // 3, len(units) - 1)
    if base == 0:
        return str(val)
    val = val / 10 ** (3 * base)
    res = ('{{:{}}} {}'.format(fmt, units[base])).format(val)
    return res
73dfffa4f9afaa2c294ebceeabda4947d3a6cbe1
3,647,260
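A few illustrative calls to human_format with the default format string:

human_format(999)        # -> '999'  (values below 1000 are returned unchanged)
human_format(1230)       # -> '1.2 K'
human_format(2_500_000)  # -> '2.5 M'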
def select_or_insert(conn, table, id_name, payload, name=None, multi=False, insert=True): """ Prepare the SQL statements, payload MUST be a list """ log.debug('payload: {}'.format(payload)) if multi is False: sql_str = ''.join(['SELECT ', id_name, ' FROM ', table, ' WHERE ', name, ' LIKE (%s);']) result = execute_sql(conn, sql_str, payload) log.debug('select: {}'.format(result)) if result is None and insert is True: sql_str = ''.join(['INSERT INTO ', table, '(', name, ') VALUES (%s) RETURNING ', id_name, ';']) result = execute_sql(conn, sql_str, payload, commit=True) log.debug('insert: {}'.format(result)) else: id1, id2 = id_name sql_str = ''.join(['SELECT ', id1, ',', id2, ' FROM ', table, ' WHERE ', id1, ' = (%s) AND ', id2, ' = (%s);']) result = execute_sql(conn, sql_str, payload) log.debug('select: {}'.format(result)) if result is None and insert is True: sql_str = ''.join(['INSERT INTO ', table, '(', id1, ',', id2, ') VALUES (%s, %s) RETURNING ', id1, ',', id2, ';']) result = execute_sql(conn, sql_str, payload, commit=True) log.debug('insert: {}'.format(result)) return result
0280eb5d3877fa80a9632589e967669fd22254e1
3,647,261
from random import randint


def random_choice(number: int) -> bool:
    """
    Generate a random int and compare it with the argument passed

    :param int number: number passed
    :return: whether the argument is greater than or equal to the randomly generated number
    :rtype: bool
    """
    return number >= randint(1, 100)
d88e7e23bcff89b0f33a43e34b2ba640589fb0e3
3,647,262
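A quick behavioural note on random_choice: since randint(1, 100) is uniform on 1..100, random_choice(k) returns True with probability k/100 for 1 <= k <= 100 (always False for k <= 0, always True for k >= 100). A rough empirical sketch:

hits = sum(random_choice(30) for _ in range(10_000))
print(hits / 10_000)  # roughly 0.30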
import xml.etree.ElementTree as ET
from typing import Any


def request_l3_attachments(session, apic) -> Any:
    """Request the current policy information for encaps on L3Outs."""
    root = None
    uri = f"https://{apic}/api/class/l3extRsPathL3OutAtt.xml"
    response = session.get(uri, verify=False)
    try:
        root = ET.fromstring(response.text)
    except ET.ParseError:
        print("Something went wrong. Please try again")

    # If the response has a totalCount of 0, notify the user that the encap wasn't found
    if response.text.rfind("totalCount=\"0\"") != -1 or response.text.rfind("error code") != -1:
        print("\n######## No External Policy Assigned ##########")

    return root
0623ffde29a67e0f96ec5284b0a27109bff5b1aa
3,647,263
def bet_plot( pressure, bet_points, minimum, maximum, slope, intercept, p_monolayer, bet_monolayer, ax=None ): """ Draw a BET plot. Parameters ---------- pressure : array Pressure points which will make up the x axis. bet_points : array BET-transformed points which will make up the y axis. minimum : int Lower bound of the selected points. maximum : int Higher bound of the selected points. slope : float Slope of the chosen linear region. intercept : float Intercept of the chosen linear region. p_monolayer : float Pressure at which statistical monolayer is achieved. rol_monolayer : float BET transform of the point at which statistical monolayer is achieved. ax : matplotlib axes object, default None The axes object where to plot the graph if a new figure is not desired. Returns ------- matplotlib.axes Matplotlib axes of the graph generated. The user can then apply their own styling if desired. """ # Generate the figure if needed if ax is None: _, ax = plt.pyplot.subplots(figsize=(6, 4)) ax.plot( pressure, bet_points, label='all points', **POINTS_ALL_STYLE, ) ax.plot( pressure[minimum:maximum], bet_points[minimum:maximum], label='chosen points', **POINTS_SEL_STYLE, ) x_lim = [0, pressure[maximum]] y_lim = [slope * x_lim[0] + intercept, slope * x_lim[1] + intercept] ax.plot( x_lim, y_lim, linestyle='--', color='black', label='model fit', ) ax.plot( p_monolayer, bet_monolayer, marker='X', markersize=10, linestyle='', color='k', label='monolayer point' ) ax.set_ylim(bottom=0, top=bet_points[maximum] * 1.2) ax.set_xlim(left=0, right=pressure[maximum] * 1.2) ax.set_title("BET plot") ax.set_xlabel('p/p°', fontsize=15) ax.set_ylabel('$\\frac{p/p°}{n ( 1- p/p°)}$', fontsize=15) ax.legend(loc='best') return ax
751abe12683ceff72066b3b2cd6938d6e9a67507
3,647,264
from typing import Optional def paged_response( *, view: viewsets.GenericViewSet, queryset: Optional[QuerySet] = None, status_code: Optional[int] = None, ): """ paged_response can be used when there is a need to paginate a custom API endpoint. Usage: class UsersView(ModelViewSet): ... @action( ['get'], detail=True, serializer_class=PostSerializer, filterset_class=PostsFilterSet, ) def posts(self, request: Request, pk: Optional[str] = None): queryset = Post.objects.filter(user=self.get_object()) return paged_response(view=self, queryset=queryset) :param view: any instance that statisfies the GenericViewSet interface :param queryset: Optional django.db.models.QuerySet. Default: get_queryset output :param status_code: Optional int :return: rest_framework.response.Response """ status_code = status_code or status.HTTP_200_OK queryset = queryset or view.get_queryset() queryset = view.filter_queryset(queryset) page = view.paginate_queryset(queryset) if page is not None: serializer = view.get_serializer(page, many=True) return view.get_paginated_response(serializer.data) serializer = view.get_serializer(queryset, many=True) return response.Response(serializer.data, status=status_code)
73c38abbedf8f22a57bb6bda1b42d6013520885a
3,647,265
def getObjectPositions(mapData, threshold, findCenterOfMass = True): """Creates a segmentation map and find objects above the given threshold. Args: mapData (:obj:`numpy.ndarray`): The 2d map to segment. threshold (float): The threshold above which objects will be selected. findCenterOfMass: If True, return the object center weighted according to the values in mapData. If False, return the pixel that holds the maximum value. Returns: objIDs (:obj:`numpy.ndarray`): Array of object ID numbers. objPositions (list): List of corresponding (y, x) positions. objNumPix (:obj:`numpy.ndarray`): Array listing number of pixels per object. segmentationMap (:obj:`numpy.ndarray`): The segmentation map (2d array). """ if threshold < 0: raise Exception("Detection threshold (thresholdSigma in the config file) cannot be negative unless in forced photometry mode.") sigPix=np.array(np.greater(mapData, threshold), dtype=int) sigPixMask=np.equal(sigPix, 1) segmentationMap, numObjects=ndimage.label(sigPix) objIDs=np.unique(segmentationMap) if findCenterOfMass == True: objPositions=ndimage.center_of_mass(mapData, labels = segmentationMap, index = objIDs) else: objPositions=ndimage.maximum_position(mapData, labels = segmentationMap, index = objIDs) objNumPix=ndimage.sum(sigPixMask, labels = segmentationMap, index = objIDs) return objIDs, objPositions, objNumPix, segmentationMap
d070f70270837ec1b1ff6f29eedc21deb2b4846c
3,647,266
def specific_humidity(p,RH,t,A=17.625,B=-30.11,C=610.94,masked=False): """ From Mark G. Lawrence, BAMS Feb 2005, eq. (6) q = specific_humidity(p,RH,t,A,B,C) inputs: p = pressure (Pa) RH = relative humidity (0-1) t = temperature (K) keywords: A, B and C are optional fitting parameters from Alduchov and Eskridge (1996). Masked = False (if True, perform operation on masked arrays) output: q, specific humidity (kg/kg) p, RH and t can be arrays. """ if masked==False: es = C * exp(A*(t-273.15)/(B+t)) q = 0.62198*(RH*es)/(maximum(p,es)-(1-0.62198)*es) else: es = C * ma.exp(A*(t-273.15)/(B+t)) q = 0.62198*(RH*es)/(maximum(p,es)-(1-0.62198)*es) return q
2cfd4cad24a0f412d8021fdfdbc9874823093dcc
3,647,267
import os def list_versions(): """ List the EMDB-SFF versions that are migratable to the current version :return: status :return: version_count """ version_count = len(VERSION_LIST) for version in VERSION_LIST[:-1]: _print('* {version}'.format(version=version)) return os.EX_OK, version_count
a02a7c74177aec70ec3dae4e6a86ff2ed62465a7
3,647,268
from typing import Optional from typing import Literal def build_parser(): """ Build a pyparsing parser for our custom topology description language. :return: A pyparsing parser. :rtype: pyparsing.MatchFirst """ ParserElement.setDefaultWhitespaceChars(' \t') nl = Suppress(LineEnd()) inumber = Word(nums).setParseAction(lambda l, s, t: int(t[0])) fnumber = ( Combine( Optional('-') + Word(nums) + '.' + Word(nums) + Optional('E' | 'e' + Optional('-') + Word(nums)) ) ).setParseAction(lambda toks: float(toks[0])) boolean = ( CaselessLiteral('true') | CaselessLiteral('false') ).setParseAction(lambda l, s, t: t[0].casefold() == 'true') comment = Literal('#') + restOfLine + nl text = QuotedString('"') identifier = Word(alphas, alphanums + '_') empty_line = LineStart() + LineEnd() item_list = ( (text | fnumber | inumber | boolean) + Optional(Suppress(',')) + Optional(nl) ) custom_list = ( Suppress('(') + Optional(nl) + Group(OneOrMore(item_list)) + Optional(nl) + Suppress(')') ).setParseAction(lambda tok: tok.asList()) attribute = Group( identifier('key') + Suppress(Literal('=')) + ( custom_list | text | fnumber | inumber | boolean | identifier )('value') + Optional(nl) ) attributes = ( Suppress(Literal('[')) + Optional(nl) + OneOrMore(attribute) + Suppress(Literal(']')) ) node = identifier('node') port = Group( node + Suppress(Literal(':')) + (identifier | inumber)('port') ) link = Group( port('endpoint_a') + Suppress(Literal('--')) + port('endpoint_b') ) environment_spec = ( attributes + nl ).setResultsName('env_spec', listAllMatches=True) nodes_spec = ( Group( Optional(attributes)('attributes') + Group(OneOrMore(node))('nodes') ) + nl ).setResultsName('node_spec', listAllMatches=True) ports_spec = ( Group( Optional(attributes)('attributes') + Group(OneOrMore(port))('ports') ) + nl ).setResultsName('port_spec', listAllMatches=True) link_spec = ( Group( Optional(attributes)('attributes') + link('links') ) + nl ).setResultsName('link_spec', listAllMatches=True) statements = OneOrMore( comment | link_spec | ports_spec | nodes_spec | environment_spec | empty_line ) return statements
1eccb042b18c3c53a69a41e711a4347a6edf55b9
3,647,269
import math def decode_owner(owner_id: str) -> str: """Decode an owner name from an 18-character hexidecimal string""" if len(owner_id) != 18: raise ValueError('Invalid owner id.') hex_splits = split_by(owner_id, num=2) bits = '' for h in hex_splits: bits += hex_to_bin(h) test_owner = '' for seq in split_by(bits, 6): num = bin_to_dec(seq) test_owner += get_ascii_val_from_bit_value(num) if test_owner[0] != '?': return test_owner[:math.ceil(MAX_OWNER_LENGTH/2)] + '..' + test_owner[-math.floor(MAX_OWNER_LENGTH/2):] while test_owner[0] == '?': test_owner = test_owner[1:] return test_owner
1460ebe3dfd2f36aa2f5e42b28b2d7651d0d2cee
3,647,270
def _get_back_up_generator(frame_function, *args, **kwargs): """Create a generator for the provided animation function that backs up the cursor after a frame. Assumes that the animation function provides a generator that yields strings of constant width and height. Args: frame_function: A function that returns a FrameGenerator. args: Arguments for frame_function. kwargs: Keyword arguments for frame_function. Returns: a generator that generates backspace/backline characters for the animation func generator. """ lines = next(frame_function(*args, **kwargs)).split('\n') width = len(lines[0]) height = len(lines) if height == 1: return util.BACKSPACE_GEN(width) return util.BACKLINE_GEN(height)
a395e91864115f69dc0a7d8d8a3bb2eb90d957e9
3,647,271
from typing import Any from typing import Optional def from_aiohttp( schema_path: str, app: Any, *, base_url: Optional[str] = None, method: Optional[Filter] = None, endpoint: Optional[Filter] = None, tag: Optional[Filter] = None, operation_id: Optional[Filter] = None, skip_deprecated_operations: bool = False, validate_schema: bool = True, force_schema_version: Optional[str] = None, data_generation_methods: DataGenerationMethodInput = DEFAULT_DATA_GENERATION_METHODS, code_sample_style: str = CodeSampleStyle.default().name, **kwargs: Any, ) -> BaseOpenAPISchema: """Load Open API schema from an AioHTTP app. :param str schema_path: An in-app relative URL to the schema. :param app: An AioHTTP app instance. """ from ...extra._aiohttp import run_server # pylint: disable=import-outside-toplevel port = run_server(app) app_url = f"http://127.0.0.1:{port}/" url = urljoin(app_url, schema_path) return from_uri( url, base_url=base_url, method=method, endpoint=endpoint, tag=tag, operation_id=operation_id, skip_deprecated_operations=skip_deprecated_operations, validate_schema=validate_schema, force_schema_version=force_schema_version, data_generation_methods=data_generation_methods, code_sample_style=code_sample_style, **kwargs, )
11c7d2cf9e19e8876ef45118f3842b51fbc734b9
3,647,272
import sys import random import string def summarize(fname, start, stop,output_dir): """ Process file[start:stop] start and stop both point to first char of a line (or EOF) """ ls_1995_1996 = [] for i in range (1995,2006): ls_1995_1996.append([]) with open(fname, newline='', encoding='utf-8') as inf: # jump to start position pos = start inf.seek(pos) for line in inf: sys.stdout.write("\r" + random.choice(string.ascii_letters)) sys.stdout.flush() if "1995" in line: ls_1995_1996[0].append(line) elif "1996" in line: ls_1995_1996[1].append(line) elif "1997" in line: ls_1995_1996[2].append(line) elif "1998" in line: ls_1995_1996[3].append(line) elif "1999" in line: ls_1995_1996[4].append(line) elif "2000" in line: ls_1995_1996[5].append(line) elif "2001" in line: ls_1995_1996[6].append(line) elif "2002" in line: ls_1995_1996[7].append(line) elif "2003" in line: ls_1995_1996[8].append(line) elif "2004" in line: ls_1995_1996[9].append(line) elif "2005" in line: ls_1995_1996[10].append(line) pos += len(line) if pos >= stop: break write_to_file(fname, ls_1995_1996, output_dir, start, stop) return ls_1995_1996
443855fa4e591b4a40779c7fbb50f3b445a41c64
3,647,273
def compute_recall(true_positives, false_negatives): """Compute recall >>> compute_recall(0, 10) 0.0 >>> compute_recall(446579, 48621) 0.901815 """ return true_positives / (true_positives + false_negatives)
876bee73150d811e6b7c1a5de8d8e4349105c59b
3,647,274
def get_highest_seat_id(): """ Returns the highest seat ID from all of the boarding passes. """ return max(get_seat_ids())
0e8f95c9455869d283acfb9d6230c8a6f2ca10eb
3,647,275
def error_function_index(gpu_series, result_series): """ utility function to compare GPU array vs CPU array Parameters ------ gpu_series: cudf.Series GPU computation result series result_series: pandas.Series Pandas computation result series Returns ----- double maximum error of the two arrays int maximum index value diff """ err = error_function(gpu_series, result_series) error_index = np.abs(gpu_series.index.to_array() - result_series.index.values).max() return err, error_index
1886df532808be8e54ffc2448c74fcb415b4424a
3,647,276
def get_tipo_aqnext(tipo) -> int: """Solve the type of data used by DJANGO.""" tipo_ = 3 # subtipo_ = None if tipo in ["int", "uint", "serial"]: tipo_ = 16 elif tipo in ["string", "stringlist", "pixmap", "counter"]: tipo_ = 3 elif tipo in ["double"]: tipo_ = 19 elif tipo in ["bool", "unlock"]: tipo_ = 18 elif tipo in ["date"]: tipo_ = 26 elif tipo in ["time"]: tipo_ = 27 return tipo_
d5a066b98aa56785c4953a7ec8d7052e572e5630
3,647,277
from typing import List from typing import Dict def fetch_indicators_command(client: Client) -> List[Dict]: """Wrapper for fetching indicators from the feed to the Indicators tab. Args: client: Client object with request Returns: Indicators. """ indicators = fetch_indicators(client) return indicators
eb59b68362e0b30fdc5643259a1ddf757b7afce1
3,647,278
def hr_lr_ttest(hr, lr): """ Returns the t-test (T statistic and p value), comparing the features for high- and low-risk entities. """ res = stats.ttest_ind(hr.to_numpy(), lr.to_numpy(), axis=0, nan_policy="omit", equal_var=False) r0 = pd.Series(res[0], index=hr.columns) r1 = pd.Series(res[1], index=hr.columns) return pd.DataFrame({"ttest_T": r0, "ttest_p": r1})
86ccbbf3119ce7fc809ec68d50b57d514efb29b2
3,647,279
def _is_empty(str_: str) -> bool:
    """Check whether a string is empty.

    Args:
        str_ (str): The string to check.

    Returns:
        bool: True if the string is empty, False otherwise.
    """
    if str_:
        return False
    return True
f0eff540767028a80a3042e2d5bc6951ad28fe24
3,647,280
import random def energy_generate_random_range_dim2(filepath,dim_1_low,dim_1_high,dim_2_low,dim_2_high,num=500): """ 6, 8 and 10 """ queryPool=[] query=[] for _ in range(num): left1 = random.randint(dim_1_low, dim_1_high) right1 = random.randint(left1, dim_1_high) query.append((left1, right1)) left2 = random.randint(dim_2_low, dim_2_high) # right2 = random.randint(left2, dim_2_high) query.append((left2, left2)) queryPool.append(query[:]) query.clear() with open(filepath,"w+") as f: f.write(str(queryPool)) return queryPool
cdcafba427dbbab9b9e318f58f54a3a3c834bbd3
3,647,281
import os def preprocess(data_folder): """ Runs the whole pipeline and returns NumPy data array""" SAMPLE_TIME = 30 CHANNELS = ['EEG Fpz-Cz', 'EEG Pz-Oz'] res_array = [] for path in os.listdir(data_folder): if path.endswith("PSG.edf"): full_path = os.path.join(data_folder, path) raw = mne.io.read_raw_edf(full_path, preload=True) mne_eeg = remove_sleepEDF(raw, CHANNELS) mne_filtered = filter(mne_eeg, CHANNELS) epochs = divide_epochs(mne_filtered, SAMPLE_TIME) epochs = downsample(epochs, CHANNELS) epochs = epochs.get_data() # turns into NumPy Array f_epochs = normalization(epochs) res_array.append([f_epochs, path[:path.index("-")]]) #save(f_epochs, path[:path.index("-")], output_folder) return res_array
62e44f384768541715d4254832d32169ea51f533
3,647,282
from typing import Optional from typing import Sequence def get_waas_policies(compartment_id: Optional[str] = None, display_names: Optional[Sequence[str]] = None, filters: Optional[Sequence[pulumi.InputType['GetWaasPoliciesFilterArgs']]] = None, ids: Optional[Sequence[str]] = None, states: Optional[Sequence[str]] = None, time_created_greater_than_or_equal_to: Optional[str] = None, time_created_less_than: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWaasPoliciesResult: """ This data source provides the list of Waas Policies in Oracle Cloud Infrastructure Web Application Acceleration and Security service. Gets a list of WAAS policies. ## Example Usage ```python import pulumi import pulumi_oci as oci test_waas_policies = oci.waas.get_waas_policies(compartment_id=var["compartment_id"], display_names=var["waas_policy_display_names"], ids=var["waas_policy_ids"], states=var["waas_policy_states"], time_created_greater_than_or_equal_to=var["waas_policy_time_created_greater_than_or_equal_to"], time_created_less_than=var["waas_policy_time_created_less_than"]) ``` :param str compartment_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment. This number is generated when the compartment is created. :param Sequence[str] display_names: Filter policies using a list of display names. :param Sequence[str] ids: Filter policies using a list of policy OCIDs. :param Sequence[str] states: Filter policies using a list of lifecycle states. :param str time_created_greater_than_or_equal_to: A filter that matches policies created on or after the specified date and time. :param str time_created_less_than: A filter that matches policies created before the specified date-time. """ __args__ = dict() __args__['compartmentId'] = compartment_id __args__['displayNames'] = display_names __args__['filters'] = filters __args__['ids'] = ids __args__['states'] = states __args__['timeCreatedGreaterThanOrEqualTo'] = time_created_greater_than_or_equal_to __args__['timeCreatedLessThan'] = time_created_less_than if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('oci:waas/getWaasPolicies:getWaasPolicies', __args__, opts=opts, typ=GetWaasPoliciesResult).value return AwaitableGetWaasPoliciesResult( compartment_id=__ret__.compartment_id, display_names=__ret__.display_names, filters=__ret__.filters, id=__ret__.id, ids=__ret__.ids, states=__ret__.states, time_created_greater_than_or_equal_to=__ret__.time_created_greater_than_or_equal_to, time_created_less_than=__ret__.time_created_less_than, waas_policies=__ret__.waas_policies)
4ab181b9776226a96b93757feb124c10b68eacc8
3,647,283
def _get_output_type(output): """Choose appropriate output data types for HTML and LaTeX.""" if output.output_type == 'stream': html_datatype = latex_datatype = 'ansi' text = output.text output.data = {'ansi': text[:-1] if text.endswith('\n') else text} elif output.output_type == 'error': html_datatype = latex_datatype = 'ansi' output.data = {'ansi': '\n'.join(output.traceback)} else: for datatype in DISPLAY_DATA_PRIORITY_HTML: if datatype in output.data: html_datatype = datatype break else: html_datatype = ', '.join(output.data.keys()) for datatype in DISPLAY_DATA_PRIORITY_LATEX: if datatype in output.data: latex_datatype = datatype break else: latex_datatype = ', '.join(output.data.keys()) return html_datatype, latex_datatype
4940f931f7ac3b87b68a5e84a5038feea331dac1
3,647,284
import gc def cat_train_validate_on_cv( logger, run_id, train_X, train_Y, test_X, metric, kf, features, params={}, num_class=None, cat_features=None, log_target=False, ): """Train a CatBoost model, validate using cross validation. If `test_X` has a valid value, creates a new model with number of best iteration found during holdout phase using training as well as validation data. Note: For CatBoost, categorical features need to be in String or Category data type. """ if num_class: # This should be true for multiclass classification problems y_oof = np.zeros(shape=(len(train_X), num_class)) y_predicted = np.zeros(shape=(len(test_X), num_class)) else: y_oof = np.zeros(shape=(len(train_X))) y_predicted = np.zeros(shape=(len(test_X))) cv_scores = [] result_dict = {} feature_importance = pd.DataFrame() best_iterations = [] fold = 0 n_folds = kf.get_n_splits() for train_index, validation_index in kf.split(train_X[features], train_Y): fold += 1 logger.info(f"fold {fold} of {n_folds}") X_train, X_validation, y_train, y_validation = _get_X_Y_from_CV( train_X, train_Y, train_index, validation_index ) if log_target: # feature_names accepts only list cat_train = Pool( data=X_train, label=np.log1p(y_train), feature_names=features, cat_features=cat_features, ) cat_eval = Pool( data=X_validation, label=np.log1p(y_validation), feature_names=features, cat_features=cat_features, ) else: # feature_names accepts only list cat_train = Pool( data=X_train, label=y_train, feature_names=features, cat_features=cat_features, ) cat_eval = Pool( data=X_validation, label=y_validation, feature_names=features, cat_features=cat_features, ) model = CatBoost(params=params) # List of categorical features have already been passed as a part of Pool # above. No need to pass via the argument of fit() model.fit(cat_train, eval_set=cat_eval, use_best_model=True) del train_index, X_train, y_train, cat_train gc.collect() if log_target: y_oof[validation_index] = np.expm1(model.predict(cat_eval)) else: y_oof[validation_index] = model.predict(cat_eval) if test_X is not None: cat_test = Pool( data=test_X, feature_names=features, cat_features=cat_features ) if log_target: y_predicted += np.expm1(model.predict(cat_test)) else: y_predicted += model.predict(cat_test) del cat_eval, cat_test best_iteration = model.best_iteration_ best_iterations.append(best_iteration) logger.info(f"Best number of iterations for fold {fold} is: {best_iteration}") cv_oof_score = _calculate_perf_metric( metric, y_validation, y_oof[validation_index] ) cv_scores.append(cv_oof_score) logger.info(f"CV OOF Score for fold {fold} is {cv_oof_score}") del validation_index, X_validation, y_validation gc.collect() feature_importance_on_fold = model.get_feature_importance() feature_importance = _capture_feature_importance_on_fold( feature_importance, features, feature_importance_on_fold, fold ) # util.update_tracking( # run_id, # "metric_fold_{}".format(fold), # cv_oof_score, # is_integer=False, # no_of_digits=5, # ) result_dict = _evaluate_and_log( logger, run_id, train_Y, y_oof, y_predicted, metric, n_folds, result_dict, cv_scores, best_iterations, ) del y_oof gc.collect() result_dict = _capture_feature_importance( feature_importance, n_important_features=10, result_dict=result_dict ) logger.info("Training/Prediction completed!") return result_dict
d4a1248463d7fa1f9f8f192cc9fa02f8fcdcf020
3,647,285
def find_left(char_locs, pt): """Finds the 'left' coord of a word that a character belongs to. Similar to find_top() """ if pt not in char_locs: return [] l = list(pt) while (l[0]-1, l[1]) in char_locs: l = [l[0]-1, l[1]] return l
8e924f301203bcad2936d4cf4d82c6e21cbebb16
3,647,286
import pathlib import urllib def make_file_url(file_id, base_url): """Create URL to access record by ID.""" url_parts = list(urlparse.urlparse(base_url)) url_parts[2] = pathlib.posixpath.join( DATAVERSE_API_PATH, DATAVERSE_FILE_API ) args_dict = {'persistentId': file_id} url_parts[4] = urllib.parse.urlencode(args_dict) return urllib.parse.urlunparse(url_parts)
e4b60f2cfd31a9617ee775d7d8ca0caaa9c692fd
3,647,287
def std_func(bins, mass_arr, vel_arr): """ Calculate std from mean = 0 Parameters ---------- bins: array Array of bins mass_arr: array Array of masses to be binned vel_arr: array Array of velocities Returns --------- std_arr: array Standard deviation from 0 of velocity difference values in each mass bin """ last_index = len(bins)-1 std_arr = [] for index1, bin_edge in enumerate(bins): cen_deltav_arr = [] for index2, stellar_mass in enumerate(mass_arr): if stellar_mass >= bin_edge and index1 == last_index: cen_deltav_arr.append(vel_arr[index2]) elif stellar_mass >= bin_edge and stellar_mass < bins[index1+1]: cen_deltav_arr.append(vel_arr[index2]) mean = 0 # mean = np.mean(cen_deltav_arr) diff_sqrd_arr = [] for value in cen_deltav_arr: diff = value - mean diff_sqrd = diff**2 diff_sqrd_arr.append(diff_sqrd) mean_diff_sqrd = np.mean(diff_sqrd_arr) std = np.sqrt(mean_diff_sqrd) std_arr.append(std) return std_arr
13e53952af3106fb7891859f81c146d4bc92703b
3,647,288
def log_neg(rho,mask=[1,0]): """ Calculate the logarithmic negativity for a density matrix Parameters: ----------- rho : qobj/array-like Input density matrix Returns: -------- logneg: Logarithmic Negativity """ if rho.type != 'oper': raise TypeError("Input must be a density matrix") rhopt = partial_transpose(rho,mask) logneg = log2( rhopt.norm() ) return logneg
b8ed0cd54dd879985ef6265085b789e91beceba7
3,647,289
def create_polygon(pixels_selected: set, raster_path: str) -> gpd.GeoDataFrame: """ It allows to transform each of the indexes of the pixel data in coordinates for further processing the answer polygon Parameters -------------- pixels_selected: set Set with the pixels selected for the Connected component raster_path: str Route to the raster of origin Return -------------- polygon: geopands.GeoDataFrame Polygon generated from the points """ with rio.open(raster_path) as raster: pixels_cords = [] for x, y in pixels_selected: cord = raster.xy(x, y) pixels_cords.append(cord) new_polygon_geometry = Polygon(pixels_cords) polygon_raw = gpd.GeoDataFrame( index=[0], crs=raster.meta["crs"], geometry=[new_polygon_geometry] ).unary_union.convex_hull new_polygon = gpd.GeoDataFrame( index=[0], crs=raster.meta["crs"], geometry=[polygon_raw] ) return new_polygon
f2484afcfb73a3adbdaaeacf25287c1c2ce1584a
3,647,290
import copy def read_output(path_elec,path_gas): """ Used to read the building simulation I/O file Args: path_elec: file path where data is to be read from in minio. This is a mandatory parameter and in the case where only one simulation I/O file is provided, the path to this file should be indicated here. path_gas: This would be path to the gas output file. This is optional, if there is no gas output file to the loaded, then a value of path_gas ='' should be used Returns: btap_df: Dataframe containing the clean building parameters file. floor_sq: the square foot of the building """ # Load the data from blob storage. s3 = acm.establish_s3_connection(settings.MINIO_URL, settings.MINIO_ACCESS_KEY, settings.MINIO_SECRET_KEY) logger.info("read_output s3 connection %s", s3) btap_df_elec = pd.read_excel(s3.open(settings.NAMESPACE.joinpath(path_elec).as_posix())) if path_gas: btap_df_gas = pd.read_excel(s3.open(settings.NAMESPACE.joinpath(path_gas).as_posix())) btap_df = pd.concat([btap_df_elec, btap_df_gas], ignore_index=True) else: btap_df = copy.deepcopy(btap_df_elec) floor_sq = btap_df['bldg_conditioned_floor_area_m_sq'].unique() # dropping output features present in the output file and dropping columns with one unique value output_drop_list = ['Unnamed: 0', ':erv_package', ':template'] for col in btap_df.columns: if ((':' not in col) and (col not in ['energy_eui_additional_fuel_gj_per_m_sq', 'energy_eui_electricity_gj_per_m_sq', 'energy_eui_natural_gas_gj_per_m_sq', 'net_site_eui_gj_per_m_sq'])): output_drop_list.append(col) btap_df = btap_df.drop(output_drop_list,axis=1) btap_df = copy.deepcopy(clean_data(btap_df)) btap_df['Total Energy'] = copy.deepcopy(btap_df[['net_site_eui_gj_per_m_sq']].sum(axis=1)) drop_list=['energy_eui_additional_fuel_gj_per_m_sq','energy_eui_electricity_gj_per_m_sq','energy_eui_natural_gas_gj_per_m_sq','net_site_eui_gj_per_m_sq'] btap_df = btap_df.drop(drop_list,axis=1) return btap_df,floor_sq
7ec4ce2d9776946e310fd843e722d0189c4ebcb2
3,647,291
def parse_lmap(filename, goal, values): """Parses an LMAP file into a map of literal weights, a LiteralDict object, the literal that corresponds to the goal variable-value pair, and the largest literal found in the file.""" weights = {} max_literal = 0 literal_dict = LiteralDict() for line in open(filename): if (line.startswith('cc$I') or line.startswith('cc$C') or line.startswith('cc$P')): components = line.split('$') literal = int(components[2]) weights[literal] = components[3] max_literal = max(max_literal, abs(literal)) if line.startswith('cc$I'): variable = components[5] value = int(components[6].rstrip()) literal_dict.add(variable, values[variable][value], literal=literal) if variable == goal.variable and value == goal.value_index: goal_literal = literal return weights, literal_dict, goal_literal, max_literal
db6a0e5f56817e7dd0ef47b5e72b2ea30256b2a3
3,647,292
def read_image(path: str): """ Read an image file :param path: str. Path to image :return: The image """ return imageio.imread(path)
8f3342f2454a3d3e821962d7040eebdbaee502cf
3,647,293
def electrolyte_conductivity_Capiglia1999(c_e, T, T_inf, E_k_e, R_g): """ Conductivity of LiPF6 in EC:DMC as a function of ion concentration. The original data is from [1]. The fit is from Dualfoil [2]. References ---------- .. [1] C Capiglia et al. 7Li and 19F diffusion coefficients and thermal properties of non-aqueous electrolyte solutions for rechargeable lithium batteries. Journal of power sources 81 (1999): 859-862. .. [2] http://www.cchem.berkeley.edu/jsngrp/fortran.html Parameters ---------- c_e: :class: `numpy.Array` Dimensional electrolyte concentration T: :class: `numpy.Array` Dimensional temperature T_inf: double Reference temperature E_k_e: double Electrolyte conductivity activation energy R_g: double The ideal gas constant Returns ------- :`numpy.Array` Solid diffusivity """ sigma_e = ( 0.0911 + 1.9101 * (c_e / 1000) - 1.052 * (c_e / 1000) ** 2 + 0.1554 * (c_e / 1000) ** 3 ) arrhenius = np.exp(E_k_e / R_g * (1 / T_inf - 1 / T)) return sigma_e * arrhenius
ea487399aba6cd1e70d1b5c84dd6f9294f8754b9
3,647,294
import random def random_bdays(n): """Returns a list of integers between 1 and 365, with length n. n: int returns: list of int """ t = [] for i in range(n): bday = random.randint(1, 365) t.append(bday) return t
7871548db569d435a5975bfa118ad6c262406333
3,647,295
def int_to_charset(val, charset): """ Turn a non-negative integer into a string. """ if not val >= 0: raise ValueError('"val" must be a non-negative integer.') if val == 0: return charset[0] output = "" while val > 0: val, digit = divmod(val, len(charset)) output += charset[digit] # reverse the characters in the output and return return output[::-1]
ec30e014aaf42b6cc3904f13776b4226b0b75a5b
3,647,296
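A small usage sketch for int_to_charset, encoding an integer with an arbitrary character set (the hexadecimal and base-62 alphabets below are illustrative choices):

import string

hex_charset = "0123456789abcdef"
base62 = string.digits + string.ascii_letters

int_to_charset(255, hex_charset)   # -> 'ff'
int_to_charset(0, hex_charset)     # -> '0'
int_to_charset(99999, base62)      # -> 'q0T'  (base-62 representation)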
def search(tabela, *, parms='*', clause=None):
    """Takes the name of the table to be queried as a required parameter,
    takes the search filters as keyword parameters, and returns all rows found.
    """
    banco = Banco()
    banco.connect()
    banco.execute(f"SELECT {parms} FROM {tabela} {clause}")
    rows = banco.fetchall()
    banco.disconnect()
    return rows
0cb0dad5fe0661ee7027ab8b43c28b0351d42a48
3,647,297
import os
from tasks import immath
import numpy as np
from scipy import ndimage
from taskinit import iatool


def make_mask_3d(imagename, thresh, fl=False, useimage=False,
                 pixelmin=0, major=0, minor=0, pixelsize=0, line=False,
                 overwrite_old=True, closing_diameter=6, pbimage=None,
                 myresidual=None, myimage=None, extension='.fullmask',
                 spectral_closing=3):
    """
    Makes a mask on any image you want it to.

    Parameters
    ----------
    imagename : {casa image without file extension}
        Name of image you want to mask, without the file extension.
    thresh : {float}
        Masking threshold, in whatever units the image is using
    fl : {bool}
        If you want to combine the mask with a previous iteration of clean
        (True), if not (i.e. you are using the dirty image) then False.
    useimage : {bool}
        If you want to use the dirty image or the residual for the masking
        (I usually use the residual - so set to False)
    pixelmin : {float}
        Min number of pixels within a masked region to be taken into the
        final mask, i.e. if your beam size is 1arcsec and pixel size is
        0.2 arcsec, then three beams would be pixelmin = 75
    major : {float}
        beam major axis, in arcsec
    minor : {float}
        beam minor axis, in arcsec
    pixelsize : {float}
        length of one side of pixel, in arcsec
    line : {bool}
        if the image is a line or continuum

    Returns
    -------
    mask : {ndarray}
        The final mask (hopefully) as the ".fullmask" image
    """
    ia = iatool()

    mymask = imagename + '.mask'
    if myimage is None:
        myimage = imagename + '.image'
    maskim_nopb = imagename + '{}.nopb'.format(extension)
    maskim = imagename + extension
    threshmask = imagename + '.threshmask'
    if myresidual is None:
        myresidual = imagename + '.residual'
    if pbimage is None:
        pbimage = imagename + '.pb'

    if overwrite_old:
        os.system('rm -rf ' + maskim)
        os.system('rm -rf ' + maskim_nopb)
        os.system('rm -rf ' + threshmask)

    if useimage:
        print 'Using Image'
        immath(imagename=[myimage], outfile=threshmask,
               expr='iif(IM0 > ' + str(thresh) + ',1.0,0.0)')
    else:
        immath(imagename=[myresidual], outfile=threshmask,
               expr='iif(IM0 > ' + str(thresh) + ',1.0,0.0)')

    if fl:
        print 'Combining with previous mask..'
        immath(outfile=maskim_nopb,
               expr='iif(("' + threshmask + '" + "' + mymask + '") > 0.1,1.0,0.0)')
    else:
        print 'Making fresh new mask from image/residual'
        os.system('cp -r ' + threshmask + ' ' + maskim_nopb)

    immath(imagename=[pbimage, maskim_nopb], outfile=maskim,
           expr='iif(IM0 > 0.0, IM1, 0.0)')

    print "Using pixelmin=", pixelmin

    beamarea = (major * minor * np.pi / (4. * np.log(2.))) / (pixelsize**2)
    print 'Beam area', beamarea

    ia.open(maskim)
    mask = ia.getchunk()

    diam = closing_diameter  # Change for large beam dilation
    structure = np.ones((diam, diam))
    dist = ((np.indices((diam, diam)) - (diam - 1) / 2.)**2).sum(axis=0)**0.5
    # circularize the closing element
    structure[dist > diam / 2.] = 0

    if line:
        for k in range(mask.shape[3]):
            mask_temp = mask[:, :, 0, k]
            mask_temp = ndimage.binary_closing(mask_temp, structure=structure)

            labeled, j = ndimage.label(mask_temp)
            myhistogram = ndimage.measurements.histogram(labeled, 0, j + 1, j + 1)
            object_slices = ndimage.find_objects(labeled)
            threshold = pixelmin
            for i in range(j):
                if myhistogram[i + 1] < threshold:
                    mask_temp[object_slices[i]] = 0.0
            mask[:, :, 0, k] = mask_temp

        # add an additional closing run, this time with a 3d (4d?) st. element
        structure_3d = np.ones((diam, diam, 1, spectral_closing))
        dist = ((np.indices((diam, diam)) - (diam - 1) / 2.)**2).sum(axis=0)**0.5
        # circularize the closing element
        dist_3d = np.repeat(dist[:, :, None, None], spectral_closing, axis=3)
        structure_3d[dist_3d > diam / 2.] = 0

        mask_closed = ndimage.binary_closing(mask, structure=structure_3d)
    else:
        raise RuntimeError("3D closing operation can only operate on cubes.")

    ia.putchunk(mask_closed.astype(int))
    ia.done()

    print 'Mask created.'
    return maskim
01e625551d7fb6a8492a1f334294b742283600c1
3,647,298
def hash(data): """run the default hashing algorithm""" return _blacke2b_digest(data)
e12433388a0d392f16a8e11ba812629ed4573ace
3,647,299