content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def sampleWST_boundedFromOneHalf(m, h, decimal_precision=10):
    """Sample a WST reciprocal relation on `m` alternatives whose entries
    keep a margin of at least `h` from one half.

    Parameters
    ----------
    m : int
        Number of alternatives (positive).
    h : float
        Margin in the open interval (0, 0.5).
    decimal_precision : int
        Decimal precision of the resulting reciprocal relation.

    Returns
    -------
    ReciprocalRelation
        A randomly permuted WST relation Q with |Q[i, j] - 0.5| > h.

    Examples
    --------
    >> Q = sampleWST_boundedFromOneHalf(3, 0.2, 4)
    >> Q.show()
    |- 0.9355 0.7037 |
    |- - 0.8775 |
    |- - - |
    """
    assert type(m) is int and m >= 1, "The parameter `m` has to be a positive integer."  # DEBUG
    assert type(h) is float and 0 < h < 1/2, "The parameter `h` has to be a `float` in the interval :math:`(0,0.5)`"
    # Draw every entry uniformly from (0.5 + h, 1), which guarantees the
    # bound away from one half before the random permutation is applied.
    lower = np.zeros((m, m)) + 0.5 + h
    upper = np.zeros((m, m)) + 1
    Q = ReciprocalRelation(np.random.uniform(lower, upper), decimal_precision)
    assert isWST(Q), "An error occured, Q sampled in `sampleWST_boundedFromOneHalf` is not WST."  # DEBUG
    Q.permute(samplePermutation(m))
    return Q
5aa27a3a5f318fc636cfb87db8888203ee0f4491
3,631,200
import os


def which(program):
    """Return the path of *program* if it is an executable, else ``None``.

    If *program* contains a directory component it is checked directly;
    otherwise each entry of the ``PATH`` environment variable is searched.
    """
    def runnable(candidate):
        # Must be a regular file with the execute bit set.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        return program if runnable(program) else None
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry, program)
        if runnable(candidate):
            return candidate
    return None
0f334c76db98b0624082a8097c47bbded64657a6
3,631,201
def mstep(X: np.ndarray, responsibilities: np.ndarray) -> GaussianMixture:
    """Maximization step of EM for a Gaussian mixture.

    Re-estimates the component means, covariances and mixing proportions
    from the soft assignments produced by the E-step.

    :param X: (n, d) array holding the data
    :param responsibilities: (n, K) array holding the responsibilities for
        all components for all examples
    :return GaussianMixture: the updated gaussian mixture
    """
    n, K = responsibilities.shape
    # Effective number of points assigned to each component.
    weight_sums = responsibilities.sum(axis=0)
    new_means = compute_means(X, responsibilities, weight_sums)
    new_proportions = compute_mixture_weights(weight_sums, n, K)
    new_covariances = compute_variances(X, new_means, responsibilities, weight_sums)
    return GaussianMixture(new_means, new_covariances, new_proportions)
4b2c79d786fd613598248937a34e86bbc413e10b
3,631,202
def gene_mode(reference, reads, compress, tab, keep):
    """For when inputs are in-frame genes

    Translates reference and read sequences with codon table `tab`, drops
    reads whose translation contains an internal stop codon, optionally
    compresses the reads via `compressor`, aligns everything with
    `combine_align`, and restores codons afterwards.  Returns 0.
    """
    check_input(reference, reads)
    tab = check_tab(tab)
    ids = []
    names = []
    seqs = []
    og_seqs = {}
    err = []  # ids of reads rejected before alignment
    # Reference sequences are always kept and translated.
    for record in SeqIO.parse(reference, 'fasta'):
        ids.append(record.id)
        names.append(record.description)
        seqs.append(record.seq.translate(table=tab))
        og_seqs[record.id] = record.seq
    for record in SeqIO.parse(reads, 'fasta'):
        # An internal '*' (stop) in the translation — final codon excluded —
        # marks a suspected frameshift/early stop; reject the read.
        if '*' in record.seq.translate(table=tab)[:-1]:
            err.append(record.id)
        else:
            ids.append(record.id)
            names.append(record.description)
            seqs.append(record.seq.translate(table=tab))
            og_seqs[record.id] = record.seq
    # The first (reference) sequence anchors the alignment and is excluded
    # from compression below.
    records = [SeqRecord(seqs[0], id=ids[0], description=names[0])]
    if compress:
        seqs, names, ids, og_seqs = compressor(seqs[1:], names[1:], ids[1:],
                                               og_seqs)
    combine_align(records, ids, names, seqs, keep)
    new_names = dict(zip(ids, names))
    new_names[records[0].id] = records[0].description
    restore_codons(og_seqs, new_names)
    if len(err) > 0:
        print('The following '+str(len(err))+' sequence(s) were suspected of '
              'containing frameshifts, an early stop codon, or contained too '
              'many Ns and so were thrown out before multiple alignment:')
        for e in err:
            print(e)
        print('\n')
    return 0
3b9406ac52f88855983942f0b406154c5aa08e00
3,631,203
def _acceptance_rule(fx: float, fn: float, temp: float) -> bool: """Metropolis acceptance rule""" dfx = fn - fx return (dfx < 0) or ( (dfx > 0) and (np.random.rand() <= np.exp(-(fn - fx) / temp)) )
d3c01d194cd23a1201326fd5f968b08df5d705e0
3,631,204
def listGenericServerEndpoints():
    """Return the list of names of all GenericServerEndpoint objects."""
    return getObjectNameList('GenericServerEndpoint')
c05256b4c9a6701eaf393ea0284e31eaddf42966
3,631,205
def fnCalculate_MaxUnamRangeRate(prf, centre_frequency):
    """Maximum unambiguous range rate when the direction of motion is
    unknown.

    Date: 19 June 2017

    :param prf: pulse repetition frequency
    :param centre_frequency: radar centre frequency
    :return: maximum unambiguous range rate
    """
    return AstCnst.c * prf / (4. * centre_frequency)
7bb24520bf37313246e4335ec1f7c26c67552b94
3,631,206
from datetime import datetime


def convert_moz_time(moz_time_entry):
    """Convert a Mozilla PRTime value to a local ``datetime``.

    PRTime counts microseconds since the Unix epoch, so dividing by 1e6
    yields a regular Unix timestamp.
    [ https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSPR/Reference/PRTime ]

    :param moz_time_entry: timestamp in microseconds since 1970-01-01 UTC
    :return: naive ``datetime`` in the local timezone (the previous
        docstring claimed an ISO-8601 string, but a ``datetime`` object is
        what is actually returned)
    """
    return datetime.fromtimestamp(moz_time_entry / 1000000)
ba15a7ed86d9b608799384e9663d36c1cff36fae
3,631,207
from pydantic import BaseModel  # noqa: E0611


def generate_model_from_yaml(serialized,
                             tablename,
                             primary_key,
                             deserialize_function = None,
                             cls = BaseModel,
                             serialization_config = None,
                             skip_nested = True,
                             default_to_str = False,
                             type_mapping = None,
                             base_model_attrs = None,
                             deserialize_kwargs = None,
                             **kwargs):
    """Generate a :term:`model class` from a serialized YAML string.

    .. versionadded: 0.3.0

    .. note::

      This function *cannot* programmatically create
      :term:`relationships <relationship>`, :term:`hybrid properties
      <hybrid property>`, or :term:`association proxies <association proxy>`.

    :param serialized: YAML data (or the path to a YAML file) whose keys
      become column names and whose value types determine column types.
    :param tablename: Name of the SQL table the model corresponds to.
    :param primary_key: Name of the column/key used as the primary key.
    :param deserialize_function: Optional replacement for the default
      ``yaml.safe_load()`` deserializer; must accept a string and return a
      :class:`dict <python:dict>`.
    :param cls: Base class (or tuple of classes) mixed in with ``BaseModel``
      when generating the new model class.
    :param serialization_config: Optional collection of
      ``AttributeConfiguration`` objects controlling serialization /
      de-serialization; defaults to supporting all keys.
    :param skip_nested: If ``True``, keys holding nested structures are
      ignored; if ``False`` they are treated as ``str``.  Defaults to
      ``True``.
    :param default_to_str: If ``True``, keys of undetermined type map to
      ``str``; otherwise a mapping is looked up via the type's ``__name__``.
    :param type_mapping: Optional overrides mapping Python type names to
      SQLAlchemy column data types.
    :param base_model_attrs: Optional ``dict`` of attributes applied to the
      generated ``BaseModel`` (e.g. ``__table_args__``).
    :param deserialize_kwargs: Optional extra keyword arguments forwarded to
      the deserialize function.
    :param kwargs: Forwarded to ``declarative_base()``.
    :returns: Model class whose structure matches ``serialized``.
    :raises UnsupportedValueTypeError: when a value in ``serialized`` does
      not have a corresponding key in ``type_mapping``
    :raises ValueError: if ``tablename`` is empty
    """
    # pylint: disable=line-too-long
    extra_kwargs = deserialize_kwargs or {}
    from_yaml = parse_yaml(serialized,
                           deserialize_function = deserialize_function,
                           **extra_kwargs)

    if isinstance(from_yaml, list):
        from_yaml = from_yaml[0]

    return generate_model_from_dict(from_yaml,
                                    tablename,
                                    primary_key,
                                    cls = cls,
                                    serialization_config = serialization_config,
                                    skip_nested = skip_nested,
                                    default_to_str = default_to_str,
                                    type_mapping = type_mapping,
                                    base_model_attrs = base_model_attrs,
                                    **kwargs)
901f1223e9eb59ebd31eaa4420c8c06ce28d9b46
3,631,208
import signal def stft_power(data, sf, window=2, step=.2, band=(1, 30), interp=True, norm=False): """Compute the pointwise power via STFT and interpolation. Parameters ---------- data : array_like Single-channel data. sf : float Sampling frequency of the data. window : int Window size in seconds for STFT. 2 or 4 seconds are usually a good default. Higher values = higher frequency resolution = lower time resolution. step : int Step in seconds for the STFT. A step of 0.2 second (200 ms) is usually a good default. * If ``step`` == 0, overlap at every sample (slowest) * If ``step`` == nperseg, no overlap (fastest) Higher values = higher precision = slower computation. band : tuple or None Broad band frequency range. Default is 1 to 30 Hz. interp : boolean If True, a cubic interpolation is performed to ensure that the output is the same size as the input (= pointwise power). norm : bool If True, return bandwise normalized band power, i.e. for each time point, the sum of power in all the frequency bins equals 1. Returns ------- f : :py:class:`numpy.ndarray` Frequency vector t : :py:class:`numpy.ndarray` Time vector Sxx : :py:class:`numpy.ndarray` Power in the specified frequency bins of shape (f, t) Notes ----- 2D Interpolation is done using :py:class:`scipy.interpolate.RectBivariateSpline` which is much faster than :py:class:`scipy.interpolate.interp2d` for a rectangular grid. The default is to use a bivariate spline with 3 degrees. 
""" # Safety check data = np.asarray(data) assert step <= window step = 1 / sf if step == 0 else step # Define STFT parameters nperseg = int(window * sf) noverlap = int(nperseg - (step * sf)) # Compute STFT and remove the last epoch f, t, Sxx = signal.stft(data, sf, nperseg=nperseg, noverlap=noverlap, detrend=False, padded=True) # Let's keep only the frequency of interest if band is not None: idx_band = np.logical_and(f >= band[0], f <= band[1]) f = f[idx_band] Sxx = Sxx[idx_band, :] # Compute power Sxx = np.square(np.abs(Sxx)) # Interpolate if interp: func = RectBivariateSpline(f, t, Sxx) t = np.arange(data.size) / sf Sxx = func(f, t) if norm: sum_pow = Sxx.sum(0).reshape(1, -1) np.divide(Sxx, sum_pow, out=Sxx) return f, t, Sxx
25533d6bc9f353f12ef63a093d284ea4c81ffe17
3,631,209
from opf_python.universal import get_HNF_diagonals


def base_mono_29_30(n):
    """Finds the symmetry preserving HNFs for the base centered monoclinic
    lattices with a determinant of n.

    Assuming for basis 29 A = [[-0.666125, 1.16613, 2.04852],
    [1., 1., 0.], [1.61803, -0.618034, 1.]], for basis 30
    A = [[1., 1., 0.], [1.61803, -0.618034, 1.],
    [-0.0361373, 0.536137, 2.38982]].

    Args:
        n (int): The determinant of the HNFs.

    Returns:
        spHNFs (list of lists): The symmetry preserving HNFs.
    """
    spHNFs = []
    for a, c, f in get_HNF_diagonals(n):
        if f % c != 0:
            continue
        for e in range(0, f, c):
            # g21 must be divisible by f for symmetry to be preserved.
            if (2 * e + e * e / float(c)) % f != 0:
                continue
            for d in range(0, f, c):
                # Same divisibility condition for g11.
                if (2 * d + d * e / float(c)) % f != 0:
                    continue
                for b in range(c):
                    spHNFs.append([[a, 0, 0], [b, c, 0], [d, e, f]])
    return spHNFs
a3ed438b9cf4c60523698838fe90d628f8dc4aac
3,631,210
import argparse


def create_parser():
    """
    Create the CLI parser

    :returns: a parser with subparsers: init, populate, update & query
    --------
    """
    # NOTE(review): `epi` (epilog text) and the handler functions wired in
    # via set_defaults are defined elsewhere in this file.
    parser = argparse.ArgumentParser(description=__doc__, epilog=epi)
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help='verbose output')
    subparsers = parser.add_subparsers(help='Available commands:')
    # init: (re-)create the database and its tables
    init_p = subparsers.add_parser('init', help='Initialise a DB')
    init_p.add_argument('--force', action='store_true', default=False,
                        help=('Reinitilise a database & tables even if '
                              'already exists'))
    # populate: first-time load of results from a pipeline run
    populate_p = subparsers.add_parser('populate',
                                       help=('Populates a database with '
                                             'results of an experiment'))
    populate_p.add_argument('run_type', action='store',
                            choices=('qc', 'mapping', 'assembly', 'ordering',
                                     'annotation'),
                            help=('Populate the database with data from the '
                                  'given pipeline step'))
    populate_p.add_argument('run_path', action='store',
                            help=('Full path to a directory containing '
                                  'finished experiments from a pipeline run'))
    # update: add results from a new experiment to an existing database
    update_p = subparsers.add_parser('update',
                                     help=('Updates a database '
                                           'with results from a '
                                           'new experiment'))
    update_p.add_argument('run_type', action='store',
                          choices=('qc', 'mapping', 'assembly', 'ordering',
                                   'annotation'),
                          help=('Populate the database with data from the '
                                'given pipeline step'))
    update_p.add_argument('run_path', action='store',
                          help=('Full path to a directory containing finished '
                                'experiments from a pipeline run'))
    # query: list predefined queries or run a raw ReQL statement
    query_p = subparsers.add_parser('query',
                                    help=('List available or provide '
                                          'database query functions'))
    query_p.add_argument('-l', '--list', action='store_true', default=False,
                         help='List the pre-defined queries')
    query_p.add_argument('-r', '--ReQL', action='store', default='',
                         help='A ReQL statement')
    # Each sub-command dispatches to its handler through the `func` default.
    init_p.set_defaults(func=init_database_with_default_tables)
    populate_p.set_defaults(func=populate_database_with_data)
    update_p.set_defaults(func=updateDB)
    query_p.set_defaults(func=db_query)
    return parser
5dfd5f55ca1e372af7f823253beb64f198eec3f2
3,631,211
from ._common import connections


def _write_conne(parameters):
    """Write CONNE block data.

    Builds the formatted output lines for the ``CONNE`` (connection) block
    from ``parameters["connections"]``, honouring an optional explicit
    ordering given in ``parameters["connections_order"]``.
    """
    # Reorder connections
    if parameters["connections_order"] is not None:
        order = parameters["connections_order"]
    else:
        order = parameters["connections"].keys()

    # Format
    # The longest connection label determines which format variant applies.
    label_length = len(max(parameters["connections"], key=len)) // 2
    fmt = block_to_format["CONNE"]
    fmt = str2format(fmt[label_length])

    out = []
    for k in order:
        # Start from the default connection values and overlay user data.
        data = deepcopy(connections)
        data.update(parameters["connections"][k])

        values = [
            k,
            data["nseq"],
            data["nadd"][0] if data["nadd"] is not None else None,
            data["nadd"][1] if data["nadd"] is not None else None,
            data["permeability_direction"],
            data["nodal_distances"][0],
            data["nodal_distances"][1],
            data["interface_area"],
            data["gravity_cosine_angle"],
            data["radiant_emittance_factor"],
        ]
        out += write_record(values, fmt)

    return out
2cdd27db72f98c7265273aa90ba1968102ff0e0f
3,631,212
def objects_to_coordinate_arrays(posobjs, coords='auto', degrees=True):
    """
    Converts a sequence of position objects into an array of coordinates.

    `coords` determines the order of the output coordinates - it can be a
    comma-seperated list of coordinate names or if 'auto', it will be
    'lat,long' for all coordinate systems except for Equatorial, which will
    use 'ra,dec'

    if `degrees` is True, returned arrays are in degrees, otherwise radians
    """
    names = None if coords == 'auto' else coords.split(',')
    # Each coordinate object exposes the value in degrees as `.d` and in
    # radians as `.r`; pick the attribute once instead of branching twice.
    unit = 'd' if degrees else 'r'
    rows = []
    for o in posobjs:
        if names is None:
            if isinstance(o, EquatorialCoordinates):
                rows.append((getattr(o.ra, unit), getattr(o.dec, unit)))
            else:
                rows.append((getattr(o.lat, unit), getattr(o.long, unit)))
        else:
            rows.append([getattr(getattr(o, c), unit) for c in names])
    return np.array(rows).T
0b9e4a0bd2a253242e7c88a8119010f94d2294a5
3,631,213
def primset_var(*prims):
    """Create a variable that matches a Primitive node."""
    def _matches_primitive(node):
        # A match is a constant node whose value is one of the given prims.
        return is_constant(node) and node.value in prims
    return var(_matches_primitive)
d8ef25d456052326d338f7ca8ef9a6b55d070709
3,631,214
def flatten_basis_data(basis):
    """Flatten per-shell basis-set data into parallel vectors.

    Takes in an indexable collection of basis-set info (``basis[i]`` is a
    dict with keys ``'coef'``, ``'exp'``, ``'atom'``, ``'am'``, ``'idx'``
    and ``'idx_stride'``) and concatenates all primitive data into flat
    arrays.  Shell-level attributes are repeated once per contraction
    coefficient so every output vector is aligned with ``coeffs``.

    The unused ``nbf`` accumulator from the original implementation
    (computed but never returned) has been removed.

    :return: tuple of jax arrays
        ``(coeffs, exps, atoms, ams, indices, dims)``
    """
    coeffs = []
    exps = []
    atoms = []
    ams = []
    indices = []
    dims = []
    for i in range(len(basis)):
        shell = basis[i]
        n_prim = len(shell['coef'])
        coeffs.extend(shell['coef'])
        exps.extend(shell['exp'])
        # Repeat shell-level metadata once per primitive so all vectors
        # stay aligned with the coefficient vector.
        atoms.extend([shell['atom']] * n_prim)
        ams.extend([shell['am']] * n_prim)
        indices.extend([shell['idx']] * n_prim)
        dims.extend([shell['idx_stride']] * n_prim)
    return (jnp.array(np.asarray(coeffs)),
            jnp.array(np.asarray(exps)),
            jnp.array(np.asarray(atoms)),
            jnp.array(np.asarray(ams)),
            jnp.array(np.asarray(indices)),
            jnp.array(np.asarray(dims)))
83929bf7237caa1fca0db8e47f5674c2729ed8bc
3,631,215
def generate_spherical_1D_filter(size):
    """Generate a discrete circle-shaped (semicircular) 1D filter of odd size.

    The semicircular profile is used because it exhibits a plateau-like
    shape around its centre.  The endpoints of [-1, 1] are excluded so
    every tap is strictly positive.
    (Docstring translated from French; the function returns a numpy array,
    not a list as previously documented.)

    :param size: odd, positive filter length
    :return: numpy array of length ``size`` with values ``sqrt(1 - x**2)``
    """
    assert size % 2 == 1
    x = np.linspace(-1, 1, num=size + 2)[1:-1]
    return np.sqrt(1 - np.square(x))
551dbab78eb18297085e2835368b8618de0670e2
3,631,216
def send_email_with_rate_control(
    user: User,
    alert_type: str,
    to_email: str,
    subject,
    plaintext,
    html=None,
    max_nb_alert=MAX_ALERT_24H,
    nb_day=1,
) -> bool:
    """Same as send_email with rate control over alert_type.
    Make sure no more than `max_nb_alert` emails are sent over the period of
    `nb_day` days

    Return true if the email is sent, otherwise False
    """
    # Normalize the address so rate counting is case/whitespace insensitive.
    to_email = to_email.lower().strip()
    # Count alerts of this type already sent to this address in the window.
    min_dt = arrow.now().shift(days=-1 * nb_day)
    nb_alert = (
        SentAlert.query.filter_by(alert_type=alert_type, to_email=to_email)
        .filter(SentAlert.created_at > min_dt)
        .count()
    )

    if nb_alert >= max_nb_alert:
        LOG.warning(
            "%s emails were sent to %s in the last %s days, alert type %s",
            nb_alert,
            to_email,
            nb_day,
            alert_type,
        )
        return False

    # NOTE(review): the alert is recorded and committed *before* the actual
    # send — presumably so the quota holds even if send_email fails; confirm
    # this is intended.
    SentAlert.create(user_id=user.id, alert_type=alert_type, to_email=to_email)
    db.session.commit()

    send_email(to_email, subject, plaintext, html)
    return True
94abf02d10bbf49ac59a8a0ebc1ac1f258911eef
3,631,217
def equatorial_to_ecliptic(ra, dec):
    """ translate from equatorial ra & dec to ecliptic ones """
    icrs_coord = SkyCoord(ra, dec, unit='deg', frame='icrs', obstime='J2000')
    ecliptic = icrs_coord.transform_to('barycentrictrueecliptic')
    # NOTE(review): returns (lat, lon) — latitude first; confirm callers
    # expect this ordering.
    return ecliptic.lat.value, ecliptic.lon.value
ba12d078e47aeefc3a6c3c7df0919bbf81e5e68b
3,631,218
def get_decode_dir_name(ckpt_name):
    """Make a descriptive name for the decode dir, including the name of the
    checkpoint we use to decode. This is called in single_pass mode."""
    # Identify the dataset split from the data path (checked in this order).
    for dataset in ("train", "val", "test"):
        if dataset in FLAGS.data_path:
            break
    else:
        raise ValueError("FLAGS.data_path %s should contain one of train, val or test" % (FLAGS.data_path))
    dirname = "decode_%s_%imaxenc_%ibeam_%imindec_%imaxdec" % (
        dataset, FLAGS.max_enc_steps, FLAGS.beam_size,
        FLAGS.min_dec_steps, FLAGS.max_dec_steps)
    if ckpt_name is not None:
        dirname += "_%s" % ckpt_name
    return dirname
8d0f7283ae4342dfd6e3d017c91d7a6a1bc1f11a
3,631,219
from xone import calendar


def trade_day(dt, cal='US'):
    """
    Latest trading day w.r.t given dt

    Args:
        dt: date of reference
        cal: trading calendar

    Returns:
        pd.Timestamp: last trading day

    Examples:
        >>> trade_day('2018-12-25', cal='US').strftime('%Y-%m-%d')
        '2018-12-24'
    """
    ref_date = pd.Timestamp(dt).date()
    # Look back ten calendar days; the window is guaranteed to contain at
    # least one trading day.
    window_start = ref_date - pd.Timedelta('10D')
    return calendar.trading_dates(start=window_start, end=ref_date, cal=cal)[-1]
bd9fc3e1262f6bff1d7945b140f64f5d7b369bbe
3,631,220
def read_cclist(self):
    """Read cross-correlation data from '<maindir>/input/cclist.dat'.

    The file is whitespace-delimited with one header line and columns
    ``station  evid1  evid2  cc_value``.  As a side effect this populates
    ``self.evdict``, mapping ``evid1 -> evid2 -> [[station, cc_value], ...]``.

    Parameters:
    -----------
    self : saes_core
        Instance providing ``maindir``; gains an ``evdict`` attribute.

    Returns:
    --------
    evid1 : list of str
        First event id of every cross-correlation pair (one per data row).
        (The previous docstring advertised several return values, but only
        ``evid1`` was ever returned; the docstring now matches the code.)
    """
    data = np.genfromtxt(self.maindir + '/input/cclist.dat',
                         skip_header=1,
                         dtype=['U10', 'U24', 'U24', 'f8'])
    evid1 = [row[1] for row in data]
    evid2 = [row[2] for row in data]
    sta_cc = [row[0] for row in data]
    cc_val = [row[3] for row in data]
    evdict = {}
    for ev1, ev2, sta, cc in zip(evid1, evid2, sta_cc, cc_val):
        # Nested setdefault replaces the manual two-level dict init.
        evdict.setdefault(ev1, {}).setdefault(ev2, []).append([sta, cc])
    self.evdict = evdict
    return evid1
2a5c765b226fdc00c54df673a08c49be471a3828
3,631,221
def witchingHours(symbol="", **kwargs):
    """This is when option contracts and futures contracts expire on the
    exact same day.

    https://iexcloud.io/docs/api/#witching-hours

    Args:
        symbol (str): symbol to use
    """
    return _base(
        id="PREMIUM_WALLSTREETHORIZON_WITCHING_HOURS",
        symbol=symbol,
        **kwargs
    )
ae779cc2c9f000cb7a7e394cc7aa190079db4deb
3,631,222
import requests
import logging


def is_downloadable(major: int, minor: int, patch: int) -> bool:
    """Test whether is a downloadable nuke version.

    Args:
        major (int): Major version
        minor (int): Minor version
        patch (int): Patch version

    Returns:
        bool: Test result
    """
    version = f'{major}.{minor}v{patch}'
    url = ('https://thefoundry.s3.amazonaws.com/'
           f'products/nuke/releases/{version}/Nuke{version}-linux-x86-release-64.tgz')
    # A HEAD request avoids downloading the (large) archive itself.
    response = requests.head(url)
    logging.getLogger(__name__).debug('testing download url: %s', version)
    return response.status_code == 200
97e7f4c8dc86ac5b5c03c20f34f349800b679abc
3,631,223
def create_write_buffer(context, host):
    """Shorthand for creating a write-only buffer on the GPU.

    The buffer is sized to match ``host.nbytes``.
    """
    flags = ocl.mem_flags.WRITE_ONLY
    return ocl.Buffer(context, flags, host.nbytes)
1efdcabb32dd12341bdc1a5b91ac851d9b77e20a
3,631,224
import struct
import binascii


def packet_read(serial_connection):
    """Read a single framed packet from the given serial connection.

    Returns ``(command_type, payload)`` on success or ``(None, None)`` on
    any framing, length or CRC error (details are printed).
    """
    header = serial_connection.read(3)
    if len(header) != 3:
        print('error: failed to read packet header')
        return None, None

    command_type, payload_size = struct.unpack('>bH', header)

    payload = b''
    if payload_size > 0:
        payload = serial_connection.read(payload_size)
        if len(payload) != payload_size:
            print('error: received {} bytes when expecting {}'.format(
                len(payload), payload_size))
            print('error: payload:', binascii.hexlify(payload))
            return None, None

    footer = serial_connection.read(2)
    if len(footer) != 2:
        print('error: failed to read packet footer')
        return None, None

    (crc,) = struct.unpack('>H', footer)
    if crc != crc_ccitt(header + payload):
        print('error: crc mismatch of received packet')
        return None, None

    return command_type, payload
6396cb748d4b2c72525d16d8983450634f643635
3,631,225
def is_nonnegative_length(G, l):
    """
    Checks whether a length function, defined on the arcs, satisfies the
    non-negative condition.

    Args:
        G: An instance of Graph class.
        l: A dictionary that defines a length function on the edge set.

    Returns:
        A boolean, True if the length function satisfies the non-negativity
        condition, False in other case.
    """
    assert G.directed
    # Non-negativity condition: every arc must have length >= 0.
    # (Comment translated from Spanish; the manual loop was replaced by all().)
    return all(l[e] >= 0 for e in G.aristas)
c99aaf07b65f9a192b6421b4b3ccf73c98917500
3,631,226
def prepare():
    """ Configure environment for testing (like ansible playbook) """
    # Switch the Prepare step on, then report which step this is.
    tmt.steps.Prepare.enabled = True
    return 'prepare'
f3ca8a5914a6e4747cb3ea46cfbe8115c0331245
3,631,227
import os


def get_verified_absolute_path(path):
    """Verify and return absolute path of argument.

    Args:
        path : Relative/absolute path

    Returns:
        Absolute path

    Raises:
        RuntimeError: if the resolved path does not exist.
    """
    resolved = os.path.abspath(path)
    if os.path.exists(resolved):
        return resolved
    raise RuntimeError("No valid path for requested component exists")
2d7c6dcb6066c81b3506837534a72aa814e1faa6
3,631,228
def unflatten_tensor(input, feat_size, anchors):
    """
    Un-flattens and un-permutes a tensor from size
    [B x (W x H) x C] --> [B x C x W x H]
    """
    batch = input.shape[0]
    # A 2-D input is treated as having a single channel.
    channels = input.shape[2] if len(input.shape) >= 3 else 1
    reshaped = input.view(batch,
                          feat_size[0] * anchors.shape[0],
                          feat_size[1],
                          channels)
    return reshaped.permute(0, 3, 1, 2).contiguous()
9e7b603071312ea35fa214b3e5a6f586d652c760
3,631,229
def pct_change(df_or_series, periods=1, fill_method='pad', limit=None,
               freq=None, **kwargs):
    """
    Percentage change between the current and a prior element.

    Computes the percentage change from the immediately previous row by
    default. This is useful in comparing the percentage of change in a
    time series of elements.

    Parameters
    ----------
    periods : int, default 1
        Periods to shift for forming percent change.
    fill_method : str, default 'pad'
        How to handle NAs before computing percent changes.
    limit : int, default None
        The number of consecutive NAs to fill before stopping.
    freq : DateOffset, timedelta, or str, optional
        Increment to use from time series API (e.g. 'M' or BDay()).
    **kwargs
        Additional keyword arguments are passed into `DataFrame.shift` or
        `Series.shift`.

    Returns
    -------
    chg : Series or DataFrame
        The same type as the calling object.

    See Also
    --------
    Series.diff : Compute the difference of two elements in a Series.
    DataFrame.diff : Compute the difference of two elements in a DataFrame.
    Series.shift : Shift the index by some number of periods.
    DataFrame.shift : Shift the index by some number of periods.
    """
    axis = validate_axis(kwargs.pop('axis', 0))
    if fill_method is None:
        filled = df_or_series
    else:
        filled = df_or_series.fillna(method=fill_method, axis=axis,
                                     limit=limit)
    shifted = filled.shift(periods=periods, freq=freq, axis=axis, **kwargs)
    result = filled.div(shifted) - 1
    if freq is not None:
        # Shift is implemented differently when freq is not None; drop the
        # duplicated index entries and restore the original index.
        result = result.loc[~result.index.duplicated()]
        result = result.reindex_like(filled)
    return result
e4bc49bdaa2fb5ebf0919a68d685eef612819864
3,631,230
def get_percentage_from_total(total_serie, fraction_serie):
    """
    Get which percentage is each element of a serie from the same element
    (same date) on another serie.
    """
    series_pair = [fraction_serie, total_serie]
    return compute_time_series(series_pair, utils.get_percent)
eb08c429da50c3408c6cdb90ea24c67d9f099680
3,631,231
def get_isotope_data(isotope_string: str) -> dict:
    """Get the isotope's intrinsinc properties from a JSON data file."""
    key = format_isotope_string(isotope_string)
    # Copy so callers cannot mutate the shared ISOTOPE_DATA table.
    properties = dict(ISOTOPE_DATA[key])
    properties["isotope"] = key
    return properties
08c5271e49db49f5cef45eb539797db8a2575eeb
3,631,232
def resample_bins(xb, yb, min_bin=10, beta=0.5):
    """
    Do a bootstrap resample within the len(xb) bins in the list yb.  Only
    resample if there are at least min_bin elements in a bin, otherwise
    reject the entire bin with probability (1-beta).

    Parameters
    ----------
    xb : array_like
        Bin positions (one per bin).
    yb : list of arrays
        Samples contained in each bin.
    min_bin : int
        Minimum number of elements for a bin to be bootstrap-resampled.
    beta : float
        Probability of keeping (unresampled) a bin smaller than min_bin.

    Returns
    -------
    xb_r, yb_r
        Kept bin positions and their (possibly resampled) samples; big bins
        come first, followed by the kept small bins.
    """
    xb = np.asarray(xb)
    # BUGFIX: np.array(map(len, yb)) is a Python-2 leftover; in Python 3
    # `map` returns an iterator, so materialize the lengths explicitly.
    bin_size = np.array([len(y_) for y_ in yb])
    b_mask = bin_size >= min_bin
    # Bootstrap resample (sampling with replacement) within the big bins.
    yb_r = [y_[np.random.randint(0, len(y_), len(y_))]
            for y_, b in zip(yb, b_mask) if b]
    yb_sm = [y_ for y_, b in zip(yb, b_mask) if not b]
    xb_r = xb[b_mask]
    # Keep each small bin (unresampled) with probability beta.
    keep_small = np.random.rand(len(xb) - b_mask.sum()) < beta
    xb_r = np.r_[xb_r, xb[~b_mask][keep_small]]
    yb_r.extend([y_ for y_, k in zip(yb_sm, keep_small) if k])
    return xb_r, yb_r
f9aa729266562332e5a5b67beb0e218a20b65733
3,631,233
from typing import Dict
from typing import Any
import logging
def _run_optimizers(
    product_batch: Dict[str, Any],
    language: str,
    country: str,
    currency: str,
    cached_optimizers: optimizer_cache.OptimizerCache,
) -> (Dict[str, Any], Dict[str, optimization_result.OptimizationResult]):
  """Runs every cached optimizer over a JSON batch of product data.

  Args:
    product_batch: A batch of product data.
    language: The language to use for this request.
    country: The country to use for this request.
    currency: The currency to use for this request.
    cached_optimizers: A cache of optimizer classes.

  Returns:
    The optimized product batch, and a mapping from optimizer parameter
    name to that optimizer's OptimizationResult.
  """
  optimization_results = {}

  # Attribute mining is optional; skip it entirely when no configured
  # optimizer needs mined attributes.
  mined_attributes = ({} if not _mined_attributes_required() else
                      _get_mined_attributes(product_batch, language, country))

  # Instantiate the cached optimizer classes and index the instances by
  # the parameter name each one answers to.
  optimizer_mapping = {}
  for optimizer_class in cached_optimizers.optimizer_classes:
    instance = optimizer_class(mined_attributes)
    optimizer_mapping[instance.optimizer_parameter] = instance

  # Honour the required ordering (some optimizers must run last).
  for parameter in _generate_optimizer_parameter_list_to_run(
      optimizer_parameters_to_run_last=_OPTIMIZERS_TO_RUN_LAST):
    optimizer = optimizer_mapping.get(parameter)
    if not optimizer:
      continue
    logging.info(
        'Running optimization %s with language %s, country %s, currency %s',
        parameter, language, country, currency)
    product_batch, result = optimizer.process(
        product_batch, language, country, currency)
    optimization_results[parameter] = result

  return product_batch, optimization_results
e2346bd670099e86787d2b8690d7294a5da9f7d2
3,631,234
import curses
import math
def get_colour_depth(nr_colours=None):
    """Return the maximum number of values per colour channel that a
    palette of ``nr_colours`` colours can represent, i.e. the largest
    integer ``d`` with ``d ** 3 <= nr_colours``.

    Parameters
    ----------
    nr_colours : int, optional
        Palette size.  Defaults to ``curses.COLORS`` (only meaningful
        once curses colour support has been initialised).

    Returns
    -------
    int
        Number of possible values per colour channel.
    """
    if nr_colours is None:
        nr_colours = curses.COLORS
    # BUG FIX: int(math.pow(n, 1/3)) truncates, and float cube roots of
    # perfect cubes round *down* (216 ** (1/3) == 5.999...), yielding one
    # value too few.  Round to the nearest integer, then correct any
    # overshoot so that depth ** 3 never exceeds the palette size.
    depth = int(round(math.pow(nr_colours, 1. / 3.)))
    while depth > 0 and depth ** 3 > nr_colours:
        depth -= 1
    return depth
3bd47ee65a7db72d87ac7cc965a43e37724e148a
3,631,235
def find_best_hand(*args):
    """Select the strongest 5-card hand from the supplied cards.

    Parameters
    ----------
    args : Multiple 'Card' type.
        The pool of cards (at least 6) to draw the best hand from.

    Returns
    -------
    tuple
        ``(best_hand, best_score)`` where ``best_hand`` is a 5-tuple of
        cards and ``best_score`` its ``classify_hand`` rating.
    """
    assert len(args) >= 6, "Less than 6 cards provided."

    best_hand, best_score = None, None
    # Examine every 5-card combination, keeping the earliest one that
    # achieves the highest score seen so far.
    for candidate in it.combinations(args, 5):
        score = classify_hand(*candidate)
        if best_score is None or score > best_score:
            best_hand, best_score = candidate, score
    return best_hand, best_score
ba6cc2d190d06783210031124b2f92aeef0319ce
3,631,236
def google_translate(request):
    """Proxy a translation request to the Google machine translation
    service.

    Expects ``text`` and ``locale`` GET parameters and replies with the
    payload from ``get_google_translate_data``, or an HTTP 400 when the
    input is missing/empty or the lookup fails.
    """
    try:
        text = request.GET["text"]
        locale_code = request.GET["locale"]
        if not locale_code:
            raise ValueError("Locale code is empty")
    except (MultiValueDictKeyError, ValueError) as e:
        message = "Bad Request: {error}".format(error=e)
        return JsonResponse({"status": False, "message": message}, status=400)

    data = get_google_translate_data(text, locale_code)
    # Downstream failure is reported with the same payload but a 400.
    status = 200 if data["status"] else 400
    return JsonResponse(data, status=status)
03d5746ac0eddb953d0f02fd78f6883c6c0809c9
3,631,237
import re
def parseLemma(lines):
    """Parse lemma description lines into a dict keyed by the
    normalized lemma string.

    Lines beginning with ``!`` or lacking ``|`` separators are ignored;
    lines with fewer than four ``|`` get padding inserted before the
    final (lexicon) field so the pattern still lines up.

    :param lines: iterable of raw lemma lines
    :return: dict mapping normalized lemma -> group dict
    # ToDo: Fix issue with Caeres2=Cāeres|miles|Cāerĭt,Cāerĭtēt||ĭtis, (-ētis), f.|2 Caerēs=Cāerēs|diues|Cāerĭt||ĭtis|2
    """
    lemmas = {}
    lemma_pattern = re.compile(
        r"^(?P<lemma>\w+\d?){1}(?:\=(?P<quantity>[\w,]+))?\|"
        r"(?P<model>\w+)?\|"
        r"[-]*(?P<geninf>[\w,]+)?[-]*\|"
        r"[-]*(?P<perf>[\w,]+)?[-]*\|"
        r"(?P<lexicon>.*)?",
        flags=re.UNICODE
    )
    for lineno, line in enumerate(lines):
        if line.startswith("!") or "|" not in line:
            continue
        missing = 4 - line.count("|")
        if missing > 0:
            # Some lines lack one or more '|'; insert them just before
            # the last separator (i.e. before the dictionary field).
            last = line.rfind("|")
            line = line[:last] + "|" * missing + line[last:]
        match = lemma_pattern.match(line)
        if match:
            parsed = match.groupdict(default=None)
            # Keys are always stored in normalized form.
            lemmas[normalize_unicode(parsed["lemma"])] = parsed
        else:
            print("Unable to parse lemma", line)
    return lemmas
c45f80badb3f3d2a2d04777bec304608269f28d4
3,631,238
from typing import Optional
import re
def get_genre_regexp(ctx: Context, actor: Actor) -> Optional[str]:
    """Return the first genre whose pattern matches the user's last
    request, or ``None`` when nothing matches.

    Genre patterns live in the module-level ``GENRE_DICT``; intended for
    use through the adapter as a condition or processing function.
    """
    utterance = ctx.last_request
    matches = (genre for genre, pattern in GENRE_DICT.items()
               if re.search(pattern, utterance))
    return next(matches, None)
3d15c7babdb76be6ae63970df59366cbe4ebeaef
3,631,239
def mapper(request_id, bucket, prefix):
    """Describe how to process an input zarr store chunk by chunk.

    Opens ``s3://bucket/prefix`` anonymously and emits one work item per
    chunk along the row axis of the store's ``data`` array.  Each item
    carries the bucket/prefix plus the row range of that chunk.
    """
    s3_path = f"{bucket}/{prefix}"
    print(f"Opening path {s3_path}")
    root = open_zarr(s3_path, anon=True)

    rows_per_chunk = root.data.chunks[0]
    chunk_count = root.data.nchunks
    print(f"ChunkRows {rows_per_chunk} nchunks {chunk_count}")

    return [
        {
            "bucket": bucket,
            "prefix": prefix,
            "start_row": chunk_index * rows_per_chunk,
            "num_rows": rows_per_chunk,
        }
        for chunk_index in range(chunk_count)
    ]
e025a26521a53c8b9aec7410a63ae9c9875123c8
3,631,240
from typing import Optional
from typing import Union
def byteify(data: Optional[Union[str, bytes]], encoding='utf-8', if_none=None) -> bytes:
    """Coerce *data* to ``bytes``, encoding text with *encoding*.

    ``bytes`` input is returned untouched.  When *data* is ``None``:
    if *if_none* is supplied it is byteified and returned instead,
    otherwise the call falls through to ``bytes(None, encoding)`` and
    :class:`TypeError` is raised, matching :func:`bytes` itself::

        >>> byteify("hello world")
        b'hello world'
        >>> byteify(None, if_none="")
        b''
    """
    if data is None and if_none is not None:
        # Substitute the fallback, encoding it if it is not bytes yet.
        if type(if_none) is bytes:
            return if_none
        return bytes(if_none, encoding)
    if type(data) is bytes:
        return data
    return bytes(data, encoding)
aac62c4925ab204386d4fcfb927972fbc47c974b
3,631,241
def vote_details(request, id):
    """
    Details for a vote.

    Parameters:
    id -- the id of the `Vote`.

    Returns an empty dict when no vote with that uid exists, otherwise a
    dict describing the vote, its author and its post.
    """
    # BUG FIX: .filter() returns a QuerySet, which has no .id/.author/...
    # attributes -- the attribute accesses below raised AttributeError.
    # Fetch the first matching row instead.
    vote = Vote.objects.filter(uid=id).first()
    if vote is None:
        return {}

    data = {
        'id': vote.id,
        'author_id': vote.author.id,
        'author': vote.author.name,
        'post_id': vote.post.id,
        'type': vote.get_type_display(),
        'type_id': vote.type,
        'date': util.datetime_to_iso(vote.date),
    }
    return data
f8066bc8f94308b2df2e0acc7d4703bd8937b48a
3,631,242
def get_p_vals(location_median_results, author_gender_median_results, date_median_results):
    """
    One-way ANOVA tests for independence of the median distance between
    female instances across three groupings.

    Each argument is a mapping from category value to a list of
    ``(item, median)`` pairs, as produced by
    **results_by_location(results, 'median')**,
    **results_by_author_gender(results, 'median')** and
    **results_by_date(results, 'median')**.

    :return: data-frame with 3 p-values, one per category comparison
    """
    names = ["location", "male_vs_female_authors", "date"]

    def _medians_per_key(results):
        # For every category value, strip the medians out of the
        # (item, median) pairs, preserving key order.
        return [[pair[1] for pair in results[key]] for key in results]

    location_medians = _medians_per_key(location_median_results)
    author_gender_medians = _medians_per_key(author_gender_median_results)
    date_medians = _medians_per_key(date_median_results)

    # Location and gender compare the first two groups only; dates use all.
    _, location_pval = stats.f_oneway(location_medians[0], location_medians[1])
    _, author_gender_pval = stats.f_oneway(author_gender_medians[0],
                                           author_gender_medians[1])
    _, date_pval = stats.f_oneway(*date_medians)

    pvals = [location_pval, author_gender_pval, date_pval]
    return pnds.DataFrame({"names": names, "pvals": pvals})
8bd4f9ef60891f2767c79f28a9d51bcff72d2851
3,631,243
from shutil import copyfile
def copyfiles(filelist, dest, copy=False):
    """Copy or symlink files in ``filelist`` to ``dest`` directory.

    Parameters
    ----------
    filelist : list
        List of files to copy.
    dest : path/files
        full path to destination. If it is a list of length greater
        than 1, then it assumes that these are the names of the new
        files.
    copy : Bool
        specifies whether to copy or symlink files
        (default=False) but only for posix systems

    Returns
    -------
    None

    """
    # Normalise both arguments to lists so single paths, lists of paths
    # and nested lists are handled uniformly.
    outfiles = filename_to_list(dest)
    newfiles = []
    for i,f in enumerate(filename_to_list(filelist)):
        if isinstance(f, list):
            # Nested list of inputs: recurse and keep the grouping.
            newfiles.insert(i, copyfiles(f, dest, copy=copy))
        else:
            if len(outfiles) > 1:
                # One explicit destination filename per input file.
                destfile = outfiles[i]
            else:
                # Single destination directory: keep the source's name.
                destfile = fname_presuffix(f, newpath=outfiles[0])
            # NOTE(review): `copyfile` as imported from shutil takes no
            # third *positional* argument on Python 3 (follow_symlinks
            # is keyword-only), so this call looks like it expects a
            # project-local copyfile(src, dst, copy) helper that can
            # symlink instead of copy -- confirm which one is in scope.
            copyfile(f,destfile,copy)
            newfiles.insert(i,destfile)
    return newfiles
1e05b1ff194babbe7f32ec59dfb0d426656caec4
3,631,244
def ancestors(repo, subset, x): """Changesets that are ancestors of changesets in set, including the given changesets themselves. If depth is specified, the result only includes changesets up to the specified generation. """ # startdepth is for internal use only until we can decide the UI args = getargsdict(x, 'ancestors', 'set depth startdepth') if 'set' not in args: # i18n: "ancestors" is a keyword raise error.ParseError(_('ancestors takes at least 1 argument')) startdepth = stopdepth = None if 'startdepth' in args: n = getinteger(args['startdepth'], "ancestors expects an integer startdepth") if n < 0: raise error.ParseError("negative startdepth") startdepth = n if 'depth' in args: # i18n: "ancestors" is a keyword n = getinteger(args['depth'], _("ancestors expects an integer depth")) if n < 0: raise error.ParseError(_("negative depth")) stopdepth = n + 1 return _ancestors(repo, subset, args['set'], startdepth=startdepth, stopdepth=stopdepth)
06642e762d040a49600084db94ea36c6c1cec0e7
3,631,245
def mock_down_payment_time_with_raise_closure(option = 1):
    """Build a stand-in for ``input`` that answers the prompts of the
    down-payment-with-raise exercise from canned test values.

    @fn mock_down_payment_time_with_raise_closure
    """
    # Prompt phrase -> attribute of the canned test-value record.  The
    # first matching phrase wins, mirroring the original if-cascade.
    _PHRASE_TO_ATTR = (
        ("annual salary", "annual_salary"),
        ("percent of your salary to save", "portion_saved"),
        ("cost of your dream home", "total_cost"),
        ("percent raise every 6 months", "semi_annual_raise"),
    )

    def mock_down_payment_time_with_raise_input(input_prompt):
        prompt = input_prompt.lower()
        for phrase, attribute in _PHRASE_TO_ATTR:
            if phrase in prompt:
                values = down_payment_time_with_raise_test_values(option)
                return getattr(values, attribute)
        # Unrecognised prompt: should not happen in the exercise.
        return None

    return mock_down_payment_time_with_raise_input
669f7dc5b9945d0d153d908f63b6ea7c0d0ec332
3,631,246
def hist_intersection(histA, histB):
    """
    Calculates the intersection of two histograms.  If two normalised
    histograms are the same then the sum of the intersection will be
    one.  Assumes histograms are normalised.

    Parameters
    ----------
    histA: 1D numpy array
        normalised array where the sum of elements equals 1
    histB: 1D numpy array
        normalised array where the sum of elements equals 1

    Returns
    -------
    similarity: number
        Range 0-1.  With similar -> 1.  Returns 0 on invalid input
        (missing, size mismatch, or not one-dimensional).
    """
    # BUG FIX: `histA == None` on an ndarray is an element-wise
    # comparison, so `or` raised "truth value of an array is ambiguous".
    # Use identity tests instead.
    if histA is None or histB is None:
        return 0
    if len(histA) != len(histB):  # Histograms must be the same size
        return 0
    if histA.ndim != 1 or histB.ndim != 1:  # Single-dimension only
        return 0
    # Element-wise minimum summed == overlapping area of the histograms.
    return np.minimum(histA, histB).sum()
029c2d81a80d9ce89b5fd383fe1db1b8e40cf0ba
3,631,247
import io
import traceback
def failure_format_traceback(fail):
    """
    Render an IFailedFuture's stored exception as a traceback string.

    :param fail: must be an IFailedFuture

    returns a string
    """
    try:
        # IMPROVEMENT: six.StringIO is just io.StringIO on Python 3, so
        # the third-party `six` dependency is dropped in favour of the
        # standard library.
        buffer = io.StringIO()
        # IFailedFuture exposes the exc_info triple via these attributes.
        traceback.print_exception(
            fail._type,
            fail.value,
            fail._traceback,
            file=buffer,
        )
        return buffer.getvalue()
    except Exception:
        # Formatting must never raise; fall back to a best-effort note.
        return u"Failed to format failure traceback for '{0}'".format(fail)
fdcbdf9f7617f401d511c9ce9b58420367419250
3,631,248
def ConvT3D(parent, filters, kernel_size, strides=[1, 1, 1], padding="same",
            use_bias=True, groups=1, dilation_rate=[1, 1, 1], name=""):
    """\
    3D Transposed convolution layer (sometimes called deconvolution).

    Transposed convolutions go in the opposite direction of a normal
    convolution: from something shaped like a convolution's output back
    to something shaped like its input, while keeping a compatible
    connectivity pattern.

    :param parent: parent layer
    :param filters: dimensionality of the output space (i.e., the number
     of output filters in the convolution)
    :param kernel_size: the depth, height and width of the 3D convolution
     window
    :param strides: the strides of the convolution along the depth, height
     and width dimensions
    :param padding: one of "valid" or "same"
    :param use_bias: whether the layer uses a bias vector
    :param groups: number of blocked connections from input to output
     channels
    :param dilation_rate: the dilation rate to use for dilated convolution.
     Spacing between kernel elements
    :param name: name of the output layer
    :return: ConvT3D layer
    """
    # Thin wrapper: forward everything positionally to the pyeddl binding.
    layer = _eddl.ConvT3D(parent, filters, kernel_size, strides, padding,
                          use_bias, groups, dilation_rate, name)
    return layer
e56980ec2a09c16d4392eca96ad0156e4c17fc1d
3,631,249
def bytes_to_int(b: bytes) -> int:
    """
    Convert bytes to a big-endian unsigned int.

    :param b: The bytes to be converted.
    :return: The int (0 for empty input).
    """
    # Fold the bytes in most-significant-first order; equivalent to
    # int.from_bytes(b, byteorder='big', signed=False).
    total = 0
    for byte in b:
        total = (total << 8) | byte
    return total
eb08ae0b2663047557b8f102c6c6ed565aae8044
3,631,250
import argparse
def parse_arguments(argv=None) -> argparse.Namespace:
    """Parse arguments provided by the command-line

    :param argv: optional explicit argument list; when ``None`` (the
        default, and the previous behaviour) argparse reads
        ``sys.argv[1:]``.  Passing a list makes the parser testable.
    :return: namespace of decoded arguments
    """
    parser = argparse.ArgumentParser(description=__doc__)
    pa = parser.add_argument
    pa('filename', type=str, help='input contents filename')
    pa('-p', '--part', type=int, help='solve only the given part')
    pa('-v', '--verbose', action='count', default=0)
    # IMPROVEMENT: forward argv so callers/tests can inject arguments;
    # None preserves the original sys.argv behaviour exactly.
    arguments = parser.parse_args(argv)
    return arguments
637775c9c4950a07ff5ed3391db76c0502ef3068
3,631,251
def train_step(sess, dataset, sequence_number, model, parameters):
    """
    Run one training iteration on a single sequence.

    Feeds the tokens, characters and labels of the selected training
    sequence through the model's train op and returns the trained CRF
    transition parameters.
    """
    # BUG FIX: operate on a copy of the token sequence.  The original
    # code replaced infrequent tokens with UNK *inside the dataset*,
    # permanently corrupting the training data a little more on every
    # epoch instead of re-sampling the dropout each time.
    token_indices_sequence = list(dataset.token_indices['train'][sequence_number])
    for i, token_index in enumerate(token_indices_sequence):
        # Randomly (p=0.5) replace infrequent tokens with UNK so the
        # model learns a useful UNK embedding.
        if token_index in dataset.infrequent_token_indices and np.random.uniform() < 0.5:
            token_indices_sequence[i] = dataset.UNK_TOKEN_INDEX
    feed_dict = {
      model.input_token_indices: token_indices_sequence,
      model.input_label_indices_vector: dataset.label_vector_indices['train'][sequence_number],
      model.input_token_character_indices: dataset.character_indices_padded['train'][sequence_number],
      model.input_token_lengths: dataset.token_lengths['train'][sequence_number],
      model.input_label_indices_flat: dataset.label_indices['train'][sequence_number],
      # dropout_rate is the *drop* probability; TF wants the keep probability.
      model.dropout_keep_prob: 1-parameters['dropout_rate']}
    _, _, loss, accuracy, transition_params_trained = sess.run(
                    [model.train_op, model.global_step, model.loss,
                     model.accuracy, model.transition_parameters],
                    feed_dict)
    return transition_params_trained
bd5b1aef942b585d0ecaad3b1f90ad1b02a21fb4
3,631,252
import numpy
def cos_distance_numpy_vector(v1, v2):
    """get cos angle (similarity) between two vectors"""
    # Normalise each vector to unit length, then the cosine of the angle
    # between them is simply their dot product.
    unit1 = v1 / numpy.sqrt(numpy.sum(v1 * v1))
    unit2 = v2 / numpy.sqrt(numpy.sum(v2 * v2))
    return numpy.sum(unit1 * unit2)
fdbc02c5cba377c561843dd57e9ca13a2e9c6960
3,631,253
import os
def send_smart_contract_thresholds(request, pk):
    """
    Create a smartcontract file on disk and transmit it.

    Builds a Solidity contract from the template's thresholds, streams
    it back as a download, then deletes the temporary file.  Any
    failure yields an HTTP 500 instead of the previous silent None.
    """
    try:
        templ = Template.objects.get(id=pk)
        temp_name = str(slugify(templ.template_name))
        all_thres = Threshold.objects.filter(template=templ)
        path = create_new_smart_contract_with_thresholds(
            template_name=temp_name, thresholds=all_thres)
        contract_file = FileWrapper(open(path, 'rb'))
        response = HttpResponse(contract_file, content_type='text/plain')
        response['Content-Disposition'] = r'attachment; filename=MyPollutionMonitoringContract.sol'
        # The file content is already wrapped; remove the temp file.
        os.remove(path)
        return response
    except Exception:
        # BUG FIX: the original bare `except: pass` made the view return
        # None, which is itself an error in Django.  Surface a server
        # error instead of silently swallowing the failure.
        return HttpResponse(status=500)
7ff6b827dd7d83f2e4e7440b1252ce0322beb7a8
3,631,254
def list_all_submodules(package):
    """ List all the modules in this package with their fully qualified
    names."""
    root_modname = package.__name__

    # A plain (non-package) module has no __path__ attribute and
    # contributes only itself.
    if not hasattr(package, "__path__"):
        return [root_modname]

    children = []
    for _, child_name, child_is_pkg in iter_modules(package.__path__):
        child_fqname = "{}.{}".format(root_modname, child_name)
        # Import every child (sub-packages and modules alike) so the
        # traversal mirrors what an actual import would load.
        child = import_module(child_fqname)
        if child_is_pkg:
            # Recurse into sub-packages and flatten their results.
            children.extend(list_all_submodules(child))
        else:
            children.append(child_fqname)

    # The package itself heads its own flattened module list.
    return [root_modname] + children
81c22be084ebbba6bd300d95e52642b9b33954bc
3,631,255
def int_greater_than(x, inclusive: bool = False):
    """Creates property that must be an int greater than (or equal to)
    some value.

    Parameters:
        x: Value that the property must be greater than
        inclusive (bool): If set to True, includes x as a possibility.

    Returns:
        property
    """
    # Delegate to the generic comparator with the int type constraint.
    return greater_than(x, type_constraint=int, inclusive=inclusive)
8b8a74fe09590d90a77fda2d932a4b3b2905dbcc
3,631,256
from typing import Optional
async def get_Sales_with_date_with_login(q: Query, start_date: Optional[str] = "",
                                         end_date: Optional[str] = ""):
    """
    Get the sales between *start_date* and *end_date*.

    Any failure (bad credentials, upstream error, ...) is surfaced as an
    HTTP 500 with the exception's repr as the detail.
    """
    try:
        toolkit = Toolkit(shop_url=q.shop_url, api_secret=q.api_secret)
        return toolkit.getSales(start_date, end_date)
    except Exception as msg:
        raise HTTPException(status_code=500, detail=repr(msg))
f83fc10fe5fbfaec6805c9a2079217b1e64c6c1e
3,631,257
def cell_to_se2_batch(cell_idx, mapmin, mapres):
    """Convert a batch of grid-cell indices to world coordinates.

    Each index is mapped to the centre of its cell:
    ``(idx + 0.5) * resolution + map_minimum``, per axis.

    INPUT:  cell_idx = [batch_size, 2]
    OUTPUT: two arrays of shape [batch_size] (x and y coordinates)
    """
    x_world = mapmin[0] + (cell_idx[:, 0] + 0.5) * mapres[0]
    y_world = mapmin[1] + (cell_idx[:, 1] + 0.5) * mapres[1]
    return x_world, y_world
72a91b5b3a90014322ad8754848e5f85751d3b3a
3,631,258
def merge(elems, field = None, **kwargs):
    """
    merge the fields for all elements in a list return it as a single
    element.

    Parameters
    -----------
    elems : list. a list of element object
    kwargs: dict. other properties of the new element.

    Examples
    ----------
    >>> bpm = getElements('BPM')
    >>> vpar = { 'virtual': 1, 'name': 'VBPM' }
    >>> vbpm = merge(bpm, **vpar)

    Notes
    ------
    It does not merge the unit conversion. All raw unit.

    NOTE: Python 2 code (uses dict.iteritems).

    seealso :class:`CaElement`
    """
    # count 'field' owners and its rb,wb PVs.
    # count[f] = number of elements owning field f;
    # pvdict[f] = [readback PVs, setpoint PVs] accumulated over elems.
    count, pvdict = {}, {}
    for e in elems:
        fds = e.fields()
        for f in fds:
            if f in count: count[f] += 1
            else: count[f] = 1
            pvrb = e.pv(field=f, handle='readback')
            pvsp = e.pv(field=f, handle='setpoint')
            if f not in pvdict: pvdict[f] = [[], []]
            #print f, pvrb, pvsp
            pvdict[f][0].extend(pvrb)
            pvdict[f][1].extend(pvsp)

    elem = CaElement(**kwargs)
    #print "merged:", elem
    # consider only the common fields
    if field is None:
        # No specific field requested: keep only the fields present in
        # *every* element, wiring up all their readback/setpoint PVs.
        for k,v in count.iteritems():
            if v < len(elems):
                _logger.warn("field '%s' has %d < %d" % (k, v, len(elems)))
                pvdict.pop(k)
        #print pvdict.keys()
        for fld,pvs in pvdict.iteritems():
            if len(pvs[0]) > 0: elem.setGetAction(pvs[0], fld, None, '')
            if len(pvs[1]) > 0: elem.setPutAction(pvs[1], fld, None, '')
        elem.sb = [e.sb for e in elems]
        elem.se = [e.se for e in elems]
        elem._name = [e.name for e in elems]
    elif field in pvdict:
        # A single field was requested: merge only its PVs and record
        # positions/names only for the elements that own the field.
        pvrb, pvsp = pvdict[field][0], pvdict[field][1]
        if len(pvrb) > 0: elem.setGetAction(pvrb, field, None, '')
        if len(pvsp) > 0: elem.setPutAction(pvsp, field, None, '')
        # count the element who has the field
        elemgrp = [e for e in elems if field in e.fields()]
        elem.sb = [e.sb for e in elemgrp]
        elem.se = [e.se for e in elemgrp]
        elem._name = [e.name for e in elemgrp]
        #print pvsp
    else:
        _logger.warn("no pv merged for {0}".format([
            e.name for e in elems]))
    # if all raw units are the same, so are the merged element
    for fld in elem.fields():
        units = sorted([e.getUnit(fld, unitsys=None) for e in elems
                        if fld in e.fields()])
        if units[0] == units[-1]:
            elem.setUnit(fld,
                         units[0], unitsys=None)
    return elem
0801709d9af67963ab0223f502513bf9825e44f1
3,631,259
def add_answerset(m_json, mid=None, **kwargs):
    """Add answerset.

    Persists an ``Answerset`` built from *m_json*.  When *mid* is not
    supplied a fresh UUID string is generated.  Returns the id used.
    """
    if mid is None:
        mid = str(uuid4())
    with session_scope() as session:
        session.add(Answerset(m_json, id=mid, **kwargs))
    return mid
8bf0d9cc9ef0636a465af67eb26a9d0fe446f3ec
3,631,260
def is_reserved(ips):
    """Indicates whether each address is reserved.

    *** Addresses must be IPv4. IPv6 not yet supported. ***

    :param ips: cudf string Series of dotted-quad IPv4 addresses
    :return: cudf boolean Series, True where the address is reserved
    """
    # Allocate a device-side boolean output, one flag per input address.
    res = cudf.Series(rmm.device_array(len(ips), dtype="bool"))
    # Raw device pointer handed to the string-matching kernel so it can
    # write the match results into `res` in place.
    ptr = res.data.mem.device_ctypes_pointer.value
    # First octet restricted to 240-255 (the 240.0.0.0/4 reserved
    # range); the remaining three octets accept any value 0-255.
    reserved_ipv4_REGEX = r"^(2(4[0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$"
    ips.str.match(reserved_ipv4_REGEX, devptr=ptr)
    return res
d40a0092268eee5c6a8d6b22e0b6de9b1ebd8132
3,631,261
def split_last_dimension(x, n):
    """Reshape x so that the last dimension becomes two dimensions,
    the first of which is n.

    Parameters
    ----------
    x
        A Tensor with shape [..., m]
    n: int
        An integer.

    Returns
    -------
    y
        A Tensor with shape [..., n, m/n]
    """
    old_shape = shape_list(x)
    last = old_shape[-1]
    # Only verify divisibility when both sizes are statically known ints.
    if isinstance(last, int) and isinstance(n, int):
        assert last % n == 0
    new_shape = old_shape[:-1] + [n, last // n]
    return tf.reshape(x, new_shape)
9909db0fda7cc5ee0666b910459c598741d5c6e3
3,631,262
from typing import Tuple
async def authenticate_user(
    request: web.Request, for_password_modification=False
) -> Tuple[User, Claims]:
    """Multiple schemes authentication using request Authorization header.

    Supports the "Basic" and "Bearer" schemes.

    Raises HTTPUnauthorized on failure.
    """
    header = request.headers.get("Authorization")
    if not header:
        raise web.HTTPUnauthorized(reason="Missing authorization header")
    # ROBUSTNESS FIX: a header without a space ("Basicabc...") made the
    # tuple unpacking raise ValueError (an HTTP 500); treat a malformed
    # header as a bad authorization (401) instead.
    try:
        scheme, value = header.strip().split(" ", 1)
    except ValueError:
        raise web.HTTPUnauthorized(reason="Bad authorization")
    if scheme == "Basic":
        return await _basic_authentication(value, request.app["identity_backend"])
    if scheme == "Bearer":
        return await _jwt_authentication(
            value,
            request.app["identity_backend"],
            request.app["settings"]["jwt"]["public_key"],
            for_password_modification=for_password_modification,
        )
    raise web.HTTPUnauthorized(reason="Bad authorization")
72b0daa26f4b4c8e589223313c6fbba3e348decf
3,631,263
def nest(collection, *properties):
    """This method is like :func:`group_by` except that it supports
    nested grouping by multiple string `properties`.  If only a single
    key is given, it is like calling ``group_by(collection, prop)``.

    Args:
        collection (list|dict): Collection to iterate over.
        *properties (str): Properties to nest by.

    Returns:
        dict: Results of nested grouping by `properties`.  With no
        properties the collection is returned unchanged.

    .. versionadded:: 4.3.0
    """
    if not properties:
        return collection

    # Properties may arrive nested (e.g. nest(c, ['a', 'b'])); flatten
    # first, then peel off the outermost grouping key.
    keys = pyd.flatten(properties)
    outer_key, inner_keys = keys[0], keys[1:]

    grouped = group_by(collection, outer_key)
    # Recursively group each bucket by the remaining keys.
    return pyd.map_values(grouped, lambda bucket: nest(bucket, *inner_keys))
ca66ca29c9a33674d8c8bb3dc1937cb1e6018a2d
3,631,264
def pyav_decode_stream(
    container, start_pts, end_pts, stream, stream_name, buffer_size=0
):
    """
    Decode the video with PyAV decoder.

    Args:
        container (container): PyAV container.
        start_pts (int): the starting Presentation TimeStamp to fetch the
            video frames.
        end_pts (int): the ending Presentation TimeStamp of the decoded frames.
        stream (stream): PyAV stream.
        stream_name (dict): a dictionary of streams. For example, {"video": 0}
            means video stream at stream index 0.
        buffer_size (int): number of additional frames to decode beyond end_pts.

    Returns:
        result (list): list of frames decoded, sorted by pts.
        max_pts (int): max Presentation TimeStamp of the video sequence.
    """
    # Seeking is imprecise, so back off by a pts margin before start_pts
    # and skip forward while decoding.
    margin = 1024
    seek_offset = max(start_pts - margin, 0)
    container.seek(seek_offset, any_frame=False, backward=True, stream=stream)

    frames = {}
    buffered = 0
    max_pts = 0
    for frame in container.decode(**stream_name):
        max_pts = max(max_pts, frame.pts)
        if frame.pts < start_pts:
            continue
        frames[frame.pts] = frame
        if frame.pts > end_pts:
            # Past the requested window: this frame counts toward the
            # extra buffer allowance.
            buffered += 1
            if buffered >= buffer_size:
                break

    result = [frames[pts] for pts in sorted(frames)]
    return result, max_pts
5b012899c047dcd3ee90d793c68ebdd1d2f413c1
3,631,265
import sqlite3
def encode_data_for_sqlite(value):
    """Return *value* as text when it decodes as UTF-8, otherwise wrap
    it in :class:`sqlite3.Binary` so it is stored as a BLOB."""
    try:
        return value.decode()
    except UnicodeDecodeError:
        # Not valid UTF-8: keep the raw bytes as a BLOB.
        return sqlite3.Binary(value)
    except AttributeError:
        # No .decode() method at all: store as a BLOB too.
        return sqlite3.Binary(value)
fe59a2b0dde5ff7c41acc02c4de6724cc75553fb
3,631,266
def inject_sync_poller(sender, caller, st_type, user, **kwargs):
    """Inject javascript code.

    The CardDAV sync poller is emitted only for the top frame's JS
    injection point, and only when the user has a mailbox and has
    enabled CardDAV synchronisation.
    """
    enabled = (
        caller == "top"
        and st_type == "js"
        and hasattr(user, "mailbox")
        and user.parameters.get_value("enable_carddav_sync")
    )
    if not enabled:
        return ""
    return """<script>
  $(document).ready(function () {
    new Poller('%s', {
      interval: %d * 1000
    });
  });
</script>
""" % (reverse("api:addressbook-sync-from-cdav"),
       user.parameters.get_value("sync_frequency"))
18479a46f02424e7b3e4a0a6e8d2543621ad1115
3,631,267
import logging
import json
def findExtensions(extensionsPath):
    """
    Finds extensions in the selected Firefox profile.

    Parameters
    ----------
    extensionsPath : str
        Path to the extensions.json file.

    Return Values
    -------------
    extensionList : [Extension]
        List of identified extensions (empty if the file is missing).
    """
    extensionList = []

    # Check for presence of extensions.json in profile folder. Return if not found.
    if not fileExists(extensionsPath):
        logging.debug("[!] Failed to Gather Extensions from 'extensions.json'")
        return extensionList

    # Open the file containing addons and load as json object.
    with open(extensionsPath) as extensionsFile:
        extractedExtensions = json.load(extensionsFile)["addons"]

    # Loop through extensions in file and instantiate python objects
    # with the relevant attributes.
    for profileExtension in extractedExtensions:
        permissions = profileExtension["userPermissions"]
        # IDIOM FIX: identity comparison with None (`is not None`, not
        # `!= None`); a null userPermissions entry yields no permissions.
        if permissions is not None:
            permissions = permissions["permissions"]
        else:
            permissions = []

        extensionList.append(Extension(
            name=profileExtension["defaultLocale"]["name"],
            URL=profileExtension["sourceURI"],
            permissions=permissions,
        ))

    # Return list of extension objects.
    logging.debug("[^] Successfully Gathered Extensions From 'extensions.json'")
    return extensionList
e455d0159e345622705f96bcdd98df9fa7c974f7
3,631,268
def parse_pointstamped(point_input):
    """ Parse point_input into PointStamped.

    Accepts a PointStamped (returned as-is), a Point, a Point32, or any
    3-element indexable (e.g. list/tuple of x, y, z); everything except
    an existing PointStamped gets a header stamped with the current ROS
    time.  Raises ValueError when no interpretation succeeds.
    """
    # Already a PointStamped: pass it through untouched.
    try:
        assert isinstance(point_input, PointStamped)
        return point_input
    except:
        pass
    # A bare Point: wrap it and stamp the header with the current time.
    try:
        assert isinstance(point_input, Point)
        point = PointStamped(point = point_input)
        point.header.stamp = rospy.Time.now()
        return point
    except:
        pass
    # A Point32: copy the coordinates into a (64-bit) Point first.
    try:
        assert isinstance(point_input, Point32)
        point = PointStamped(point = Point(x=point_input.x, y=point_input.y, z=point_input.z))
        point.header.stamp = rospy.Time.now()
        return point
    except:
        pass
    # Last resort: treat the input as an indexable [x, y, z] triple.
    try:
        point = point_input
        point = PointStamped(point = Point(x=point[0], y=point[1], z=point[2]))
        point.header.stamp = rospy.Time.now()
        return point
    except Exception as e:
        raise ValueError('Point not properly specified (should be Point, PointStamped or [3] list type)!')
0103abc2b73581daa73c8981d45ad87d77cfba78
3,631,269
def ext_binary_gcd_env(a, b):
    """Extended binary GCD.

    Given positive inputs a, b the function returns d, s, t such that
    gcd(a,b) = d = a*s + b*t."""
    u, v, s, t, r = 1, 0, 0, 1, 0
    # Strip common factors of two: gcd(a0, b0) = 2**r * gcd(alpha, beta).
    while (a & 1 == 0) and (b & 1 == 0):
        a, b, r = a >> 1, b >> 1, r + 1
    alpha, beta = a, b
    #
    # from here on we maintain a = u * alpha + v * beta
    # and b = s * alpha + t * beta
    #
    while (a & 1 == 0):
        a = a >> 1
        if (u & 1 == 0) and (v & 1 == 0):
            u, v = u >> 1, v >> 1
        else:
            u, v = (u + beta) >> 1, (v - alpha) >> 1
    while a != b:
        if (b & 1 == 0):
            b = b >> 1
            #
            # Commentary: note that here, since b is even,
            # (i)   if s, t are both odd then so are alpha, beta
            # (ii)  if s is odd and t even then alpha must be even, so beta is odd
            # (iii) if t is odd and s even then beta must be even, so alpha is odd
            # so for each of (i), (ii) and (iii) s + beta and t - alpha are even
            #
            if (s & 1 == 0) and (t & 1 == 0):
                s, t = s >> 1, t >> 1
            else:
                s, t = (s + beta) >> 1, (t - alpha) >> 1
        elif b < a:
            a, b, u, v, s, t = b, a, s, t, u, v
        else:
            b, s, t = b - a, s - u, t - v
    # BUG FIX: on loop exit a == s*alpha + t*beta, so scaling only `a`
    # by 2**r gives gcd(a0, b0) = (a << r) = s*a0 + t*b0.  The original
    # also shifted s and t, which broke the Bezout identity whenever
    # both inputs were even (it returned coefficients for 2**r * gcd).
    return a << r, s, t
c189fbdd27dcff14bec9093924067f247ea38f88
3,631,270
import os
import yaml
def write_init_file(name, data, path=""):
    """
    This function writes config files for devices and default files.

    :param name: Name of the file to be written to (extension ignored)
    :param data: Data in Dict format (a list of entries is also accepted)
    :param path: Path where to write
    :return: None (the file is recreated from scratch)
    """
    # Work on a copy and exclude the Visa resource from the file.
    data = data.copy()
    # BUG FIX: dicts have no .remove(); the old `data.remove(...)` always
    # raised AttributeError, which a bare `except` silently swallowed, so
    # the Visa resource was never stripped from dict payloads.
    try:
        if isinstance(data, dict):
            data.pop("Visa_Resource", None)
        else:
            data.remove("Visa_Resource")
    except ValueError:
        pass  # resource not present in a list payload

    # Always recreate the target file.  (The original's `elif not
    # isfile(...)` was the exact negation of the `if`, so the trailing
    # `return -1` branch was unreachable; both paths are merged here.)
    yaml_path = os.path.abspath(str(path) + str(name.split(".")[0]) + ".yaml")
    if os.path.isfile(yaml_path):
        os.remove(yaml_path)
    filename, version = create_new_file(
        str(name.split(".")[0]), path, os_file=False, suffix=".yaml"
    )
    yaml.dump(data, filename, indent=4)
    close_file(filename)
18c7d6d5b2eb9b0eee3b09a5f15635da32dce015
3,631,271
import random


def gen_tasksets(
        number_of_sets=100,
        number_of_task=15,
        util_req=0.5,
        period_pdf=[0.03, 0.02, 0.02, 0.25, 0.40, 0.03, 0.2, 0.01, 0.04],
        scalingFlag=True,
        threshold=0.1,
        cylinder=4,
        sumRunnable=True):
    """Main function to generate task sets with the WATERS benchmark.

    Args:
        number_of_sets: number of task sets to generate
        number_of_task: target number of tasks per set (only used when
            sumRunnable is True and runnables get merged into tasks)
        util_req: required utilization per task set
        period_pdf: discrete probability of each period in `periods` below
        scalingFlag: make WCET out of ACET with scaling
        threshold: accuracy of the required utilization
        cylinder: specific value for WATERS (kept for API compatibility)
        sumRunnable: merge runnables with equal periods into tasks

    Returns:
        list of task sets (each a list of `task` objects).
    """
    # Admissible runnable periods (ms) of the WATERS benchmark; the order
    # must stay aligned with period_pdf.
    periods = [1, 2, 5, 10, 20, 50, 100, 200, 1000]

    while True:
        taskset = []

        # Draw one period per runnable from the benchmark distribution.
        dist = stats.rv_discrete(name='periods',
                                 values=(periods, period_pdf))
        runnables = 30000 * number_of_sets  # number of runnables
        sys_runnable_periods = dist.rvs(size=runnables)

        # Build runnables period by period.  This replaces nine copy-pasted
        # per-period blocks: count the draws for each period, sample that
        # many WCETs, and create implicit-deadline tasks (deadline == period).
        for period in periods:
            amount = int((sys_runnable_periods == period).sum())
            wcets = sample_runnable_acet(period, amount, scalingFlag)
            for i in range(amount):
                taskset.append(task(wcet=wcets[i], period=period,
                                    deadline=period))

        # Shuffle the task set.
        random.shuffle(taskset)

        sets = []

        # Select subset of tasks using the subset-sum approximation algorithm.
        for j in range(number_of_sets):
            thisset = taskset[:3000]
            taskset = taskset[3000:]

            # Accumulate utilization until the requirement is exceeded.
            util = 0.0
            i = 0
            for tasks in thisset:
                util += tasks.wcet / tasks.period
                i = i + 1
                if util > util_req:
                    break

            if util <= util_req + threshold:
                # Within the tolerance band: keep the accumulated prefix.
                thisset = thisset[:i]
            else:
                # Overshot: drop the last task, then greedily refill from the
                # remaining tasks until util_req is reached.
                i = i - 1
                initialSet = thisset[:i]
                remainingTasks = thisset[i:]
                tasks = remainingTasks[0]
                util -= tasks.wcet / tasks.period

                while util < util_req:
                    tasks = remainingTasks[0]
                    if util + tasks.wcet / tasks.period <= util_req + threshold:
                        util += tasks.wcet / tasks.period
                        initialSet.append(tasks)
                    remainingTasks = remainingTasks[1:]
                thisset = initialSet

            if sumRunnable:
                thisset = sum_same_period_tasks(thisset, number_of_task)
            sets.append(thisset)

        # # Remove task sets that contain just one task.
        # for task_set in sets:
        #     if len(task_set) < 2:
        #         sets.remove(task_set)

        return sets
ac1f09df006d31dfc1b772c850906ec236d07f4d
3,631,272
from typing import Dict
from pathlib import Path
import yaml


def get_categories_and_file_names() -> Dict:
    """Returns a dictionary of categories and files

    Returns
    -------
    Dict
        Key is the name of the category, value is a dictionary with information on files in that \
        category.  Files that cannot be decoded are recorded with the
        placeholder value "NOT READABLE" instead of aborting the scan.
    """
    path = Path("apd-core/core")

    category_files: Dict = {}

    for i in path.glob("**/*"):
        if i.is_dir():
            continue
        category, file_name = i.parts[-2:]
        file = Path("apd-core/core") / category / file_name
        try:
            with file.open() as open_file:
                open_file_content = open_file.read()

            # Patch known-bad strings before handing the text to the YAML parser.
            for old, new in CORRECTIONS.items():
                open_file_content = open_file_content.replace(old, new)

            data_info = yaml.load(open_file_content, Loader=yaml.FullLoader)
            category_files.setdefault(category, {})[file_name] = data_info
        except UnicodeDecodeError:
            # The old code indexed category_files[category] directly here and
            # raised KeyError when the first file of a category was
            # unreadable; setdefault makes the fallback safe.
            category_files.setdefault(category, {})[file_name] = "NOT READABLE"
            # logging.exception("Error. Could not read %s", file.name)

    return category_files
4e7d0295e6e40e5c443f5d91c18d83d138b092f4
3,631,273
from typing import Dict
from typing import Set


def process_class_items(game_class: GameClass,
                        categories: Dict[str, Dict[str, int]],
                        items_def: Dict,
                        removals: Set[str],
                        locked: Set[str]) -> ClassUnlockables:
    """
    Process a hierarchical definition of a class's items into a ClassUnlockables structure

    :param game_class: the game class
    :param categories: mapping of classes to the mappings of categories of weapons for each class
    :param items_def: hierarchical items definition
    :param removals: list of item names to remove (not show at all in the menu)
    :param locked: list of item names to show locked in the menu
    :return: the resulting ClassUnlockables
    """
    class_categories = categories[game_class.short_name]

    # Weapons: grouped per category in the definition, flattened here.
    weapons = []
    for category_name, category_def in items_def['weapons'].items():
        category_id = class_categories[category_name]
        for item_name, item_id in category_def.items():
            weapons.append(UnlockableWeapon(item_name, item_id, game_class,
                                            category_id,
                                            item_name not in removals,
                                            item_name not in locked))

    # Belt items
    belt = []
    for item_name, item_id in items_def['belt'].items():
        belt.append(UnlockableClassSpecificItem(item_name, item_id, game_class,
                                                item_name not in removals,
                                                item_name not in locked))

    # Packs
    packs = []
    for item_name, item_id in items_def['packs'].items():
        packs.append(UnlockablePack(item_name, item_id, game_class,
                                    item_name not in removals,
                                    item_name not in locked))

    # Skins
    skins = []
    for item_name, item_id in items_def['skins'].items():
        skins.append(UnlockableSkin(item_name, item_id, game_class,
                                    item_name not in removals,
                                    item_name not in locked))

    return ClassUnlockables(weapons, belt, packs, skins)
64290748b72dac3590e6677dcd65af62677cee79
3,631,274
from typing import Optional


async def resolve_rrid(
    identifier: str, client: ClientSession, settings: SciCrunchSettings
) -> Optional[ResolvedItem]:
    """Resolve an RRID through the SciCrunch resolver web API.

    Mirrors what https://scicrunch.org/resolver provides, e.g.
    https://scicrunch.org/resolver/RRID:AB_90755.json

    Returns None when the resolver reports no hits.
    """
    rrid = identifier.strip()
    url = f"{settings.SCICRUNCH_RESOLVER_BASE_URL}/{rrid}.json"

    async with client.get(url, raise_for_status=True) as resp:
        body = await resp.json()

    # process and simplify response
    resolved = ResolverResponseBody.parse_obj(body)
    if resolved.hits.total == 0:
        return None

    # FIXME: Not sure why the same RRID can have multiple hits.
    # WARNING: Since Sep.2021, the resolver does not guarantee hit order, so
    # "first hit" may flip between calls (e.g. RRID:CVCL_0033 alternates
    # between '(BCRJ Cat# 0226, ...)' and '(ATCC Cat# HTB-30, ...)').
    hit = resolved.hits.hits[0].source
    if resolved.hits.total > 1:
        logger.warning(
            "Multiple hits (%d) for '%s'. Returning first",
            resolved.hits.total,
            rrid,
        )
    else:
        assert resolved.hits.total == 1  # nosec

    return ResolvedItem.parse_obj(hit.flatten_dict())
8818711de82448f91eb807c68eb19e0af2776938
3,631,275
def MakeByte(ea):
    """
    Convert the current item to a byte

    Thin wrapper around ``idaapi.doByte`` with a fixed item length of 1.

    @param ea: linear address
    @return: 1-ok, 0-failure
    """
    return idaapi.doByte(ea, 1)
ce8925c66d3e4a6c811f2a244f56b4317c66a877
3,631,276
import inspect
import sys


def get_all():
    """Return all activation functions defined in this module.

    Collects every plain function visible in the module namespace, excluding
    this helper itself.

    Returns:
        list of function objects.
    """
    # inspect.isfunction replaces the former fragile
    # ``isinstance(f[1], type(get_all))`` check, and getmembers always yields
    # 2-tuples, so the old ``len(f) > 1`` guard was dead weight.
    return [fn for name, fn in inspect.getmembers(sys.modules[__name__],
                                                  inspect.isfunction)
            if name != "get_all"]
52772f2aac04a9d68f9c6470a19d99afb79f7f7f
3,631,277
def upload_persons(request):
    """
    Render the upload form used to define persons and person roles.

    The GET parameters are applied through ``PersonsFilter`` to the distinct
    person entries; both the filter and the filtered queryset are handed to
    the template.

    :param request: Django HTTP request (filter options come from request.GET)
    :return: rendered ``author_manage/upload_persons.html`` response
    """
    # Debug print() statements removed: they leaked request data and full
    # querysets to stdout on every page view.
    selection = NmPersonsEntries.objects.all().distinct('entry_id')
    myFilter = PersonsFilter(request.GET, queryset=selection)
    selection = myFilter.qs
    context = {'myFilter': myFilter, 'selection': selection}
    return render(request, 'author_manage/upload_persons.html', context)
2caf9a8c10c12aaa9b46bd83666c39dac4dd6492
3,631,278
from neon.backends.nervanacpu import NervanaCPU
from neon.backends.util import check_gpu
from neon.backends.nervanagpu import NervanaGPU
from mgpu.nervanamgpu import NervanaMGPU
from argon.neon_backend.ar_backend import ArBackend
import logging
import atexit


def gen_backend(backend='cpu', rng_seed=None, datatype=np.float32, batch_size=0,
                stochastic_round=False, device_id=0, max_devices=get_device_count(),
                compat_mode=None, deterministic_update=None, deterministic=None):
    """
    Construct and return a backend instance of the appropriate type based on
    the arguments given. With no parameters, a single CPU core, float32
    backend is returned.

    Arguments:
        backend (string, optional): 'cpu' or 'gpu'.
        rng_seed (numeric, optional): Set this to a numeric value which can be used to seed the
                                      random number generator of the instantiated backend.
                                      Defaults to None, which doesn't explicitly seed (so each run
                                      will be different)
        datatype (dtype): Default tensor data type. CPU backend supports np.float64, np.float32,
                          and np.float16; GPU backend supports np.float32 and np.float16.
        batch_size (int): Set the size the data batches.
        stochastic_round (int/bool, optional): Set this to True or an integer to implement
                                               stochastic rounding. If this is False rounding will
                                               be to nearest. If True will perform stochastic
                                               rounding using default bit width. If set to an
                                               integer will round to that number of bits.
                                               Only affects the gpu backend.
        device_id (numeric, optional): Set this to a numeric value which can be used to select
                                       device on which to run the process
        max_devices (int, optional): For use with multi-GPU backend only. Controls the maximum
                                     number of GPUs to run on.
        compat_mode (str, optional): if this is set to 'caffe' then the conv and pooling layer
                                     output sizes will match that of caffe as will the dropout
                                     layer implementation
        deterministic (bool, optional): if set to true, all operations will be done
                                        deterministically.

    Returns:
        Backend: newly constructed backend instance of the specified type.

    Notes:
        * Attempts to construct a GPU instance without a CUDA capable card or without nervanagpu
          package installed will cause the program to display an error message and exit.
    """
    logger = logging.getLogger(__name__)

    # Only one backend may be live at a time: tear down a previously
    # generated one, otherwise register the at-exit cleanup handler (once).
    if NervanaObject.be is not None:
        # backend was already generated clean it up first
        cleanup_backend()
    else:
        # at exit from python force cleanup of backend only register this function once, will use
        # NervanaObject.be instead of a global
        atexit.register(cleanup_backend)

    # Deprecated knobs: determinism is now controlled via the random seed.
    if deterministic_update is not None or deterministic is not None:
        logger.warning('deterministic_update and deterministic args are deprecated in favor of '
                       'specifying random seed')
        deterministic = None

    if backend == 'cpu' or backend is None:
        be = NervanaCPU(rng_seed=rng_seed, default_dtype=datatype, compat_mode=compat_mode)
    elif backend == 'gpu' or backend == 'mgpu':
        gpuflag = False
        # check nvcc
        gpuflag = (check_gpu.get_compute_capability(device_id) >= 3.0)
        if gpuflag is False:
            raise RuntimeError("Device {} does not have CUDA"
                               " compute capability 3.0 or greater".format(device_id))
        if backend == 'gpu':
            # init gpu
            be = NervanaGPU(rng_seed=rng_seed, default_dtype=datatype,
                            stochastic_round=stochastic_round,
                            device_id=device_id,
                            compat_mode=compat_mode,
                            deterministic=deterministic)
        else:
            try:
                # init multiple GPU
                be = NervanaMGPU(rng_seed=rng_seed,
                                 default_dtype=datatype,
                                 stochastic_round=stochastic_round,
                                 num_devices=max_devices,
                                 compat_mode=compat_mode,
                                 deterministic=deterministic)
            except ImportError:
                logger.error("Multi-GPU support is a premium feature "
                             "available exclusively through the Nervana cloud."
                             " Please contact info@nervanasys.com for details.")
                raise
    elif backend == 'argon':
        be = ArBackend(rng_seed=rng_seed, default_dtype=datatype)
    else:
        # NOTE(review): the 'argon' backend is accepted above but missing
        # from this error message.
        raise ValueError("backend must be one of ('cpu', 'gpu', 'mgpu')")

    logger.info("Backend: {}, RNG seed: {}".format(backend, rng_seed))

    # Publish the backend globally and apply the requested batch size.
    NervanaObject.be = be
    be.bsz = batch_size
    return be
8e6127d70a1fa4bee0d1e3a5c6855338f1254218
3,631,279
from redis import exceptions
import socket


def get_redis_error_classes():
    """Return the (connection_errors, channel_errors) tuple for redis."""
    # redis-py renamed InvalidData to DataError between versions; prefer the
    # old name when it is still exported so both versions are supported.
    DataError = getattr(exceptions, 'InvalidData', None) or exceptions.DataError

    connection_errors = virtual.Transport.connection_errors + (
        InconsistencyError,
        socket.error,
        IOError,
        OSError,
        exceptions.ConnectionError,
        exceptions.AuthenticationError,
        exceptions.TimeoutError,
    )
    channel_errors = virtual.Transport.channel_errors + (
        DataError,
        exceptions.InvalidResponse,
        exceptions.ResponseError,
    )
    return error_classes_t(connection_errors, channel_errors)
7d2331de649430a857c1575ccc590c642ff36e4a
3,631,280
from typing import Dict
from typing import Any


def create_property(supported_prop: Dict[str, Any]) -> HydraClassProp:
    """Create a HydraClassProp object from the supportedProperty."""
    # Keys to validate; the boolean marks whether the key holds a literal.
    doc_keys = {
        "property": False,
        "title": False,
        "readonly": True,
        "writeonly": True,
        "required": True,
    }
    # Syntax checks
    checked = {
        key: input_key_check(supported_prop, key, "supported_prop", literal)
        for key, literal in doc_keys.items()
    }
    # Create the HydraClassProp object
    return HydraClassProp(
        checked["property"],
        checked["title"],
        required=checked["required"],
        read=checked["readonly"],
        write=checked["writeonly"],
    )
7f0aa8252f995d0ae7a9fad5f5690a034d159458
3,631,281
import hashlib


def expr_to_filename(expr):
    """
    Convert an arbitrary expr string to a valid file name.

    The name is based on the MD5 digest of the string, so equal expressions
    always map to the same file name.  Something that was a little more
    human readable would be nice, but the computer doesn't seem to care.
    """
    # scipy.weave.md5_load was removed from SciPy long ago; hashlib computes
    # the identical digest.  md5 needs bytes on Python 3, hence the encode.
    base = 'sc_'
    return base + hashlib.md5(expr.encode('utf-8')).hexdigest()
0fde1beda5e073890258f3fba5a8b5f38b1129fb
3,631,282
def getIDList():
    """getIDList() -> list(string)

    Returns a list of all lanes in the network.

    Issues a TraCI ID_LIST retrieval command; the empty object id selects
    the whole network rather than a single lane.
    """
    return _getUniversal(tc.ID_LIST, "")
e7c3f0134f8a91778cc8caf252eaf0f11cdb8e46
3,631,283
def cmor(B, C):
    """ Returns the n-point continuous Morlet wavelet

    Args:
        B the bandwidth parameter
        C the central frequency

    Returns:
        A WaveletFunctions bundle exposing the time- and frequency-domain
        evaluations plus scale/period conversion helpers.
    """
    def time(t, s=1.):
        # Time-domain evaluation; `s` may be a scalar or a vector of scales
        # (broadcast as a column against the sample times `t`).
        s = jnp.atleast_2d(jnp.asarray(s)).T
        t = t / s
        # the sinusoid
        output = jnp.exp(1j * 2 * jnp.pi * C * t)
        # the Gaussian envelope with bandwidth B
        output = output * jnp.exp(-t**2 /B )
        # the normalization factor
        factor = (jnp.pi *B) **(-0.5)
        output = factor * output
        # energy conservation across scales
        output = jnp.sqrt(1/s) * output
        return jnp.squeeze(output)

    def frequency(w, s=1.0):
        # Frequency-domain evaluation at angular frequencies `w`.
        s = jnp.atleast_2d(jnp.asarray(s)).T
        x = w * s
        # Heaviside mock: zero out non-positive frequencies (analytic wavelet)
        Hw = (w > 0).astype(float)
        # subtract angular frequencies with angular central frequency
        x = x - 2*jnp.pi*C
        # apply the bandwidth factor
        x = x * B / 4
        # apply the exponential
        # NOTE(review): the exponent is linear in x rather than quadratic; a
        # Gaussian spectrum would use x**2 here — confirm intended.
        points = Hw * jnp.exp(-x)
        # normalize for scale
        points = (s ** 0.5) * ((2*jnp.pi) ** 0.5) * points
        return jnp.squeeze(points)

    def fourier_period(s):
        # Equivalent Fourier period for a given scale.
        s = jnp.asarray(s)
        return s / C

    def scale_from_period(period):
        # Inverse of fourier_period.
        period = jnp.asarray(period)
        return period * C

    def coi(s):
        # Cone-of-influence extent for scale s (sqrt(2) * s).
        return 2 ** .5 * s

    return WaveletFunctions(is_complex=True, time=time, frequency=frequency,
                            fourier_period=fourier_period,
                            scale_from_period=scale_from_period, coi=coi)
dc7e521113837423e37c20d2f31ad7222dd37fd6
3,631,284
def calculateMid(paddle):
    """Return the vertical midpoint of a paddle.

    The paddle is given as a pair of (x, y) points; the midpoint is half of
    the (int-truncated) sum of the two y coordinates, which makes moving the
    paddle much easier.
    """
    y_top = paddle[0][1]
    y_bottom = paddle[1][1]
    return int(y_top + y_bottom) / 2
83fbba67945d158807bd9c3aebcab63342ce7599
3,631,285
def autotune(i):
    """
    Local program autotuning: a thin redirect to the shared
    'crowdsource program.optimization' action with local-only defaults.

    Input:  {
              See 'crowdsource program.optimization' (iterations) - change iterations

              Forced keys: local, only_one_run, keep_tmp, skip_exchange,
              change_user, skip_welcome, program_tags, ask_pipeline_choices
            }

    Output: {
              return       - return code =  0, if successful
                                         >  0, if error
              (error)      - error text if return > 0
            }
    """
    o = i.get('out', '')

    opt_module = cfg['module_program_optimization']

    # Make sure the "program.optimization" module is installed before
    # redirecting to it.
    r = ck.access({'action': 'find',
                   'module_uoa': 'module',
                   'data_uoa': opt_module})
    if r['return'] > 0:
        if o == 'con':
            ck.out('WARNING: this function uses module "program.optimization" but can\'t find it')
            ck.out('')
            ck.out('Please, try to install shared repository "ck-crowdtuning"')
            ck.out(' $ ck pull repo:ck-crowdtuning')
            ck.out('')
        return r

    # Redirecting to crowdsource program.optimization
    i.update({
        'action': 'crowdsource',
        'module_uoa': opt_module,
        'local': 'yes',
        'once': 'yes',
        'keep_experiments': 'yes',
        'skip_welcome': 'yes',
        'program_tags': ' ',
        'ask_pipeline_choices': 'yes',
        'local_autotuning': 'yes',
    })

    # Defaults that the caller may override with any non-empty value.
    if i.get('skip_exchange', '') == '':
        i['skip_exchange'] = 'yes'
    if i.get('change_user', '') == '':
        i['change_user'] = '-'

    return ck.access(i)
22e29831cfaa040ff0425a87f482be857a12004f
3,631,286
import io


def generate_from_file(abouts, is_about_input, license_dict, scancode,
                       min_license_score, template_loc=None, vartext=None):
    """
    Generate an attribution text from an `abouts` list of About objects, a
    `template_loc` template file location and a `vartext` optional dict of
    extra variables.

    Return a tuple of (error, attribution text) where error is an Error
    object or None and attribution text is the generated text or None.
    """
    # Fall back to the bundled default template (the scancode-specific one
    # when requested) if no explicit template location was given.
    if template_loc:
        template_loc = add_unc(template_loc)
    elif scancode:
        template_loc = add_unc(DEFAULT_TEMPLATE_SCANCODE_FILE)
    else:
        template_loc = add_unc(DEFAULT_TEMPLATE_FILE)

    with io.open(template_loc, encoding='utf-8', errors='replace') as tplf:
        template_text = tplf.read()

    return generate(abouts, is_about_input, license_dict, scancode,
                    min_license_score, template=template_text, vartext=vartext)
099152905ded65278d1b382956f35b110692b8f2
3,631,287
from typing import Match
from datetime import datetime


def user_past_parties():
    """List parties the logged in user has been in.

    Params:
        page (int): Optional. Page number to return.

    Returns a paginated JSON payload with one entry per past party (match
    details plus party details), or a 404 payload when the token does not
    resolve to a user.
    """
    received = request.get_json()
    page = received.get('page', 1)

    # User record
    user = util.user_from_jwt(received.get('token'))
    if not user:
        return api_error(m.USER_NOT_FOUND), 404

    # Query parties: matches the user took part in that have already ended
    # and are still visible / not deleted.
    # NOTE(review): the `== True` / `== False` comparisons are SQLAlchemy
    # column expressions and must not be rewritten with `is`.
    # NOTE(review): `datetime.datetime.utcnow()` assumes the module was
    # imported as `import datetime`; verify against the file's real imports.
    per_page = current_app.config['MATCHES_PER_PAGE']
    entries = (
        db.session
        .query(MatchParticipant)
        .join(Match)
        .filter(
            MatchParticipant.user_id == user.id,
            Match.end_date < datetime.datetime.utcnow(),
            Match.is_visible == True,
            Match.is_deleted == False
        )
        .order_by(Match.start_date.asc())
        .paginate(page, per_page, error_out=False)
    )

    response = []
    for entry in entries.items:
        details = {}

        # Match details
        match = entry.match
        details['match'] = {
            'title': match.title,
            'start-date': match.start_date.isoformat(),
            'end-date': match.end_date.isoformat(),
            'slug': match.slug
        }

        # Party details
        party = entry.party
        details['party'] = {
            'leader': party.owner.username,
            'members': [u.user.username for u in party.members],
            'party-token': party.token
        }

        response.append(details)

    return api_success(**util.paginated(page, entries.pages, response)), 200
6a6cd86abe39e962a2b09a7204ad62f5be894df4
3,631,288
def bits_list(number):
    """Return the list of bits in number, most significant bit first.

    Keyword arguments:
    number -- an integer >= 0

    >>> bits_list(0)
    [0]
    >>> bits_list(5)
    [1, 0, 1]
    """
    # format(number, 'b') produces '0' for zero and carries no '0b' prefix,
    # which removes both the zero special case and the fragile
    # lstrip('0b') of the old code (lstrip strips a *character set*, not a
    # prefix).
    return [int(digit) for digit in format(number, 'b')]
6f27715dbccefe56d77c800a44c4fa5e82d35b50
3,631,289
def show_funcs():
    """Return the sorted names of all presently available algorithms."""
    # Iterating a dict yields its keys, so no explicit .keys() is needed.
    return sorted(func_registry)
e685c92ee8cbc8a7d5d1c7323976eaccd10fa4fa
3,631,290
def _FakeQuantWithMinMaxVars(inputs, min_var, max_var, per_channel, num_bits,
                             narrow_range):
    """Adds a fake quantization operation.

    Depending on value of per_channel, this operation may do global quantization
    or per channel quantization.  min_var and max_var should have corresponding
    shapes: [1] when per_channel == False and [d] when per_channel == True.

    Args:
      inputs: a tensor containing values to be quantized.
      min_var: a variable containing quantization range lower end(s).
      max_var: a variable containing quantization range upper end(s).
      per_channel: a boolean specifying whether to use per-channel quantization.
      num_bits: Number of bits to use for quantization, must be between 2 and 8.
      narrow_range: Whether to use the narrow quantization range
        [1; 2^num_bits - 1] or wide range [0; 2^num_bits - 1].
    Returns:
      a tensor containing quantized values.
    """
    if per_channel:
        # One (min, max) pair per channel: range variables must be rank 1.
        # The quantization axis is fixed to the last dimension (axis=-1).
        assert len(min_var.get_shape()) == 1
        assert len(max_var.get_shape()) == 1
        return tf.quantization.quantize_and_dequantize(inputs,
                                                       min_var,
                                                       max_var,
                                                       num_bits=num_bits,
                                                       narrow_range=narrow_range,
                                                       axis=-1,
                                                       range_given=True)
    else:
        # Global quantization: range variables must be scalars.
        assert min_var.get_shape() == []  # pylint: disable=g-explicit-bool-comparison
        assert max_var.get_shape() == []  # pylint: disable=g-explicit-bool-comparison
        return tf.quantization.quantize_and_dequantize(inputs,
                                                       min_var,
                                                       max_var,
                                                       num_bits=num_bits,
                                                       narrow_range=narrow_range,
                                                       range_given=True)
1457d7f7661ed336421f715f4841c6e7a1518537
3,631,291
def join_and_keep_order(left, right, remove_duplicates, keep='first', **kwargs):
    """
    Merge two DataFrames while preserving the row order of the inputs.

    :type left: DataFrame
    :type right: DataFrame
    :param remove_duplicates: if True, keep only one match per input row
    :param keep: which duplicate to keep ('first' or 'last')
    :param kwargs: forwarded to DataFrame.merge (e.g. on=..., how=...)
    :rtype: DataFrame
    """
    # Tag every input row with its original position so the merge result can
    # be restored to input order afterwards.
    left, right = left.copy(), right.copy()
    left['_left_id'] = range(left.shape[0])
    right['_right_id'] = range(right.shape[0])

    merged = left.merge(right=right, **kwargs)
    merged.sort_values(axis='index', by=['_left_id', '_right_id'], inplace=True)

    if remove_duplicates:
        # Drop any row whose left or right source row already appeared.
        duplicated = (merged['_left_id'].duplicated(keep=keep)
                      | merged['_right_id'].duplicated(keep=keep))
        merged = merged[~duplicated]

    return merged.drop(columns=['_left_id', '_right_id'])
a3044f7de9c1f8ffb50cf1e57997307ee0e3d840
3,631,292
import typing
from typing import Counter


def count_variants(graph) -> typing.Counter[str]:
    """Count how many of each type of variant a graph has.

    :param pybel.BELGraph graph: A BEL graph
    """
    # Collect the kind of every variant of every node that has variants.
    kinds = (
        variant[KIND]
        for node in graph
        if has_variant(graph, node)
        for variant in node[VARIANTS]
    )
    return Counter(kinds)
194a3881e0cb4b73520293ec587cae96de932958
3,631,293
def collate(expression, collation):
    """Return the clause ``expression COLLATE collation``.

    e.g.::

        collate(mycolumn, 'utf8_bin')

    produces::

        mycolumn COLLATE utf8_bin

    The resulting binary expression keeps the type of the coerced
    ``expression`` so it can be used wherever the original could.
    """
    # Coerce a plain Python value into a bind-parameter clause; the collation
    # name itself is rendered as a literal SQL text fragment.
    expr = _literal_as_binds(expression)
    return _BinaryExpression(
        expr,
        _literal_as_text(collation),
        operators.collate, type_=expr.type)
37896bfce0f7c02c4021af75105a878831de4838
3,631,294
def crop_images(x, y, w, h, *args):
    """
    Crop every image passed as a positional argument using the same box.

    The box slices axis 0 from ``x`` over ``h`` entries and axis 1 from
    ``y`` over ``w`` entries; a list with one cropped image per input is
    returned.
    """
    assert len(args) > 0, "At least 1 image needed."
    return [image[x:x + h, y:y + w] for image in args]
e8f78246c0bfeb3d370b8fe01e264b2f7e0e1c49
3,631,295
import time
import json


def WriteResultToJSONFile(test_suites, results, json_path):
    """Aggregate a list of unittest result object and write to a file as a JSON.

    This takes a list of result object from one or more runs (for retry purpose)
    of Python unittest tests; aggregates the list by appending each test result
    from each run and writes to a file in the correct format for the
    --isolated-script-test-output argument passed to test isolates.

    Args:
      test_suites: a list of unittest.TestSuite that were run to get
                   the list of result object; each test_suite contains
                   the tests run and is iterated to get all test cases ran.
      results: a list of unittest.TextTestResult object returned
               from running unittest tests.
      json_path: desired path to JSON file of result.
    """
    output = {
        'interrupted': False,
        'num_failures_by_type': {},
        'path_delimiter': '.',
        'seconds_since_epoch': time.time(),
        'tests': {},
        'version': 3,
    }

    def initialize(test_suite):
        # Register every test id with an empty per-run result history.
        for test in test_suite:
            if test.id() not in output['tests']:
                output['tests'][test.id()] = {
                    'expected': 'PASS',
                    'actual': []
                }

    for test_suite in test_suites:
        initialize(test_suite)

    def get_pass_fail(test_suite, result):
        # A test failed if it appears in the run's failures or errors;
        # everything else in the suite passed.
        success = []
        fail = []
        for failure in result.failures + result.errors:
            fail.append(failure[0].id())
        for test in test_suite:
            if test.id() not in fail:
                success.append(test.id())
        return {
            'success': success,
            'fail': fail,
        }

    # Append each run's outcome to the per-test history.
    for test_suite, result in zip(test_suites, results):
        pass_fail = get_pass_fail(test_suite, result)
        for s in pass_fail['success']:
            output['tests'][s]['actual'].append('PASS')
        for f in pass_fail['fail']:
            output['tests'][f]['actual'].append('FAIL')

    num_fails = 0
    # BUG FIX: this used dict.itervalues(), which only exists on Python 2 and
    # raises AttributeError on Python 3; values() works on both.
    for test_result in output['tests'].values():
        if test_result['actual'][-1] == 'FAIL':
            num_fails += 1
            test_result['is_unexpected'] = True
        test_result['actual'] = ' '.join(test_result['actual'])

    output['num_failures_by_type']['FAIL'] = num_fails
    output['num_failures_by_type']['PASS'] = len(output['tests']) - num_fails
    with open(json_path, 'w') as script_out_file:
        json.dump(output, script_out_file)
        script_out_file.write('\n')
cb53b65bf5c8ceb1d0695e38c4ebeedd4916fe14
3,631,296
def svn_prop_has_svn_prop(*args):
    """svn_prop_has_svn_prop(apr_hash_t props, apr_pool_t pool) -> svn_boolean_t

    SWIG-generated wrapper; forwards directly to the C implementation in the
    _core extension module.
    """
    return _core.svn_prop_has_svn_prop(*args)
5ca451ecd2d945ace6f690ada61abc6de1bef445
3,631,297
def test_analysis_dual_grad(n, lbda):
    """ Test the gradient of dual analysis.

    Compares the analytic gradient from analysis_dual_grad against a
    finite-difference approximation of the dual objective.
    """
    rng = check_random_state(None)
    x, _, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)

    eps = 1e-3
    v_dim = D.shape[1]
    # Keep the dual variable strictly inside the box [-lbda, lbda] so the
    # finite-difference probes never cross the constraint boundary.
    v = np.clip(rng.randn(n, v_dim), -(lbda - eps), (lbda - eps))
    Psi_A = np.linalg.pinv(A).dot(D)

    # Finite grad v
    def finite_grad(v):
        def f(v):
            v = v.reshape(n, v_dim)
            # the actual considered loss is not normalized but for
            # convenience we want to check the sample-loss average
            return analysis_dual_obj(v, A, D, x, lbda, Psi_A=Psi_A) * n
        grad = approx_fprime(xk=v.ravel(), f=f, epsilon=1.0e-6)
        return grad.reshape(n, v_dim)

    grad_ref = finite_grad(v)
    grad_test = analysis_dual_grad(v, A, D, x, Psi_A=Psi_A)

    np.testing.assert_allclose(grad_ref, grad_test, atol=1e-4)
5ba56bc1621f7e159c4cb5c5e2309272690b787f
3,631,298
def dojo_gbrv_results(pseudo, struct_type, num_sites, volumes, etotals): """ This function computes the GBRV results and returns the dictionary to be inserted in the dojoreport file. Args: pseudo: Pseudopotential object. struct_type: "fcc" or "bcc" num_sites: Number of sites in unit cell volumes: List with unit cell volumes in Ang**3 etotals: List of total energies in eV. Return: (dojo_entry, eos_fit) where dojo_entry is the Dictionary with results to be inserted in the djrepo file. eos_fit is the object storing the results of the EOS fit. """ # Read etotals and fit E(V) with a parabola to find the minimum assert len(etotals) == len(volumes) dojo_entry = dict( volumes=list(volumes), etotals=list(etotals), num_sites=num_sites, ) eos_fit = None try: eos_fit = EOS.Quadratic().fit(volumes, etotals) except EOS.Error as exc: dojo_entry["_exceptions"] = str(exc) return dojo_entry, eos_fit # Function to compute cubic a0 from primitive v0 (depends on struct_type) vol2a = {"fcc": lambda vol: (4 * vol) ** (1/3.), "bcc": lambda vol: (2 * vol) ** (1/3.), }[struct_type] a0 = vol2a(eos_fit.v0) dojo_entry.update(dict( v0=eos_fit.v0, b0=eos_fit.b0, #b1=eos_fit.b1, # infinity a0=a0, struct_type=struct_type )) db = gbrv_database(pseudo.xc) ref = db.get_entry(pseudo.symbol, stype=struct_type) pawabs_err = a0 - ref.gbrv_paw pawrel_err = 100 * (a0 - ref.gbrv_paw) / ref.gbrv_paw # AE results for P and Hg are missing. if ref.ae is not None: abs_err = a0 - ref.ae rel_err = 100 * (a0 - ref.ae) / ref.ae else: # Use GBRV_PAW as reference. abs_err = pawabs_err rel_err = pawrel_err print("for GBRV struct_type: ", struct_type, "a0= ", a0, "Angstrom") print("AE - THIS: abs_err = %f, rel_err = %f %%" % (abs_err, rel_err)) print("GBRV-PAW - THIS: abs_err = %f, rel_err = %f %%" % (pawabs_err, pawrel_err)) dojo_entry["a0_abs_err"] = abs_err dojo_entry["a0_rel_err"] = rel_err return dojo_entry, eos_fit
f20eac647f4bc83235ea71187dcd493392028472
3,631,299