content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def handle_switches(args, sysroot):
    """Fetch the targeted binary and determine how to attach gdb.

    Args:
        args: Parsed arguments.
        sysroot: Local sysroot path.

    Returns:
        (binary_file, attach_pid, run_cmd).
        Precisely one of attach_pid or run_cmd will be None.
    """
    device = args.device
    binary_file = None
    pid = None
    run_cmd = None
    if args.target_pid:
        # Fetch the binary using the PID later.
        pid = args.target_pid
    elif args.target_name:
        # Fetch the binary using the PID later.
        pid = get_remote_pid(device, args.target_name)
    elif args.run_cmd:
        if not args.run_cmd[0]:
            sys.exit("empty command passed to -r")
        if not args.run_cmd[0].startswith("/"):
            sys.exit("commands passed to -r must use absolute paths")
        run_cmd = args.run_cmd
        binary_file, local = gdbrunner.find_file(device, run_cmd[0], sysroot,
                                                 user=args.user)
    if binary_file is None:
        assert pid is not None
        try:
            binary_file, local = gdbrunner.find_binary(device, pid, sysroot,
                                                       user=args.user)
        except adb.ShellError:
            sys.exit("failed to pull binary for PID {}".format(pid))

    if not local:
        logging.warning("Couldn't find local unstripped executable in {},"
                        " symbols may not be available.".format(sysroot))

    return (binary_file, pid, run_cmd)
5,344,000
def dot2states(dot):
    """Translate a dot-bracket string into a sequence of numerical states"""
    dot = dot.replace(".", "0")  # Unpaired
    dot = dot.replace("(", "1")  # Paired
    dot = dot.replace(")", "1")  # Paired
    return np.array(list(dot), dtype=int)
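A quick sanity check of the mapping (a minimal sketch, assuming numpy is imported as np, as the function requires):

dot2states("((..))")  # -> array([1, 1, 0, 0, 1, 1])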
5,344,001
def _create_all_schemata():
    """
    Create all of the schemata, just in case they haven't yet been created.
    """
    cursor = connection.cursor()
    cursor.execute("SELECT count(*)>0 FROM information_schema.tables WHERE table_name = 'boardinghouse_schema'")
    if cursor.fetchone() == (True,):
        for schema in get_schema_model().objects.all():
            schema.create_schema()
5,344,002
def mel_to_hz(mels, htk=False):
    """Convert mel bin numbers to frequencies

    Examples
    --------
    >>> librosa.mel_to_hz(3)
    200.

    >>> librosa.mel_to_hz([1,2,3,4,5])
    array([  66.667,  133.333,  200.   ,  266.667,  333.333])

    Parameters
    ----------
    mels : np.ndarray [shape=(n,)], float
        mel bins to convert
    htk : bool
        use HTK formula instead of Slaney

    Returns
    -------
    frequencies : np.ndarray [shape=(n,)]
        input mels in Hz

    See Also
    --------
    hz_to_mel
    """
    mels = np.asanyarray(mels)

    if htk:
        return 700.0 * (10.0**(mels / 2595.0) - 1.0)

    # Fill in the linear scale
    f_min = 0.0
    f_sp = 200.0 / 3
    freqs = f_min + f_sp * mels

    # And now the nonlinear scale
    min_log_hz = 1000.0                         # beginning of log region (Hz)
    min_log_mel = (min_log_hz - f_min) / f_sp   # same (Mels)
    logstep = np.log(6.4) / 27.0                # step size for log region

    if mels.ndim:
        # If we have vector data, vectorize
        log_t = (mels >= min_log_mel)
        freqs[log_t] = min_log_hz * np.exp(logstep * (mels[log_t] - min_log_mel))
    elif mels >= min_log_mel:
        # If we have scalar data, check directly
        freqs = min_log_hz * np.exp(logstep * (mels - min_log_mel))

    return freqs
5,344,003
def test_outfile_british_american2():
    """ Runs on good input """
    run(AMERICAN, BRITISH, EXPECTED3)
5,344,004
def _iter_population(draws, tune, popstep, steppers, traces, points): """Iterate a ``PopulationStepper``. Parameters ---------- draws: int number of draws per chain tune: int number of tuning steps popstep: PopulationStepper the helper object for (parallelized) stepping of chains steppers: list The step methods for each chain traces: list Traces for each chain points: list population of chain states Yields ------ traces: list List of trace objects of the individual chains """ try: with popstep: # iterate draws of all chains for i in range(draws): # this call steps all chains and returns a list of (point, stats) # the `popstep` may interact with subprocesses internally updates = popstep.step(i == tune, points) # apply the update to the points and record to the traces for c, strace in enumerate(traces): if steppers[c].generates_stats: points[c], stats = updates[c] if strace.supports_sampler_stats: strace.record(points[c], stats) else: strace.record(points[c]) else: points[c] = updates[c] strace.record(points[c]) # yield the state of all chains in parallel yield traces except KeyboardInterrupt: for c, strace in enumerate(traces): strace.close() if hasattr(steppers[c], "report"): steppers[c].report._finalize(strace) raise except BaseException: for c, strace in enumerate(traces): strace.close() raise else: for c, strace in enumerate(traces): strace.close() if hasattr(steppers[c], "report"): steppers[c].report._finalize(strace)
5,344,005
def load_images(images):
    """ Decodes batch of image bytes and returns a 4-D numpy array. """
    import numpy as np
    batch = []
    for image in images:
        img_np = readImage(image)
        batch.append(img_np)
    batch_images = np.concatenate(batch)
    logger.info('batch_images.shape:%s' % (str(batch_images.shape)))
    return batch_images
5,344,006
def dumplist(args):
    """Dumps lists of files based on your criteria"""

    db = Database()
    objects = db.objects(
        protocol=args.protocol,
        purposes=args.purposes,
        groups=args.groups,
        kinds=args.kinds
    )

    output = sys.stdout
    if args.selftest:
        from bob.db.base.utils import null
        output = null()

    for obj in objects:
        output.write('%s\n' % obj.make_path(directory=args.directory))

    return 0
5,344,007
def stats_hook():
    """
    decorator to register a stats hook.

    :raises InvalidStatsHookTypeError: invalid stats hook type error.

    :returns: stats hook class.
    :rtype: type
    """

    def decorator(cls):
        """
        decorates the given class and registers an instance
        of it into available stats hooks.

        :param type cls: stats hook class.

        :returns: stats hook class.
        :rtype: type
        """

        instance = cls()
        stat_services.register_hook(instance)

        return cls

    return decorator
5,344,008
def safe_infer(node, context=None):
    """Return the inferred value for the given node.

    Return None if inference failed or if there is some ambiguity (more than
    one node has been inferred).
    """
    try:
        inferit = node.infer(context=context)
        value = next(inferit)
    except exceptions.InferenceError:
        return None
    try:
        next(inferit)
        return None  # None if there is ambiguity on the inferred node
    except exceptions.InferenceError:
        return None  # there is some kind of ambiguity
    except StopIteration:
        return value
5,344,009
def start_session(config_path):
    """Launch tmuxp in a new terminal window."""
    subprocess.Popen(
        ["rofi-sensible-terminal", "-e", "tmuxp", "load", str(config_path)],
        stdout=subprocess.DEVNULL,
    )
5,344,010
def test_get_order(
    rotation_matrix, translation_vector, repr_matrix, repr_has_cc, result, numeric
):
    """
    Check that the ``get_order`` method matches the expected result.
    """
    sym_op = sr.SymmetryOperation(
        rotation_matrix=rotation_matrix,
        translation_vector=translation_vector,
        repr_matrix=repr_matrix,
        repr_has_cc=repr_has_cc,
        numeric=numeric
    )
    assert sym_op.get_order() == result
5,344,011
def catch_exception(func):
    """
    Returns:
        object:
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        worker = kwargs['error_catcher']
        try:
            return func(*args, **kwargs)
        except Exception as e:
            print('stdout:', worker.stdout.read().decode("utf-8"))
            print('stderr:', worker.stderr.read().decode("utf-8"))
            raise
    return wrapper
5,344,012
def RandomCrop(parent, new_shape, name=""):
    """\
    Crop an image layer at a random location with size ``[height, width]``.

    :param parent: parent layer
    :param new_shape: [height, width] size
    :param name: name of the output layer
    :return: CropRandom layer
    """
    return _eddl.RandomCrop(parent, new_shape, name)
5,344,013
def test_Compile_WrapMethodInClass_syntax_error():
    """Test that error is raised if method contains a syntax error."""
    with test.Raises(errors.BadCodeException):
        java.Compile(java.WrapMethodInClass("!@///"))
5,344,014
def run_client(instance): """ Start a client process """ port = [1008, 8989, 9002][instance] cpu = ['(3,4)', '(5,6)', '(7,8)'][instance] # TODO: the following line is an example of code that is not suitable! # should switch to run_udp_app instead of this function # ips = [[_server_ips[1], _server_ips[0]], ips = [[_server_ips[0],], [_server_ips[0]], [_server_ips[0]]][instance] mpps = 1000 * 1000 rate = [-2 * mpps, 2 * mpps, 6 * mpps][instance] _ips = ' '.join(ips) _cnt_flow = [1, count_flow, count_flow][instance] delay = [0, 0, 100] # cycles per packet args = { 'bin': slow_receiver_exp, 'cpu': cpu, 'count_queue': count_queue, 'sysmod': 'bess' if sysmod == 'bess-bp' else sysmod, 'mode': 'client', 'cnt_ips': len(ips), 'ips': _ips, 'count_flow': _cnt_flow, 'duration': duration, 'source_ip': _client_ip[instance], 'port': port, 'delay': delay[instance], 'bidi': 'false', } if PORT_TYPE == PMD: vdev = ['virtio_user1,path=/tmp/ex_vhost1.sock,queues='+str(count_queue), 'virtio_user3,path=/tmp/ex_vhost3.sock,queues='+str(count_queue),][instance] prefix = 'slow_receiver_exp_client_{}'.format(instance) args['vdev'] = vdev args['file-prefix'] = prefix cmd = ('sudo {bin} --no-pci --lcores="{cpu}" --file-prefix={file-prefix} ' '--vdev="{vdev}" --socket-mem=128 -- ' 'bidi={bidi} {source_ip} {count_queue} {sysmod} {mode} {cnt_ips} {ips} ' '{count_flow} {duration} {port} {delay}').format(**args) else: vdev = ['ex_vhost1', 'ex_vhost3', 'ex_vhost4'][instance] prefix = 'bessd-dpdk-prefix' args['vdev'] = vdev args['file-prefix'] = prefix cmd = ('sudo {bin} --no-pci --lcores="{cpu}" --file-prefix={file-prefix} ' '--proc-type=secondary --socket-mem=128 -- ' 'bidi={bidi} vport={vdev} {source_ip} {count_queue} ' '{sysmod} {mode} {cnt_ips} {ips} ' '{count_flow} {duration} {port} {delay}').format(**args) if rate >= 0: # add rate limit argument cmd += ' {}'.format(rate) print("=" * 32) print(" " * 13 + "client") print(cmd) print("=" * 32, end='\n\n') # Run in background if not DIRECT_OUTPUT: p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) else: p = subprocess.Popen(cmd, shell=True) return p
5,344,015
def get_linear_sys(eqns, params): """Gets the linear system corresponding to the symbolic equations Note that this function only work for models where the left-hand side of the equations all contain only linear terms with respect to the given model parameters. For these linear cases, this function will return a matrix :math:`\\mathbf{A}` and a vector :math:`\\mathbf{v}` such that the given equations can be written as .. math:: \\mathbf{A} \\mathbf{x} = \\mathbf{v} with :math:`\\mathbf{x}` being the column vector of the values of the model symbols. Normally the matrix will have more rows than columns for over- determined fitting. :param eqns: A sequence of ``Eqn`` objects for the equations of the fitting. :param params: A sequence of the ``ModelParam`` objects for the parameters to be fitted. :returns: The matrix :math:`\\mathbf{A}` and the vector :math:`\\mathbf{v}`. :rtype: tuple :raises ValueError: if the system of equations are not linear. """ # We treat the equations one-by-one, write rows of the matrix and # the vector one-by-one. n_params = len(params) n_eqns = len(eqns) mat = np.zeros((n_eqns, n_params), dtype=np.float) vec = np.empty((n_eqns, ), dtype=np.float) # Extract the symbols for the parameters and assort the result into a # dictionary for fast loop up of the location of the symbols. symbs = { param.symb: idx for idx, param in enumerate(params) } print('\nForming the matrix and vectors for the linear model...') start_time = time.process_time() for idx, eqn in enumerate(eqns): # First get the vector to the reference value of the equation. vec[idx] = eqn.ref_val # Get the symbolic expression. expr = eqn.modelled_val.simplify().expand() # Get its terms. if isinstance(expr, Add): terms = expr.args else: terms = [expr, ] # Loop over the terms to get the coefficients ahead of the symbols. for term in terms: # Split the term into a symbol and a coefficient. symb, coeff = _get_symb_w_coeff(term) if symb is None: # When we are treating a pure number term, we can move it to # the left-hand side of the equation. vec[idx] -= coeff else: # When we are going a symbol, we need to locate the symbol. try: col_idx = symbs[symb] except KeyError: raise ValueError( 'Unrecognised symbol {!r}'.format(symb) ) else: mat[idx, col_idx] += coeff # Go on to the next term. continue # Go on to the next equation. continue print( 'Finished: {!s}sec.'.format(time.process_time() - start_time) ) # Return the matrix and the vector. return mat, vec
5,344,016
def slightly(membership: npt.ArrayLike) -> npt.ArrayLike:
    """
    Applies the element-wise function fn(u) = u^(1/2).

    :param membership: Membership function to be modified.

    >>> from fuzzy_expert.operators import slightly
    >>> slightly([0, 0.25, 0.5, 0.75, 1])
    array([0.        , 0.16326531, 0.99696182, 1.        , 0.        ])

    """
    plus_membership: npt.ArrayLike = np.power(membership, 1.25)
    not_very_membership: npt.ArrayLike = 1 - np.power(membership, 2)
    membership: npt.ArrayLike = np.where(
        membership < not_very_membership, plus_membership, not_very_membership
    )
    membership: npt.ArrayLike = membership / np.max(membership)
    return np.where(membership <= 0.5, membership ** 2, 1 - 2 * (1 - membership) ** 2)
5,344,017
def pfxstrokes(fn="string",pc=1,sl=1): """ http://help.autodesk.com/cloudhelp/2019/ENU/Maya-Tech-Docs/CommandsPython/pfxstrokes.html ----------------------------------------- pfxstrokes is NOT undoable, NOT queryable, and NOT editable. This command will loop through all the Paint Effects strokes, including pfxHair nodes, and write the current state of all the tubes to a file. For normal stroke nodes tubes must be ON in the brush or there will be no output. For pfxHair nodes there will always be output, but the format is different than for stroke nodes(however one can assign a brush with tubes = ON to a pfxHair node, in which case it will output the same format as strokes). The general file format is ASCII, using commas to separate numerical values and newlines between blocks of data. The format used for pfxHair nodes presents the hair curves points in order from root to tip of the hair. The hairs follow sequentially in the following fashion: NumCvs pointX,pointY,pointZ, normalX,normalY,normalZ, width, colorR,colorG,colorB, paramU pointX,pointY,pointZ, normalX,normalY,normalZ, width, colorR,colorG,colorB, paramU etc... NumCvs pointX,pointY,pointZ, normalX,normalY,normalZ, width, colorR,colorG,colorB, paramU etc.. The format used to output files for brushes with tubes=ON is more complex. The tubes can branch and the order the segments are written is the same order they are drawn in. Slowly drawing a tall grass brush in the paint effects panel can help to illustrate the order the segments will appear in the file. New tubes can start "growing" before others are finished. There is no line for "NumCvs". Instead all data for each segment appears on each line. The data on each line is the same as passed into the paint effects runtime function. See the argument list of paintRuntimeFunc.mel for the order and a description of these parameters. The parameters match up exactly in the order they appear on a line of the output file with the order of arguments to this function. If one wishes to parse the output file and connect the segments together into curves the branchId, parentId and siblingCnt parameters can help when sorting which segment connects to which line. Using the -postCallback option will write out the tubes data after it has been proessed by the runTime callback. ----------------------------------------- Return Value: None ----------------------------------------- Flags: ----------------------------------------- fn : filename [string] [] The output file. ----------------------------------------- pc : postCallback [boolean] [] Output information to the file after the Runtime Callback MEL function has been invoked. The default is to output the information prior to the callback. ----------------------------------------- sl : selected [boolean] Only loop through the selected strokes. """
5,344,018
def int_max(int_a, int_b):
    """ max(a, b) """
    if int_a > int_b:
        return int_a
    else:
        return int_b
5,344,019
def extract_depth_map(frame): """ Extract front-view lidar camera projection for ground-truth depth maps """ (range_images, camera_projections, range_image_top_pose) = frame_utils.parse_range_image_and_camera_projection(frame) for c in frame.context.camera_calibrations: if dataset_pb2.CameraName.Name.Name(c.name) == 'FRONT': extrinsic = np.reshape( np.array(c.extrinsic.transform, np.float32), [4, 4]) range_images_cartesian = convert_range_image_to_cartesian(frame,range_images,range_image_top_pose) cam_projection = (np.array(camera_projections[1][0].data).reshape(64,2650,6))[np.newaxis,...] depth = range_image_utils.build_camera_depth_image(range_images_cartesian[1][np.newaxis,...],extrinsic[np.newaxis,...],cam_projection ,[1280,1920],1) p = np.where(depth[0]!= 0) v = np.extract(depth[0]!=0,depth[0]) grid_w,grid_h = np.mgrid[0:1280,0:1920] depth_map = griddata(p, v, (grid_w, grid_h), method='nearest') depth_map = depth_map return depth_map[0:1280:4,0:1920:4]
5,344,020
def cross(vect1, vect2): """ Returns cross product of two vectors. Examples ======== >>> from sympy.vector import CoordSys3D >>> from sympy.vector.vector import cross >>> R = CoordSys3D('R') >>> v1 = R.i + R.j + R.k >>> v2 = R.x * R.i + R.y * R.j + R.z * R.k >>> cross(v1, v2) (-R.y + R.z)*R.i + (R.x - R.z)*R.j + (-R.x + R.y)*R.k """ if isinstance(vect1, Add): return VectorAdd.fromiter(cross(i, vect2) for i in vect1.args) if isinstance(vect2, Add): return VectorAdd.fromiter(cross(vect1, i) for i in vect2.args) if isinstance(vect1, BaseVector) and isinstance(vect2, BaseVector): if vect1._sys == vect2._sys: n1 = vect1.args[0] n2 = vect2.args[0] if n1 == n2: return Vector.zero n3 = ({0,1,2}.difference({n1, n2})).pop() sign = 1 if ((n1 + 1) % 3 == n2) else -1 return sign*vect1._sys.base_vectors()[n3] from .functions import express try: v = express(vect1, vect2._sys) except ValueError: return Cross(vect1, vect2) else: return cross(v, vect2) if isinstance(vect1, VectorZero) or isinstance(vect2, VectorZero): return Vector.zero if isinstance(vect1, VectorMul): v1, m1 = next(iter(vect1.components.items())) return m1*cross(v1, vect2) if isinstance(vect2, VectorMul): v2, m2 = next(iter(vect2.components.items())) return m2*cross(vect1, v2) return Cross(vect1, vect2)
5,344,021
def get_cognates(wordlist, ref): """ Retrieve cognate sets from a wordlist. """ etd = wordlist.get_etymdict(ref=ref) cognates = {} if ref == "cogids": for cogid, idxs_ in etd.items(): idxs, count = {}, 0 for idx, language in zip(idxs_, wordlist.cols): if idx: tks = wordlist[idx[0], "tokens"] cogidx = wordlist[idx[0], ref].index(cogid) idxs[language] = " ".join([ x.split("/")[1] if "/" in x else x for x in tks.n[cogidx]]) count += 1 else: idxs[language] = "" if count >= 2: cognates[cogid] = idxs elif ref == "cogid": for cogid, idxs_ in etd.items(): idxs, count = {}, 0 for idx, language in zip(idxs_, wordlist.cols): if idx: tks = wordlist[idx[0], "tokens"] idxs[language] = " ".join([x.split("/")[1] if "/" in x else x for x in tks]) count += 1 else: idxs[language] = "" if count >= 2: cognates[cogid] = idxs return cognates
5,344,022
def test_ir_to_rgb_calibration(get_depthcamera): """ Tests the depthcamera's ir_to_rgb_calibration method. """ depthcamera = get_depthcamera depthcamera_model = check_device_types.get_device_model(depthcamera) if depthcamera_model == Devices.depthcamera_g1: with pytest.raises(PySproutError) as execinfo: depthcamera.ir_to_rgb_calibration() assert 'Functionality not available.' in str(execinfo.value) return vendetta = depthcamera.ir_to_rgb_calibration() assert isinstance(vendetta, dict) assert "ir_intrinsics" in vendetta assert isinstance(vendetta['ir_intrinsics'], list) assert len(vendetta['ir_intrinsics']) == 4 for item in vendetta['ir_intrinsics']: assert isinstance(item, float) assert "rgb_intrinsics" in vendetta assert isinstance(vendetta['rgb_intrinsics'], list) assert len(vendetta['rgb_intrinsics']) == 4 for item in vendetta['rgb_intrinsics']: assert isinstance(item, float) assert "ir_distortion" in vendetta assert isinstance(vendetta['ir_distortion'], list) assert len(vendetta['ir_distortion']) == 5 for item in vendetta['ir_distortion']: assert isinstance(item, float) assert "rgb_distortion" in vendetta assert isinstance(vendetta['rgb_distortion'], list) assert len(vendetta['rgb_distortion']) == 5 for item in vendetta['rgb_distortion']: assert isinstance(item, float) assert "matrix_transformation" in vendetta assert isinstance(vendetta['matrix_transformation'], list) assert len(vendetta['matrix_transformation']) == 4 for item in vendetta['matrix_transformation']: assert isinstance(item, list) assert len(item) == 4 for value in item: assert isinstance(value, float) assert "mirror" in vendetta assert isinstance(vendetta['mirror'], bool)
5,344,023
def get_actress_string(_movie, s):
    """Return the string of the actress names as per the naming convention specified

    Takes in the html contents to filter out the actress names"""
    a_list = get_actress_from_html(_movie, s)
    actress_string = ''
    # if javlibrary returns no actresses then we'll just say whatever we specified
    if len(a_list) == 0:
        return s['name-for-actress-if-blank']
    for actress in a_list:
        actress_string += actress + s['delimiter-between-multiple-actresses']
    # strip the last delimiter, we don't want it
    actress_string = actress_string[0:-1]
    return actress_string
5,344,024
def test_shortuuid_uuid():
    """Test shortuuid_to_uuid and uuid_to_shortuuid"""
    uuid = "5CF8D91E-DCEB-4CC3-BFF7-920B05564EB0"
    shortuuid = "JYsxugP9UjetmCbBCHXcmu"
    assert uuid_to_shortuuid(uuid) == shortuuid
    assert shortuuid_to_uuid(shortuuid) == uuid
5,344,025
def get_route_by_id():
    """
    GET /routes/<id>
    :return:
    """
    req = client.routes.get(domain=domain, route_id="6012d994e8d489e24a127e79")
    print(req.json())
5,344,026
def get_lessons_of_day(day):
    """
    Returns the lessons as a string for the given day webelement

    :param day: day webelement
    :return: list with the lessons of the given day
    """
    day_lessons = []
    to_iterate = day.find_elements_by_class_name('event-content')
    to_iterate.reverse()
    for lesson in to_iterate:
        text = lesson.text
        day_lessons.append(text)
    return day_lessons
5,344,027
def main(season=None):
    """Get a list of (winner,loser) pairs for every NCAA men's basketball game
    in the specified season (season==2010 means the 2010-2011 season).
    """
    today = datetime.datetime.today().date()
    if not season:
        # Figure out what season it is.
        season = today.year - 1 if today.month < 10 else today.year
    print("Getting data for the {}-{} season".format(season, season+1))
    season = int(season)

    # Get the list of pages to scrape.
    pages = []
    start_date = datetime.date(season, 10, 25)      # October 25th, before season
    end_date = datetime.date(season+1, 4, 20)       # April 20th, after season
    end_date = min(end_date, today)                 # Don't try to see the future.
    for n_days in range((end_date - start_date).days + 1):
        date = start_date + datetime.timedelta(days=n_days)
        pages.append(SITE.format(date.month, date.day, date.year))

    # Scrape each page.
    games = []
    try:
        for page in tqdm(pages):
            time.sleep(1)
            try:
                tables = pd.read_html(page)     # PANDAS MAGIC!!
                games.extend([get_win_lose_tuple(t) for t in tables])
            except ValueError as e:
                # Ignore the error "there weren't games that day."
                if e.args[0] == "No tables found":
                    continue
                else:
                    print(type(e).__name__ + ':', e)
                    raise
    finally:
        # Export the data.
        df = pd.DataFrame(games, columns=["Winner", "Loser"])
        df.to_csv("ncaa{}.csv".format(season), index=False)
5,344,028
async def test_if_fires_on_change_with_template_advanced(hass, start_ha, calls):
    """Test for firing on change with template advanced."""
    context = Context()
    await hass.async_block_till_done()
    hass.states.async_set("test.entity", "world", context=context)
    await hass.async_block_till_done()

    assert len(calls) == 1
    assert calls[0].context.parent_id == context.id
    assert calls[0].data["some"] == "template - test.entity - hello - world - None"
5,344,029
def version_map_to_rst(full_version, version_family, ver_map): """ Return a version of the version map that is suitable for printing. """ none_found_msg = '* No SIMP Mapping Data Found for "' + full_version + '"' # Easy cop out if not ver_map: return none_found_msg simp_release_list = __generate_version_list(full_version, version_family) # Build the Release mapping table for insertion into the docs release_mapping_list = [] ver_map_releases = ver_map.keys() simp_release = full_version if not simp_release in ver_map_releases: for ver in simp_release_list: if ver in ver_map_releases: simp_release = ver print("Warning: version mapper falling back to " + simp_release, file=sys.stderr) else: simp_release = None if simp_release: release_mapping_list.append('* **SIMP ' + simp_release + '**') for os_key in sorted(ver_map[simp_release].keys()): release_mapping_list.append("\n * **" + os_key + '**') for i, iso in enumerate(ver_map[simp_release][os_key]['isos']): release_mapping_list.append("\n * **ISO #" + str(i+1) + ":** " + iso['name']) release_mapping_list.append(" * **Checksum:** " + iso['checksum']) if not release_mapping_list: release_mapping_list.append(none_found_msg) # Trailing newline release_mapping_list.append('') return "\n".join(release_mapping_list)
5,344,030
def progressbar(total, alive, desc=None): """ progressbar for single file uploading, downloading, copying :param cpfilepath: checkpoint file :param total: file size :param alive: 0-init state; 1-task complete; 2-task failed :param desc: :return: """ with tqdm(total=total, unit="B", unit_scale=True, mininterval=BAR_MININTERVAL, miniters=BAR_MINITERS, desc=desc, unit_divisor=1024, ncols=BAR_NCOLS, postfix={'td': 0, 'tps': 0}) as pbar: size = 0 tmp_latency = '0ms' tmp_tps = '0' while size < total: size = pbar_get_size() size = size if size < total else total updata = size - pbar.n updata = check_value_threshold(updata, 0, total) pbar.set_description(desc) pbar.update(updata) td, tps = get_latency_tps() if td != tmp_latency or tps != tmp_tps: pbar.set_postfix(td=td, tps=tps) tmp_latency, tmp_tps = td, tps if alive.value > 0: if alive.value == 1 and not globl.get_value('force_exit').value: pbar.update(total - pbar.n) break time.sleep(BAR_SLEEP_FOR_UPDATE)
5,344,031
def run_command(name, command, cwd, module, logdir, results): """Run a command and log the stdout and stderr. :arg command: command argument list :arg cwd: current directory to run the command :arg module: the ansible module object :arg logdir: where to place stdout and stderr logs :arg results: the module results dictionary """ log.info('[%s] %s' % (cwd, ' '.join(command))) rc, out, err = module.run_command(command, cwd=cwd) logfile_out = os.path.join(logdir, '%s.out' % name) with open(logfile_out, 'w') as f: f.write(out) results['changed'] = True results['logfiles'].append(logfile_out) logfile_err = os.path.join(logdir, '%s.err' % name) with open(logfile_err, 'w') as f: f.write(err) results['changed'] = True results['logfiles'].append(logfile_err) if rc != 0: log.error('%s failed; rc=%d' % (name, rc)) module.fail_json( msg='%s command failed. See log files %s and %s' % (name, logfile_out, logfile_err), rc=rc, stdout=tail(out), stderr=tail(err), )
5,344,032
def resize_cluster(checkpoint, new_size, debug=True):
    """Resize cluster to given size.

    Inputs:
        checkpoint: A scalar tensor of type string, new peers should be able
            to restore to this checkpoint.
        new_size: A scalar tensor of type int32, the new cluster size.
    Returns:
        A pair of scalar tensors (changed, keep) of type bool,
        {changed} indicates if the cluster has been changed,
        {keep} indicates if the current peer is still in the new cluster,
        the peer should quit if it is not in the new cluster.
    """
    return _op_lib.kungfu_resize_cluster(checkpoint, new_size, debug=debug)
5,344,033
def log_test():
    """Issue logs at each level"""
    print('Issuing five messages...')
    for l, n in zip([logger], ['logger']):
        print(n)
        l.debug('A debug message')
        l.info('A info message')
        l.warning('A warning message')
        l.error('A error message')
        l.critical('A critical message')
        print(f'...done with {n}')
    print('...done')
5,344,034
def ising_hamiltonian(n_qubits, g, h):
    """
    Construct the hamiltonian matrix of Ising model.

    Args:
        n_qubits: int, Number of qubits
        g: float, Transverse magnetic field
        h: float, Longitudinal magnetic field
    """
    ham_matrix = 0

    # Nearest-neighbor interaction
    spin_coupling = jnp.kron(PauliBasis[3], PauliBasis[3])
    for i in range(n_qubits - 1):
        ham_matrix -= jnp.kron(jnp.kron(jnp.eye(2 ** i), spin_coupling), jnp.eye(2 ** (n_qubits - 2 - i)))
    ham_matrix -= jnp.kron(jnp.kron(PauliBasis[3], jnp.eye(2 ** (n_qubits - 2))), PauliBasis[3])  # Periodic B.C

    # Transverse magnetic field
    for i in range(n_qubits):
        ham_matrix -= g * jnp.kron(jnp.kron(jnp.eye(2 ** i), PauliBasis[1]), jnp.eye(2 ** (n_qubits - 1 - i)))

    # Longitudinal magnetic field
    for i in range(n_qubits):
        ham_matrix -= h * jnp.kron(jnp.kron(jnp.eye(2 ** i), PauliBasis[3]), jnp.eye(2 ** (n_qubits - 1 - i)))

    return ham_matrix
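A minimal usage sketch. PauliBasis is not defined in the snippet; here it is assumed to be the list [I, X, Y, Z] of 2x2 jax.numpy matrices, which is what the function indexes into:

import jax.numpy as jnp

# Assumed (hypothetical) Pauli basis used by ising_hamiltonian above.
PauliBasis = [
    jnp.eye(2),
    jnp.array([[0.0, 1.0], [1.0, 0.0]]),      # X
    jnp.array([[0.0, -1.0j], [1.0j, 0.0]]),   # Y
    jnp.array([[1.0, 0.0], [0.0, -1.0]]),     # Z
]

H = ising_hamiltonian(n_qubits=3, g=1.0, h=0.5)
print(H.shape)  # (8, 8) dense Hamiltonian for 3 qubits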
5,344,035
def declare_eq_bus_vm_approx(model, index_set, PTDF=None, rel_ptdf_tol=None, abs_ptdf_tol=None): """ Create the equality constraints or expressions for voltage magnitude (from PTDF approximation) at the bus """ m = model con_set = decl.declare_set("_con_eq_bus_vm_approx_set", model, index_set) vm_is_var = isinstance(m.vm, pe.Var) if vm_is_var: m.eq_vm_bus = pe.Constraint(con_set) else: if not isinstance(m.vm, pe.Expression): raise Exception("Unrecognized type for m.vm", m.vm.pprint()) if PTDF is None: return for bus_name in con_set: expr = \ get_vm_expr_ptdf_approx(m, bus_name, PTDF, rel_ptdf_tol=rel_ptdf_tol, abs_ptdf_tol=abs_ptdf_tol) if vm_is_var: m.eq_vm_bus[bus_name] = \ m.vm[bus_name] == expr else: m.vm[bus_name] = expr
5,344,036
def test_main( mock_building_parser, mock_return_logger, config_dict, db_connection, monkeypatch, test_dir, ): """Test main()""" def mock_parser(*args, **kwargs): parser = Namespace( cache_dir=(test_dir / "test_outputs" / "test_outputs_uniprot"), nodelete_cache=False, config=None, classes=None, database="fake_database_path", ec=True, force=False, families=None, genbank_accessions=None, genera=None, get_pages=True, kingdoms=None, log=None, nodelete=False, output=None, retries=10, sequence=True, seq_update=True, subfamilies=True, species=None, strains=None, streamline=None, timeout=45, uniprot_accessions=None, uniprot_batch_size=150, uniprot_data=None, verbose=False, pdb=True, skip_uniprot_accessions=None, use_uniprot_cache=None, ) return parser def mock_return_none(*args, **kwargs): return def mock_connect_existing_db(*args, **kwards): return db_connection, None, "cache_dir" def mock_get_expansion_configuration(*args, **kwards): return config_dict, set(), set(), set(), dict(), set() def mock_get_genbank_accessions(*args, **kwards): return {1: 1, 2:2, 3:3} def mock_get_uniprot_data(*args, **kwards): return {1: {'ec': {1,2,3}, 'pdb': {1,2,3}}, 2: {'ec': {1,2,3}, 'pdb': {1,2,3}}, 3: {'ec': {1,2,3}, 'pdb': {1,2,3}}}, {1, 2, 3} monkeypatch.setattr(uniprot_parser, "build_parser", mock_building_parser) monkeypatch.setattr(ArgumentParser, "parse_args", mock_parser) monkeypatch.setattr(saint_logger, "config_logger", mock_return_logger) monkeypatch.setattr(get_uniprot_data, "connect_existing_db", mock_connect_existing_db) monkeypatch.setattr("cazy_webscraper.expand.uniprot.get_uniprot_data.make_output_directory", mock_return_none) monkeypatch.setattr(get_uniprot_data, "get_expansion_configuration", mock_get_expansion_configuration) monkeypatch.setattr(sql_interface, "log_scrape_in_db", mock_return_none) monkeypatch.setattr(get_selected_gbks, "get_genbank_accessions", mock_get_genbank_accessions) monkeypatch.setattr(get_uniprot_data, "get_uniprot_accessions", mock_get_genbank_accessions) monkeypatch.setattr(get_uniprot_data, "get_uniprot_data", mock_get_uniprot_data) monkeypatch.setattr(get_uniprot_data, "add_uniprot_accessions", mock_return_none) monkeypatch.setattr(get_uniprot_data, "add_ec_numbers", mock_return_none) monkeypatch.setattr(get_uniprot_data, "add_pdb_accessions", mock_return_none) monkeypatch.setattr(cazy_webscraper, "closing_message", mock_return_none) output = test_dir / "test_outputs" / "test_outputs_uniprot" output.mkdir(parents=True, exist_ok=True) get_uniprot_data.main() shutil.rmtree((test_dir / "test_outputs" / "test_outputs_uniprot")) output.mkdir(parents=True, exist_ok=True)
5,344,037
def _slots_from_params(func):
    """List out slot names based on the names of parameters of func

    Usage: __slots__ = _slots_from_params(__init__)
    """
    funcsig = signature(func)
    slots = list(funcsig.parameters)
    slots.remove('self')
    return slots
5,344,038
def gnd_to_wd_id(gnd_id):
    """
    Searches for a Wikidata entry which contains the provided GND ID.
    Outputs the Wikidata ID (if found).

    ---------
    gnd_id : str
        GND ID of entity.

    Returns
    -----------
    str.
    """
    url = 'https://query.wikidata.org/bigdata/namespace/wdq/sparql'
    try:
        time.sleep(2.0)
        data = requests.get(url, params={'query': "SELECT ?Entity WHERE {?Entity wdt:P227 '%s'.}" % (gnd_id),
                                         'format': 'json'}).json()
        result = []
        for item in data['results']['bindings']:
            if result == []:
                result.append(item['Entity']['value'])
        wd_id = re.findall("[Q]\d*", str(result))
        return wd_id[0]
    except:
        return "Exception"
5,344,039
def get_instance_category(entry) -> Optional[str]:
    """Determines the instance category for which the entry was submitted.

    If it does not match the config of any instance category, returns None.
    """
    instance_categories = RidehailEnv.DIMACS_CONFIGS.ALL_CONFIGS
    entry_config = entry["config"]
    keys_to_check = list(entry_config.keys())
    try:
        keys_to_check.remove("nickname")
    except:
        return None
    for category_name, category_config in instance_categories.items():
        if all((entry_config[key] == category_config[key] for key in keys_to_check)):
            return category_name
    return None
5,344,040
def extendheader(table, fields):
    """
    Extend header row in the given table. E.g.::

        >>> import petl as etl
        >>> table1 = [['foo'],
        ...           ['a', 1, True],
        ...           ['b', 2, False]]
        >>> table2 = etl.extendheader(table1, ['bar', 'baz'])
        >>> table2
        +-----+-----+-------+
        | foo | bar | baz   |
        +=====+=====+=======+
        | 'a' |   1 | True  |
        +-----+-----+-------+
        | 'b' |   2 | False |
        +-----+-----+-------+

    See also :func:`petl.transform.headers.setheader`,
    :func:`petl.transform.headers.pushheader`.

    """

    return ExtendHeaderView(table, fields)
5,344,041
def _rzz(theta: float, q0: cirq.Qid, q1: cirq.Qid) -> cirq.OP_TREE:
    """Implements the Rzz Ising coupling gate (i.e. exp(-1j * theta * zz))
    using Sycamore gates.

    Args:
        theta: The rotation parameter of Rzz Ising coupling gate.
        q0: First qubit to operate on
        q1: Second qubit to operate on

    Yields:
        The `cirq.OP_TREE` that implements the Rzz Ising coupling gate using
        Sycamore gates.
    """
    phi = -np.pi / 24
    c_phi = np.cos(2 * phi)
    target_unitary = cirq.unitary(cirq.ZZPowGate(exponent=2 * theta / np.pi, global_shift=-0.5))
    c2 = abs(np.sin(theta) if abs(np.cos(theta)) > c_phi else np.cos(theta)) / c_phi

    # Prepare program that has same Schmidt coefficients as exp(-1j theta ZZ)
    program = cirq.Circuit(ops.SYC(q0, q1), cirq.rx(2 * np.arccos(c2)).on(q1), ops.SYC(q0, q1))

    yield _create_corrected_circuit(target_unitary, program, q0, q1)
5,344,042
def get_teamcount():
    """Get a count of teams."""
    # FINISHED FOR SASO
    teamlist = get_list_of_teams()
    return len(teamlist)
5,344,043
def make_adjacencyW(I, D, sigma):
    """Create adjacency matrix with a Gaussian kernel.

    Args:
        I (numpy array): for each vertex the ids to its nnn linked vertices
            + first column of identity.
        D (numpy array): for each data the l2 distances to its nnn linked vertices
            + first column of zeros.
        sigma (float): Bandwidth of the Gaussian kernel.

    Returns:
        csr_matrix: affinity matrix of the graph.
    """
    V, k = I.shape
    k = k - 1
    indices = np.reshape(np.delete(I, 0, 1), (1, -1))
    indptr = np.multiply(k, np.arange(V + 1))

    def exp_ker(d):
        return np.exp(-d / sigma ** 2)

    exp_ker = np.vectorize(exp_ker)
    res_D = exp_ker(D)
    data = np.reshape(np.delete(res_D, 0, 1), (1, -1))
    adj_matrix = csr_matrix((data[0], indices[0], indptr), shape=(V, V))
    return adj_matrix
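A minimal usage sketch with hypothetical toy arrays (assumes numpy and scipy.sparse.csr_matrix are imported as the function expects):

import numpy as np

# 4 vertices, 2 nearest neighbours each; column 0 is the vertex itself / zero distance.
I = np.array([[0, 1, 2], [1, 0, 2], [2, 1, 3], [3, 2, 1]])
D = np.array([[0.0, 0.5, 1.0], [0.0, 0.5, 0.8], [0.0, 0.8, 1.2], [0.0, 1.2, 1.5]])
W = make_adjacencyW(I, D, sigma=1.0)
print(W.shape)  # (4, 4) sparse affinity matrix, two entries per row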
5,344,044
def blackbody2d(wavelengths, temperature):
    """
    Planck function evaluated for a vector of wavelengths in units of meters
    and temperature in units of Kelvin

    Parameters
    ----------
    wavelengths : `~numpy.ndarray`
        Wavelength array in units of meters
    temperature : `~numpy.ndarray`
        Temperature in units of Kelvin

    Returns
    -------
    pl : `~numpy.ndarray`
        Planck function evaluated at each wavelength
    """
    return blackbody_lambda(wavelengths, temperature)
5,344,045
def record_metrics(metrics, args): """ Record the metrics recorded in the metrics dictionary to a metrics file """ with open('attacker_metrics/input_reduction_metrics_{}'.format(args.file_num), 'a') as f: f.write("META DATA\n") f.write("---------\n") f.write("Model Name: {}\n".format(args.model_name)) f.write("Beam Size: {}\n".format(args.beam_size)) f.write("Baseline 1 Model File: {}\n".format(args.baseline_1_model_file)) f.write("Baseline 2 Model File: {}\n".format(args.baseline_2_model_file)) f.write("Cuda: {}\n".format(args.cuda)) f.write("\nBASELINE 1 MODEL METRICS\n") f.write("----------------------------------------\n") for key, val in metrics['baseline_1_model'].items(): f.write("{}: {}\n".format(key, val)) f.write("\nBASELINE 2 MODEL METRICS\n") f.write("----------------------------------------\n") for key, val in metrics['baseline_2_model'].items(): f.write("{}: {}\n".format(key, val))
5,344,046
def bookShop():
    """
    This program solves the following exercise:
        Book Shop
        Link: https://cses.fi/problemset/task/1158

    It returns the maximum number of pages that can be obtained by buying
    books, given the price and page count of the available books and the
    amount of money available.
    """
    inputLine = input()
    inputArray = inputLine.split()
    inputArray = [int(x) for x in inputArray]
    numBooks = inputArray[0]
    totalPrice = inputArray[1]

    inputLine = input()
    inputArray = inputLine.split()
    inputArray = [int(x) for x in inputArray]
    prices = inputArray

    inputLine = input()
    inputArray = inputLine.split()
    inputArray = [int(x) for x in inputArray]
    pages = inputArray

    bag = [[0 for y in range(totalPrice + 1)] for x in range(numBooks + 1)]
    for i in range(1, len(bag)):
        price = prices[i - 1]
        page = pages[i - 1]
        for j in range(1, len(bag[0])):
            if j - price < 0:
                bag[i][j] = bag[i - 1][j]
            elif bag[i - 1][j - price] + page > bag[i - 1][j]:
                bag[i][j] = bag[i - 1][j - price] + page
            else:
                bag[i][j] = bag[i - 1][j]
    return bag[-1][-1]
5,344,047
def test_data_modules(dm_cls: Type[LightningDataModule], stratified: bool) -> None:
    """Test the datamodules."""
    dm = _create_dm(dm_cls, stratified)
    loader = dm.train_dataloader()
    batch = next(iter(loader))
    assert batch.x.size() == torch.Size([BATCHSIZE, *dm.size()])
    assert batch.s.size() == torch.Size([BATCHSIZE, 1])
    assert batch.y.size() == torch.Size([BATCHSIZE, 1])
    F.cross_entropy(torch.rand((BATCHSIZE, dm.num_sens)), batch.s.squeeze(-1))
    F.cross_entropy(torch.rand((BATCHSIZE, dm.num_classes)), batch.y.squeeze(-1))
    assert dm.num_classes
    assert dm.num_sens
5,344,048
def pdf(x, nu, sigma):
    """
    PDF for the Rice distribution.
    """
    if x <= 0:
        return mpmath.mp.zero
    with mpmath.extradps(5):
        x = mpmath.mpf(x)
        nu = mpmath.mpf(nu)
        sigma = mpmath.mpf(sigma)
        sigma2 = sigma**2
        p = ((x / sigma2) * mpmath.exp(-(x**2 + nu**2)/(2*sigma2)) *
             mpmath.besseli(0, x*nu/sigma2))
    return p
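A quick sanity check against the closed-form density (a sketch assuming mpmath is imported as the function requires):

print(pdf(1.0, 1.0, 1.0))  # (1/1) * exp(-1) * I0(1), roughly 0.4658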
5,344,049
def get_310_prob(score_prob_dct: dict) -> typing.Dict[str, float]:
    """get home win, draw, away win prob"""
    prob = {}
    result_dct = get_score_pairs(0)
    type_dict = ['home_win', 'draw', 'away_win']
    for i in type_dict:
        prob[i] = get_one_prob(score_prob_dct, result_dct, i)
    sum_value = float(sum(prob.values()))
    if sum_value != 1:
        avg_value = round((1 - sum_value) / 3, 2)
        prob['home_win'] += avg_value
        prob['draw'] += avg_value
        prob['away_win'] += avg_value
    return prob
5,344,050
def test__getBarFCNameNL():
    """Test __getBarFCNameNL function."""
    tests = {None: None,
             0: 'Zware storm',
             500: 'Zware storm',
             973: 'Zware storm',
             974: 'Storm',
             981: 'Storm',
             989: 'Storm',
             990: 'Regen en wind',
             996: 'Regen en wind',
             1001: 'Regen en wind',
             1002: 'Bewolkt',
             1006: 'Bewolkt',
             1009: 'Bewolkt',
             1010: 'Veranderlijk',
             1016: 'Veranderlijk',
             1021: 'Veranderlijk',
             1022: 'Mooi',
             1029: 'Mooi',
             1034: 'Mooi',
             1035: 'Zeer mooi',
             1100: 'Zeer mooi',
             9999: 'Zeer mooi'}

    for k, expected in tests.items():
        value = __getBarFCNameNL(k)
        assert(value == expected)
5,344,051
def load_project_data(storage):
    """Load project data using provided open_func and project directory."""
    # Load items and extractors from project
    schemas = storage.open('items.json')
    extractors = storage.open('extractors.json')
    # Load spiders and templates
    spider_loader = SpiderLoader(storage)
    spiders = {}
    for spider_name in spider_loader.spider_names:
        spider = spider_loader[spider_name]
        crawler = IblSpider(spider_name, spider, schemas, extractors,
                            Settings())
        spiders[spider_name] = (crawler, spider)
    return schemas, extractors, spiders
5,344,052
def bidir_bfs(ADT,start,end): """Bidirectional Breadth First Search""" #create queues with rule of first-in-first-out #queue1 for bfs from start #queue2 for bfs from end queue1=[] queue1.append(start) queue2=[] queue2.append(end) visited1=[] visited2=[] while queue1 or queue2: #keep track of the visited vertices if queue1: current1=queue1.pop(0) visited1.append(current1) ADT.visit(current1) if queue2: current2=queue2.pop(0) visited2.append(current2) ADT.visit(current2) #intersection of two trees stop=False for i in ADT.vertex(): if i in visited1 and i in visited2: stop=True break if stop: break #do not revisit a vertex for newpos in ADT.edge(current1): if newpos not in visited1 and newpos not in queue1: queue1.append(newpos) for newpos in ADT.edge(current2): if newpos not in visited2 and newpos not in queue2: queue2.append(newpos)
5,344,053
def PBH_HASH_update(db, hash_name, hash_field_list): """ Update object in PBH_HASH table """ ctx = click.get_current_context() hash_name_validator(ctx, db.cfgdb_pipe, hash_name) table = str(PBH_HASH_CDB) key = str(hash_name) data = {} if hash_field_list is not None: hash_field_list_validator(ctx, db.cfgdb_pipe, hash_field_list) data[PBH_HASH_HASH_FIELD_LIST] = hash_field_list.split(",") if not data: exit_with_error("Error: Failed to update PBH hash: options are not provided", fg="red") cap = pbh_capabilities_query(db.db, PBH_HASH_CAPABILITIES_KEY) if cap is None: exit_with_error("Error: Failed to query PBH hash capabilities: configuration is not available", fg="red") try: update_entry(db.cfgdb_pipe, cap, table, key, data) except Exception as err: exit_with_error("Error: {}".format(err), fg="red")
5,344,054
def home(): """ Route to display home page and form to receive text from user for speech synthesis. """ form = TextToSpeechForm() # Instantiates a client client = texttospeech.TextToSpeechClient() # Get the language list voices = client.list_voices() voice_codes_list = list(dict.fromkeys([voice.language_codes[0] for voice in voices.voices])) language_list = [(ind + 1, voice) for ind, voice in enumerate(voice_codes_list)] if request.method == 'POST': lang = dict(language_list).get(int(form.language_options.data)) gender = dict([(1, texttospeech.SsmlVoiceGender.MALE), (2, texttospeech.SsmlVoiceGender.FEMALE)]).get(int(form.gender_options.data)) messages = json.dumps({'text': form.text_field.data, 'language': lang, 'gender': gender}) return redirect(url_for('.translate', messages=messages)) return render_template('main.html', form=form)
5,344,055
def load_c6_file(filename, is_radar): """ Loads ice scattering LUTs from a file (based on Yang et al., JAS, 2013). Parameters ---------- filename: str The name of the file storing the Mie scattering parameters is_radar: bool If True, the first LUT column is treated as the frequency, otherwise, wavelength. Returns ------- my_df: xarray.Dataset The xarray Dataset storing the scattering data, including descriptive metadata. """ if is_radar is True: my_df = pd.read_csv(filename, names=["frequency", "p_diam", "p_diam_eq_A", "p_diam_eq_V", "V", "A", "beta_p", "scat_p", "alpha_p", "beta_p_cross"]) else: my_df = pd.read_csv(filename, names=["wavelength", "p_diam", "p_diam_eq_A", "p_diam_eq_V", "V", "A", "beta_p", "scat_p", "alpha_p", "beta_p_cross"]) my_df["alpha_p"] = my_df["alpha_p"] * 1e-12 my_df["beta_p"] = my_df["beta_p"] * 1e-12 my_df["beta_p_cross"] = my_df["beta_p_cross"] * 1e-12 my_df["scat_p"] = my_df["scat_p"] * 1e-12 my_df["p_diam"] = 1e-6 * my_df["p_diam"] my_df["p_diam_eq_A"] = 1e-6 * my_df["p_diam_eq_A"] my_df["p_diam_eq_V"] = 1e-6 * my_df["p_diam_eq_V"] my_df["A"] = my_df["A"] * 1e-12 my_df["V"] = my_df["V"] * 1e-18 my_df = my_df.to_xarray() if is_radar is True: my_df["frequency"].attrs["units"] = "GHz" my_df["frequency"].attrs["long_name"] = "Pulse frequency" my_df["frequency"].attrs["standard_name"] = "Frequency" else: my_df["wavelength"].attrs["units"] = "microns" my_df["wavelength"].attrs["long_name"] = "Wavelength of beam" my_df["wavelength"].attrs["standard_name"] = "Wavelength" my_df["p_diam"].attrs["units"] = "meters" my_df["p_diam"].attrs["long_name"] = "Maximum dimension of the particle" my_df['p_diam'].attrs["standard_name"] = "Maximum dimension" my_df["p_diam_eq_A"].attrs["units"] = "meters" my_df["p_diam_eq_A"].attrs["long_name"] = "Diameter of equivalent projected area sphere" my_df['p_diam_eq_A'].attrs["standard_name"] = "Diameter of equivalent A sphere" my_df["p_diam_eq_V"].attrs["units"] = "meters" my_df["p_diam_eq_V"].attrs["long_name"] = "Diameter of equivalent volume sphere" my_df['p_diam_eq_V'].attrs["standard_name"] = "Diameter of equivalent V sphere" my_df["A"].attrs["units"] = "meters^2" my_df["A"].attrs["long_name"] = "Projected area of particle" my_df['A'].attrs["standard_name"] = "Projected area" my_df["V"].attrs["units"] = "meters^3" my_df["V"].attrs["long_name"] = "Particle volume" my_df['V'].attrs["standard_name"] = "Volume" my_df["scat_p"].attrs["units"] = "microns^2" my_df["scat_p"].attrs["long_name"] = "Scattering cross section" my_df["scat_p"].attrs["standard_name"] = "Scat_cross_section" my_df["beta_p"].attrs["units"] = "meters^2" my_df["beta_p"].attrs["long_name"] = "Backscattering cross section" my_df["beta_p"].attrs["standard_name"] = "Scat_cross_section_back" my_df["alpha_p"].attrs["units"] = "meters^2" my_df["alpha_p"].attrs["long_name"] = "Extinction cross section" my_df["alpha_p"].attrs["standard_name"] = "Ext_cross_section" my_df["beta_p_cross"].attrs["units"] = "meters^2" my_df["beta_p_cross"].attrs["long_name"] = "Cross-polar backscattering cross section" my_df["beta_p_cross"].attrs["standard_name"] = "Scat_cross_section_back_crosspol" return my_df
5,344,056
def gwrite(document: vp.Document, output: typing.TextIO, profile: str): """ Write gcode or other ascii files for the vpype pipeline. The output format can be customized by the user heavily to an extent that you can also output most known non-gcode ascii text files. """ gwrite_config = vp.CONFIG_MANAGER.config["gwrite"] # If no profile was provided, try to use a default if not profile: # Try to get the default profile from the config if "default_profile" in gwrite_config: profile = gwrite_config["default_profile"] else: raise click.BadParameter( "no gwrite profile provided on the commandline and no default gwrite " + "profile configured in the vpype configuration. This can be done using " + 'the "default_default" key in the "gwrite" section' ) # Check that the profile is actually there, we can be sure that the `gwrite` # part exists as there are several default profiles. if profile not in gwrite_config: profiles = [p for p in gwrite_config.keys() if p != "default_profile"] raise click.BadParameter( "gwrite profile " + profile + " not found in vpype configuration. Available gwrite profiles: " + ", ".join(profiles) ) # Read the config for the profile from the main vpype config = gwrite_config[profile] document_start = config.get("document_start", None) document_end = config.get("document_end", None) layer_start = config.get("layer_start", None) layer_end = config.get("layer_end", None) layer_join = config.get("layer_join", None) line_start = config.get("line_start", None) line_end = config.get("line_end", None) line_join = config.get("line_join", None) segment_first = config.get("segment_first", None) segment = config.get("segment", None) segment_last = config.get("segment_last", None) unit = config.get("unit", "mm") offset_x = config.get("offset_x", 0.0) offset_y = config.get("offset_y", 0.0) scale_x = config.get("scale_x", 1.0) scale_y = config.get("scale_y", 1.0) # transform the document according to the desired parameters orig_document = document document = copy.deepcopy(document) # do NOT affect the pipeline's document unit_scale = vp.convert_length(unit) document.scale(scale_x / unit_scale, scale_y / unit_scale) document.translate(offset_x, offset_y) invert_x = config.get("invert_x", False) invert_y = config.get("invert_y", False) # transform the document according to inversion parameters if invert_x or invert_y: document = invert_axis(document, invert_x, invert_y) # process file filename = output.name if document_start is not None: output.write(document_start.format(filename=filename)) last_x = 0 last_y = 0 xx = 0 yy = 0 lastlayer_index = len(document.layers.values()) - 1 for layer_index, layer_id in enumerate(document.layers): layer = document.layers[layer_id] if layer_start is not None: output.write( layer_start.format( x=last_x, y=last_y, ix=xx, iy=yy, index=layer_index, index1=layer_index + 1, layer_index=layer_index, layer_index1=layer_index + 1, layer_id=layer_id, filename=filename, ) ) lastlines_index = len(layer) - 1 for lines_index, line in enumerate(layer): if line_start is not None: output.write( line_start.format( x=last_x, y=last_y, ix=xx, iy=yy, index=lines_index, index1=lines_index + 1, lines_index=lines_index, lines_index1=lines_index + 1, layer_index=layer_index, layer_index1=layer_index + 1, layer_id=layer_id, filename=filename, ) ) segment_last_index = len(line) - 1 for segment_index, seg in enumerate(line): x = seg.real y = seg.imag dx = x - last_x dy = y - last_y idx = int(round(x - xx)) idy = int(round(y - yy)) xx += idx yy += idy if segment_first is not None and 
segment_index == 0: seg_write = segment_first elif segment_last is not None and segment_index == segment_last_index: seg_write = segment_last else: seg_write = segment if seg_write is not None: output.write( seg_write.format( x=x, y=y, dx=dx, dy=dy, _x=-x, _y=-y, _dx=-dx, _dy=-dy, ix=xx, iy=yy, idx=idx, idy=idy, index=segment_index, index1=segment_index + 1, segment_index=segment_index, segment_index1=segment_index + 1, lines_index=lines_index, lines_index1=lines_index + 1, layer_index=layer_index, layer_index1=layer_index + 1, layer_id=layer_id, filename=filename, ) ) last_x = x last_y = y if line_end is not None: output.write( line_end.format( x=last_x, y=last_y, ix=xx, iy=yy, index=lines_index, index1=lines_index + 1, lines_index=lines_index, lines_index1=lines_index + 1, layer_index=layer_index, layer_index1=layer_index + 1, layer_id=layer_id, filename=filename, ) ) if line_join is not None and lines_index != lastlines_index: output.write( line_join.format( x=last_x, y=last_y, ix=xx, iy=yy, index=lines_index, index1=lines_index + 1, lines_index=lines_index, lines_index1=lines_index + 1, layer_index=layer_index, layer_index1=layer_index + 1, layer_id=layer_id, filename=filename, ) ) if layer_end is not None: output.write( layer_end.format( x=last_x, y=last_y, ix=xx, iy=yy, index=layer_index, index1=layer_index + 1, layer_index=layer_index, layer_index1=layer_index + 1, layer_id=layer_id, filename=filename, ) ) if layer_join is not None and layer_index != lastlayer_index: output.write( layer_join.format( x=last_x, y=last_y, ix=xx, iy=yy, index=layer_index, index1=layer_index + 1, layer_index=layer_index, layer_index1=layer_index + 1, layer_id=layer_id, filename=filename, ) ) if document_end is not None: output.write(document_end.format(filename=filename)) output.flush() output.close() info = config.get("info", None) if info: print(info) return orig_document
5,344,057
def retrieve_latin_text_boxes(data_dir: str, annotation_file: TextIO) -> None: """ Extract the text boxes from the source dataset for which the language is in VALID_LANGUAGES Parameters ---------- data_dir: Directory containing the text boxes. annotation_file: File where the information on each image kept in the dataset are written. """ files = os.listdir(data_dir) assert "gt.txt" in files with open(os.path.join(data_dir, "gt.txt")) as gt_file: image_prefix = os.path.basename(data_dir) lines = gt_file.readlines() for line in lines: image_name, language, word = line.split(",", 2) if language in VALID_LANGUAGES and is_word_valid(word): new_image_name = f"{image_prefix}_{image_name}" # open the file and save it to the right folder instead of simply copying the file # to save it in the right format and avoid libpng warning when training image = cv2.imread(os.path.join(data_dir, image_name)) cv2.imwrite( os.path.join(cfg.training_text_boxes_dir, new_image_name), image ) annotation_file.write(f"{new_image_name},{word}")
5,344,058
def txt_analysis(file_name, pattern, path):
    """Tries to find the pattern in the text of UTF-8 encoded files"""
    try:
        with open(file_name, "r") as file:
            text = file.read().lower()
            counter = text.count(pattern)
            show(path, file_name, counter)
    except UnicodeDecodeError:
        pass
5,344,059
def test_draw_out_of_samples(proposal):
    """Assert populated is set to false when the last sample is used."""
    N = 5
    proposal.populated = True
    proposal.indices = [0]
    proposal.samples = np.array([0, 1, 2, 3, 4])
    sample = AnalyticProposal.draw(proposal, 1, N=N)
    assert sample == 0
    assert proposal.indices == []
    assert proposal.populated is False
5,344,060
def create_datasets(dataset, num_valid=0, max_epochs=1, batch_size=128, cache=True, **kwargs): """ Takes `.tfrecord` files and creates `tf.data.Dataset` objects. If `dataset` is `hdfs://<path>/data/` or `hdfs://<path>/data.tfrecord`, then there should also be a JSON file called `hdfs://<path>/data.json` which describes the features and provides additional meta information. The format of the JSON file should be, for example:: { "features": { "embedding": {"shape": [N], "dtype": "float32" }, "author_id": { "shape": [], "dtype": "int64" }, "author_topic": { "shape": [], "dtype": "string" }, "item_id": { "shape": [], "dtype": "int64" }, "item_topic": { "shape": [], "dtype": "string" }, }, "meta": { "embedding_dim": 300, "topics": ["tv", "politics", "sports"], } } If `dataset` is a path to a directory, all `.tfrecord` files in the directory will be loaded. Args ---- dataset: A string pointing to a folder or .tfrecord file num_valid: If > 0, split data into training and validation sets max_epochs: Training data will be iterated this many times batch_size: How many data points to return at once cache: Keep dataset in memory to speed up epochs Returns ------- One or two `tf.data.Dataset` objects and a dictionary containing meta information """ # load meta information if dataset.lower().endswith('.tfrecord'): meta_info_file = dataset[:-9] + '.json' else: meta_info_file = dataset.rstrip('/') + '.json' with tf.gfile.GFile(meta_info_file, 'r') as handle: meta_info = json.load(handle) meta_info, features = meta_info['meta'], meta_info['features'] # extract description of features present in the dataset for name, kwargs in features.items(): features[name] = tf.FixedLenFeature(**kwargs) # turn serialized example into tensors def _parse_function(serialized): return tf.parse_single_example(serialized=serialized, features=features) if dataset.endswith('.tfrecord'): files = [dataset] else: files = tf.gfile.Glob(os.path.join(dataset, '*.tfrecord')) dataset = tf.data.TFRecordDataset(files) dataset = dataset.map(_parse_function, num_parallel_calls=8) if num_valid > 0: # split dataset into training and validation sets dataset_valid = dataset.take(num_valid) dataset_train = dataset.skip(num_valid) if cache: dataset_valid = dataset_valid.cache() dataset_train = dataset_train.cache() # take into account hyperparameters dataset_train = dataset_train.shuffle(10000).repeat(max_epochs).batch(batch_size) dataset_valid = dataset_valid.batch(batch_size) return dataset_train, dataset_valid, meta_info else: if cache: dataset = dataset.cache() dataset = dataset.shuffle(1000).repeat(max_epochs).batch(batch_size) return dataset, meta_info
5,344,061
def user_int(arg):
    """
    Convert a :class:`~int` to a `USER` instruction.

    :param arg: Int that represents instruction arguments.
    :return: Fully-qualified `USER` instruction.
    """
    return str(arg)
5,344,062
def timezoneAdjuster(context, dt):
    """Convenience: new datetime with given timezone."""
    newtz = ITimezoneFactory(context)
    return dt.astimezone(newtz)
5,344,063
def turn(board, player):
    """Simulate a single player's turn."""
    log.info('Beginning turn of player %s.', player.name)
    if player.jailed:
        player.try_jailout(board)
    else:
        player.consider_developing(board)
    diceroll, doubles = roll()
    player.advance(diceroll, board)
    if doubles and not player.jailed and not player.bankrupt:
        diceroll, doubles = roll()
        player.advance(diceroll, board)
        if doubles:
            player.enjail()
5,344,064
def make_adder(n): """Return a function that takes one argument k and returns k + n. >>> add_three = make_adder(3) >>> add_three(4) 7 """ def adder(k): return k + n return adder
5,344,065
def IsWritable(Feature): """IsWritable(Feature) -> Writable Parameters: Feature: str Return value: Writable: ctypes.c_int""" if _at_camera_handle is not None: return _at_core_lib.AT_IsWritable(_at_camera_handle, Feature) != AT_FALSE else: raise AndorError('Andor library not initialized')
5,344,066
def error(statuscode, cause, message):
    """
    Print a given `message` to standard error and terminate the program with
    `statuscode` value. It's good practice to use the Unix convention for status
    codes: `2` for command line syntax errors and `1` for all other kinds of errors.
    """
    print('{0}: {1}: {2}'.format(__name__, cause, message), file=sys.stderr)
    sys.exit(statuscode)
5,344,067
def playstore_search(text):
    """
    Search on Play Store (https://play.google.com/store)

    Parameters
    -----------
    text:- The query that you want to search for (str)
    """
    play_store=f"https://play.google.com/store/search?q={text}"
    open(play_store)
5,344,068
def groupby_apply(
    keys: torch.Tensor, values: torch.Tensor, bins: int = 95, reduction: str = "mean", return_histogram: bool = False
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
    """
    Groupby apply for torch tensors

    Args:
        keys: tensor of group indices (``0`` to ``bins - 1``)
        values: values to aggregate - same size as keys
        bins: total number of groups
        reduction: either "mean" or "sum"
        return_histogram: whether to additionally return a histogram of group counts

    Returns:
        tensor of size ``bins`` with aggregated values and, optionally, the per-group counts
    """
    if reduction == "mean":
        reduce = torch.mean
    elif reduction == "sum":
        reduce = torch.sum
    else:
        raise ValueError(f"Unknown reduction '{reduction}'")
    uniques, counts = keys.unique(return_counts=True)
    groups = torch.stack([reduce(item) for item in torch.split_with_sizes(values, tuple(counts))])
    reduced = torch.zeros(bins, dtype=values.dtype, device=values.device).scatter(dim=0, index=uniques, src=groups)
    if return_histogram:
        hist = torch.zeros(bins, dtype=torch.long, device=values.device).scatter(dim=0, index=uniques, src=counts)
        return reduced, hist
    else:
        return reduced
5,344,069
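# Usage sketch (illustrative, not from the original corpus): aggregate three groups
# into 5 bins by mean; this example assumes keys are already sorted, which keeps
# unique(return_counts=True) consistent with the sequential split_with_sizes call.
import torch

keys = torch.tensor([0, 0, 2, 2, 2, 4])
values = torch.tensor([1.0, 3.0, 2.0, 4.0, 6.0, 10.0])
means, counts = groupby_apply(keys, values, bins=5, reduction="mean", return_histogram=True)
# means  -> tensor([ 2.,  0.,  4.,  0., 10.])
# counts -> tensor([2, 0, 3, 0, 1])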
def cube(x): """return x^3""" return x*x*x
5,344,070
def get_candidados(vetor):
    """Return the data of the candidates"""
    lista_retorno = []
    for i in vetor:
        lista_retorno.append(candidatos[int(i)])

    return lista_retorno
5,344,071
def get_fmin_tree(f_df, tree):
    """
    Annotate a deep copy of `tree` with branch-wise f4-ratio summaries: for every
    internal node, the non-negative F4ratio rows of `f_df` are split according to
    which child clade contains h1/h2, combined with zeroed-out rows from the sister
    branch, and the per-(h2, h3) column-wise minimum is attached to each child as a
    `branch_f` feature.
    """
    f = f_df[f_df['F4ratio']>=0].reset_index()
    t = copy.deepcopy(tree)
    i=0
    for node in t.traverse():
        if node.children:
            l = node.children[0]
            r = node.children[1]
            lleaves = l.get_leaf_names()
            rleaves = r.get_leaf_names()
            node_fl = f[f['h2'].isin(lleaves)&f['h1'].isin(rleaves)]
            node_fr = f[f['h2'].isin(rleaves)&f['h1'].isin(lleaves)]
            for side, node_f, sister_f in [(0,node_fl, node_fr),(1,node_fr, node_fl)]:
                if len(node_f) or len(sister_f):
                    sister_f0 = sister_f.rename(columns={'h1':'h2','h2':'h1'})
                    sister_f0['F4ratio'] = 0
                    sister_f0['Z'] = 0
                    nf = pd.concat([node_f, sister_f0])
                    #node_f.sort_values('|f|', ascending=False)
                    #only take h3 with maximum mean '|f|' on this branch
                    #h3 = node_f.groupby('h3').mean().sort_values('|f|', ascending=False).iloc[0].name
                    #node_f1 = node_f[node_f['h3']==h3]
                    child = node.get_children()[side]
                    #child.add_feature('rscore', summary(node_f1['|f|']))
                    #child.add_feature('h3', h3)
                    child.add_feature('branch_f', nf.groupby(['h2','h3']).min().reset_index())
    return t
5,344,072
def primes(n): """ Returns a list of primes < n """ n = int(n) sieve = [True] * n for i in np.arange(3, n ** 0.5 + 1, 2, dtype=int): if sieve[i]: sieve[i*i::2*i]=[False]*((n-i*i-1)//(2*i)+1) return [2] + [i for i in np.arange(3,n,2) if sieve[i]]
5,344,073
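# Quick check (illustrative): primes(n) returns every prime strictly below n.
assert list(primes(20)) == [2, 3, 5, 7, 11, 13, 17, 19]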
def H_TP(Z, T, P): """ Enthalpy defined by temperature and pressure (reference state at 300 K and 1 bar) Z - array of molar composition T, P - temperature and pressure Units are specified above """ H = RP.ABFLSHdll('TP', T, P*100, Z, 2).h - RP.ABFLSHdll('TP', 300, 100, Z, 2).h return H
5,344,074
def convert_docx(converter, output_path: pathlib.Path, settings: Settings) -> None: """Convert the xml to docx.""" docx_output_filename = output_path / settings["OutputFormats"]["docx"]["OutputFile"] docx_builder = DocxBuilder(docx_output_filename, pathlib.Path(settings["OutputFormats"]["docx"]["ReferenceFile"])) converter.convert(docx_builder)
5,344,075
def is_root() -> bool: """ Checks whether the current user is root (or, on Windows, an administrator). """ if os.name == 'nt': try: _dummy = list((Path(os.environ.get('SystemRoot', 'C:\\Windows')) / 'Temp').iterdir()) return True except OSError: return False else: return os.geteuid() == 0
5,344,076
def test_fallback_round_with_input_n_not_int(): """ Feature: JIT Fallback Description: Test round() in graph mode with input x is not int. Expectation: TypeError. """ @ms_function def foo(): x = round(10.123, 1.0) return x with pytest.raises(TypeError) as ex: foo() assert "cannot be interpreted as an integer" in str(ex.value)
5,344,077
def validate_netmask(s): """Validate that a dotted-quad ip address is a valid netmask. >>> validate_netmask('0.0.0.0') True >>> validate_netmask('128.0.0.0') True >>> validate_netmask('255.0.0.0') True >>> validate_netmask('255.255.255.255') True >>> validate_netmask(BROADCAST) True >>> validate_netmask('128.0.0.1') False :param s: String to validate as a dotted-quad notation netmask. :type s: str :returns: ``True`` if a valid netmask, ``False`` otherwise. :raises: TypeError """ if validate_ip(s): mask = bin(ip2network(s))[2:] # all left most bits must be 1, all right most must be 0 seen0 = False for c in mask: if '1' == c: if seen0: return False else: seen0 = True return True else: return False
5,344,078
def translate():
    """ A handler that translates English digit words into Chinese characters

        Return:
        - `JSON`

        .. code-block::

            # message = isError ? reason : "OK"
            # output = isError ? '' : <TRANSLATION>
            {
                message: string,
                output: string,
            }
    """
    ret = {'message': 'OK', 'output': ''}

    try:
        # Get request words from query string
        words = request.args.get('words')

        # Raise exception if there is no such query defined
        if words is None:
            raise MissingWordsQueryException()

        # Define our lookup table
        myMap = {
            'zero': '零',
            'one': '一',
            'two': '二',
            'three': '三',
            'four': '四',
            'five': '五',
            'six': '六',
            'seven': '七',
            'eight': '八',
            'nine': '九',
            'ten': '十',
        }

        # Since there may be more than one word, loop through all of them
        for word in words.split(' '):
            # Translate the word by looking it up in our lookup table
            output = myMap[word]

            # Use the translation directly if the output is still empty,
            # otherwise append it to the output string
            ret['output'] = output \
                if (len(ret['output']) == 0) \
                else '%s %s' % (ret['output'], output)

    except MissingWordsQueryException:
        # Set up error message
        ret['message'] = 'Missing "words" query string'
        ret['output'] = ''
    except KeyError:
        # Set up error message
        ret['message'] = 'Translation error for word |%s|' % word
        ret['output'] = ''

    # Encode ret as JSON so the response body is a JSON string
    return jsonify(**ret)
5,344,079
def create_matrix_sars_overlap_between_networks(networks_summary_df, networks_dict):
    """
    Creates a matrix where element (i,j) quantifies the number of SARS-CoV-2 partners of network i
    that are not shared with network j, divided by the total number of SARS-CoV-2 partners in network i

    Args:
        networks_summary_df: dataframe, output of make_summary
        networks_dict: dictionary of networks

    Returns:
        A matrix where element (i,j) quantifies the number of SARS-CoV-2 partners of network i
        that are not shared with network j, divided by the total number of SARS-CoV-2 partners in network i
    """
    N = len(networks_summary_df)
    mat_sars = np.zeros((N,N))
    for i in np.arange(N):
        for j in np.arange(0,N,1):
            # Select networks
            paramstring_a = networks_summary_df.loc[networks_summary_df['index']==i].index[0]
            paramstring_b = networks_summary_df.loc[networks_summary_df['index']==j].index[0]
            network_a = networks_dict[paramstring_a]
            network_b = networks_dict[paramstring_b]
            # Compute the set difference of SARS-CoV-2 partners, normalized by network a's partner count
            df_a = oi.get_networkx_graph_as_dataframe_of_nodes(network_a)
            sars_in_a = set(df_a[df_a['interact_sars_bool']==True].index)
            df_b = oi.get_networkx_graph_as_dataframe_of_nodes(network_b)
            sars_in_b = set(df_b[df_b['interact_sars_bool']==True].index)
            difference = float(len(sars_in_a.difference(sars_in_b)))
            mat_sars[i,j] = difference/float(len(sars_in_a))
    return(mat_sars)
5,344,080
def load_forcings_gauge_metadata(path: str) -> Tuple[float, float, float]:
    """
    Loads gauge metadata from the header of a CAMELS-US forcings file.

    Parameters
    ----------
    path: str
        Path to the forcings file.

    Returns
    -------
    tuple
        (gauge latitude, gauge elevation, basin area [m²])
    """
    with open(path, 'r') as file:
        latitude = float(file.readline())
        elevation = float(file.readline())
        area = float(file.readline())
    return latitude, elevation, area
5,344,081
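# Usage sketch (illustrative): the path below is hypothetical; per the function's
# contract, the first three header lines hold gauge latitude, elevation and basin area.
lat, elev, area_m2 = load_forcings_gauge_metadata(
    'camels/basin_mean_forcing/daymet/01/01013500_lump_cida_forcing_leap.txt')
print(f"lat={lat}, elevation={elev} m, area={area_m2 / 1e6:.1f} km²")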
async def test_restore_state(hass, monkeypatch):
    """Ensure states are restored on startup."""
    config = {
        "rflink": {"port": "/dev/ttyABC0"},
        DOMAIN: {
            "platform": "rflink",
            "devices": {
                "NewKaku_12345678_0": {"name": "l1", "type": "hybrid"},
                "test_restore_2": {"name": "l2"},
                "test_restore_3": {"name": "l3"},
                "test_restore_4": {"name": "l4", "type": "dimmable"},
                "test_restore_5": {"name": "l5", "type": "dimmable"},
            },
        },
    }

    mock_restore_cache(
        hass,
        (
            State(DOMAIN + ".l1", STATE_ON, {ATTR_BRIGHTNESS: "123"}),
            State(DOMAIN + ".l2", STATE_ON, {ATTR_BRIGHTNESS: "321"}),
            State(DOMAIN + ".l3", STATE_OFF),
            State(DOMAIN + ".l5", STATE_ON, {ATTR_BRIGHTNESS: "222"}),
        ),
    )

    hass.state = CoreState.starting

    # setup mocking rflink module
    _, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)

    # hybrid light must restore brightness
    state = hass.states.get(DOMAIN + ".l1")
    assert state
    assert state.state == STATE_ON
    assert state.attributes[ATTR_BRIGHTNESS] == 123

    # normal light must NOT restore brightness
    state = hass.states.get(DOMAIN + ".l2")
    assert state
    assert state.state == STATE_ON
    assert not state.attributes.get(ATTR_BRIGHTNESS)

    # OFF state also restores
    state = hass.states.get(DOMAIN + ".l3")
    assert state
    assert state.state == STATE_OFF

    # a light missing from the cache must get default values
    state = hass.states.get(DOMAIN + ".l4")
    assert state
    assert state.state == STATE_OFF
    assert state.attributes[ATTR_BRIGHTNESS] == 255
    assert state.attributes["assumed_state"]

    # test coverage for dimmable light
    state = hass.states.get(DOMAIN + ".l5")
    assert state
    assert state.state == STATE_ON
    assert state.attributes[ATTR_BRIGHTNESS] == 222
5,344,082
def transition(measure, N, **measure_args): """ A, B transition matrices for different measures measure: the type of measure legt - Legendre (translated) legs - Legendre (scaled) glagt - generalized Laguerre (translated) lagt, tlagt - previous versions of (tilted) Laguerre with slightly different normalization """ # Laguerre (translated) if measure == 'lagt': b = measure_args.get('beta', 1.0) A = np.eye(N) / 2 - np.tril(np.ones((N, N))) B = b * np.ones((N, 1)) elif measure == 'tlagt': # beta = 1 corresponds to no tilt b = measure_args.get('beta', 1.0) A = (1.-b)/2 * np.eye(N) - np.tril(np.ones((N, N))) B = b * np.ones((N, 1)) # Generalized Laguerre # alpha 0, beta small is most stable (limits to the 'lagt' measure) # alpha 0, beta 1 has transition matrix A = [lower triangular 1] elif measure == 'glagt': alpha = measure_args.get('alpha', 0.0) beta = measure_args.get('beta', 0.01) A = -np.eye(N) * (1 + beta) / 2 - np.tril(np.ones((N, N)), -1) B = ss.binom(alpha + np.arange(N), np.arange(N))[:, None] L = np.exp(.5 * (ss.gammaln(np.arange(N)+alpha+1) - ss.gammaln(np.arange(N)+1))) A = (1./L[:, None]) * A * L[None, :] B = (1./L[:, None]) * B * np.exp(-.5 * ss.gammaln(1-alpha)) * beta**((1-alpha)/2) # Legendre (translated) elif measure == 'legt': Q = np.arange(N, dtype=np.float64) R = (2*Q + 1) ** .5 j, i = np.meshgrid(Q, Q) A = R[:, None] * np.where(i < j, (-1.)**(i-j), 1) * R[None, :] B = R[:, None] A = -A # LMU: equivalent to LegT up to normalization elif measure == 'lmu': Q = np.arange(N, dtype=np.float64) R = (2*Q + 1)[:, None] # / theta j, i = np.meshgrid(Q, Q) A = np.where(i < j, -1, (-1.)**(i-j+1)) * R B = (-1.)**Q[:, None] * R # Legendre (scaled) elif measure == 'legs': q = np.arange(N, dtype=np.float64) col, row = np.meshgrid(q, q) r = 2 * q + 1 M = -(np.where(row >= col, r, 0) - np.diag(q)) T = np.sqrt(np.diag(2 * q + 1)) A = T @ M @ np.linalg.inv(T) B = np.diag(T)[:, None] B = B.copy() # Otherwise "UserWarning: given NumPY array is not writeable..." after torch.as_tensor(B) else: raise NotImplementedError return A, B
5,344,083
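# Usage sketch (illustrative): build the transition matrices for the scaled
# Legendre ('legs') measure and check their shapes; N is the order of the measure.
A, B = transition('legs', N=8)
assert A.shape == (8, 8) and B.shape == (8, 1)
# Measure-specific keyword arguments are forwarded, e.g. for generalized Laguerre:
A_g, B_g = transition('glagt', N=8, alpha=0.0, beta=0.01)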
def update_standings(season, current_week, matchups): """ Generate scores and results for the current week, update standings. """ for matchup in matchups: scores = matchup.scoreboard.get_score() winner = matchup.scoreboard.get_winner() for team in (matchup.home_team, matchup.away_team): standing = TeamStanding.objects.get( team=team, season=season, week_number=current_week) standing.pk = None standing._state.adding = True standing.week_number = current_week + 1 # Set matchup types is_div = matchup.is_divisional is_conf = matchup.is_conference is_home = False is_tie = False is_winner = False if team == matchup.home_team: is_home = True if winner == 'Tie': is_tie = True elif winner == team: is_winner = True home_score = scores['Home'] away_score = scores['Away'] update_standing = UpdateStanding( standing, current_week, home_score, away_score, is_div, is_conf, is_home, is_tie, is_winner ) update_standing.check_stats()
5,344,084
def interpolate_coord(df, xcol, ycol, step, distcol='d'):
    """
    Interpolates x/y coordinates along a line at a fixed distance.

    Parameters
    ----------
    df : pandas.DataFrame
    xcol, ycol : str
        Labels of the columns in ``df`` containing the x- and y-coords,
        respectively.
    step : int
        The spacing between the interpolated points.
    distcol : string, optional (default = 'd')
        Label of the column where the distance along the line is stored.

    Returns
    -------
    pandas.DataFrame

    """

    dist = _linear_distance(df, xcol, ycol)
    d_ = numpy.arange(0, numpy.floor(dist.max()), step)
    x_interp = interpolate.interp1d(dist, df[xcol])
    y_interp = interpolate.interp1d(dist, df[ycol])
    return pandas.DataFrame({distcol: d_, 'x': x_interp(d_), 'y': y_interp(d_)})
5,344,085
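# Usage sketch (illustrative): resample a simple straight line every 0.5 distance
# units; assumes the module-level helper _linear_distance and the pandas /
# scipy.interpolate imports used by interpolate_coord are available.
import pandas

line = pandas.DataFrame({'x': [0.0, 1.0, 2.0, 3.0], 'y': [0.0, 0.0, 0.0, 0.0]})
dense = interpolate_coord(line, 'x', 'y', step=0.5)
# dense['d'] -> 0.0, 0.5, 1.0, 1.5, 2.0, 2.5 with matching interpolated x/y columns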
def dnd(dev, cmd, start_hr, start_min, end_hr, end_min): """Query and adjust do-not-disturb mode.""" if cmd == "off": click.echo("Disabling DND..") print(dev.disable_dnd()) elif cmd == "on": click.echo("Enabling DND %s:%s to %s:%s" % (start_hr, start_min, end_hr, end_min)) click.echo(dev.set_dnd(start_hr, start_min, end_hr, end_min)) else: x = dev.dnd_status()[0] click.echo("DND %02i:%02i to %02i:%02i (enabled: %s)" % ( x['start_hour'], x['start_minute'], x['end_hour'], x['end_minute'], x['enabled']))
5,344,086
def create_ticket_for_new_issue(obj, issue_tracker_info): """Create new IssueTracker ticket for issue""" builder = issue_tracker_params_builder.IssueParamsBuilder() issue_tracker_params = builder.build_create_issue_tracker_params( obj, issue_tracker_info ) if issue_tracker_params.is_empty(): return # Query to IssueTracker. issue_tracker_query = issue_tracker_params.get_issue_tracker_params() # Parameters for creation IssuetrackerIssue object in GGRC. issuetracker_issue_params = issue_tracker_params.get_params_for_ggrc_object() try: res = issues.Client().create_issue(issue_tracker_query) issue_url = integration_utils.build_issue_tracker_url(res["issueId"]) issuetracker_issue_params["issue_url"] = issue_url issuetracker_issue_params["issue_id"] = res["issueId"] except integrations_errors.Error as error: logger.error( "Unable to create a ticket while creating object ID=%d: %s", obj.id, error ) obj.add_warning("Unable to create a ticket in issue tracker.") issuetracker_issue_params["enabled"] = False # Create object in GGRC with info about issue tracker integration. all_models.IssuetrackerIssue.create_or_update_from_dict( obj, issuetracker_issue_params )
5,344,087
def test_protocol_version_0(valid_data): """Test whether version 0 raises an exception""" valid_data["version"] = 0 with pytest.raises(IOError): Packet(bitstring.pack(PACKET_FORMAT, **valid_data), "127.0.0.1")
5,344,088
def oven_cook_setting_to_str(cook_setting: OvenCookSetting, units: str) -> str: """Format OvenCookSetting values nicely.""" cook_mode = cook_setting.cook_mode cook_state = cook_mode.oven_state temperature = cook_setting.temperature modifiers = [] if cook_mode.timed: modifiers.append(STATE_OVEN_TIMED) if cook_mode.delayed: modifiers.append(STATE_OVEN_DELAY) if cook_mode.probe: modifiers.append(STATE_OVEN_PROBE) if cook_mode.sabbath: modifiers.append(STATE_OVEN_SABBATH) temp_str = f" ({temperature}{units})" if temperature > 0 else "" modifier_str = f" ({', '.join(modifiers)})" if modifiers else "" display_state = oven_display_state_to_str(cook_state) return f"{display_state}{temp_str}{modifier_str}"
5,344,089
def print_location(location: Location) -> str: """Render a helpful description of the location in the GraphQL Source document.""" return print_source_location( location.source, get_location(location.source, location.start) )
5,344,090
def run_metrics(ground_truth, simulation, measurement_name,users=None,repos=None): """ Run all of the assigned metrics for a given measurement. Inputs: ground_truth - DataFrame of ground truth data simulation - DataFrame of simulated data measurement_name - Name of measurement corresponding to keys of measurement_params users - list of user IDs for user-centric, node-level measurements repos - list of repo IDs for repo-centric, node-level measurements Outputs: measurement_on_gt - Output of the measurement for the ground truth data measurement_on_sim - Output of the measurement for the simulation data metrics_output - Dictionary containing metric results for each metric assigned to the measurement """ p = measurement_params[measurement_name] if p["node_type"] == "user": nodes = users else: nodes = repos if "filters" in p: ground_truth = prefilter(ground_truth, p['filters']) simulation = prefilter(simulation, p['filters']) #for node-level measurements default to the most active node if a #list of nodes is not provided if p["scale"] == "node" and nodes is None: nodes = ground_truth.groupby([p["node_type"],'event'])["time"].count().reset_index() nodes = nodes.groupby(p["node_type"])["time"].median().sort_values(ascending=False).reset_index() nodes = nodes.head(1)[p["node_type"]] elif p["scale"] != "node": nodes = [''] metrics_output = {} #for node level measurements iterate over nodes for node in nodes: if p["scale"] == "node": metrics_output[node] = {} #select data for individual node filter = {p["node_type"]:[node]} gt = prefilter(ground_truth, filter) sim = prefilter(simulation, filter) else: gt = ground_truth.copy() sim = simulation.copy() measurement_function = p['measurement'] empty_df = False if len(gt.index) > 0: print("Measuring {} for ground truth data".format(measurement_function.__name__)) measurement_on_gt = measurement_function(gt) else: print("Ground truth data frame is empty for {} measurement".format(measurement_function.__name__)) empty_df = True measurement_on_gt = [] if len(sim.index) > 0: print("Measuring {} for simulation data".format(measurement_function.__name__)) measurement_on_sim = measurement_function(sim) else: print("Simulation data frame is empty for {} measurement".format(measurement_function.__name__)) empty_df = True measurement_on_sim = [] metrics = p['metrics'] #iterate over the metrics assigned to the measurement for m, metric_function in metrics.items(): print("Calculating {} for {}".format(metric_function.__name__, measurement_function.__name__)) if not empty_df: metric = metric_function(measurement_on_gt, measurement_on_sim) else: metric = None if p["scale"] == "node": metrics_output[node][m] = metric else: metrics_output[m] = metric return measurement_on_gt, measurement_on_sim, metrics_output
5,344,091
def setPenColor(c=_DEFAULT_PEN_COLOR): """ Set the pen color to c, where c is an object of class color.Color. c defaults to stddraw.BLACK. """ global _penColor _penColor = c
5,344,092
def checkTrue(comment,res,update=True): """ This method is a pass-through for consistency and updating @ In, comment, string, a comment printed out if it fails @ In, res, bool, the tested value @ In, update, bool, optional, if False then don't update results counter @ Out, res, bool, True if test """ if update: if res: results["pass"] += 1 else: print("checking bool",comment,'|',res,'is not True!') results["fail"] += 1 return res
5,344,093
def compute_votes(
    candidates,
    voters,
    voter_id,
    node_degree_normalization,
):
    """Compute neighbor voting for a given set of candidates and voters

    Arguments:
        candidates {np.ndarray} -- cells x genes normalized expression for candidates
        voters {np.ndarray} -- genes x cells normalized expression for voters
        voter_id {np.ndarray} -- design_matrix for voters for cell type identities
        node_degree_normalization {bool} -- Flag indicating whether to normalize votes by degree

    Returns:
        np.ndarray -- Votes for each candidate
    """
    votes = np.dot(candidates, np.dot(voters, voter_id))
    if node_degree_normalization:
        node_degree = np.sum(voter_id, axis=0)
        votes += node_degree
        norm = np.dot(candidates, np.sum(voters, axis=1)) + voters.shape[1]
        votes /= norm[:, None]
    return votes
5,344,094
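# Usage sketch (illustrative, synthetic data): 3 genes, 4 candidate cells, 5 voter
# cells and 2 cell types. Shapes follow the matrix products in the code above:
# candidates as cells x genes, voters as genes x cells, voter_id one-hot per voter.
import numpy as np

rng = np.random.default_rng(0)
candidates = rng.random((4, 3))            # candidate cells x genes
voters = rng.random((3, 5))                # genes x voter cells
voter_id = np.eye(2)[[0, 0, 1, 1, 1]]      # voter cells x cell types (one-hot)
votes = compute_votes(candidates, voters, voter_id, node_degree_normalization=True)
votes.shape  # -> (4, 2): one aggregated vote per candidate cell per cell type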
def drop_second_col(in_list):
    """ Drop the second colon-separated field (line info) from each entry and return a list of lists """
    ret = []
    log.debug("Drop second col from %r", in_list)
    for x in in_list:
        y = x.split(":")
        lineless = y[0:1] + y[2:]
        log.debug("Add lineless list: %r", lineless)
        ret.append(lineless)
    return ret
5,344,095
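# Usage sketch (illustrative): each entry is colon-separated with the line number
# in the second field, which is dropped.
entries = ["foo.c:12:warning", "bar.c:7:error:extra"]
drop_second_col(entries)
# -> [['foo.c', 'warning'], ['bar.c', 'error', 'extra']]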
def _precompute_cache(x, y, num_classes): """Cache quantities to speed-up the computation of L2-regularized least-sq.""" # Whiten mean = jnp.mean(x, axis=0, keepdims=True) std = jnp.std(x, axis=0, keepdims=True) + 1e-5 x = (x - mean) / std # Add a constant feature for the bias, large so it's almost unregularized: x = jnp.pad(x, ((0, 0), (0, 1)), constant_values=BIAS_CONSTANT) # To one-hot representation rescaled into {-1, 1} y = 2.0 * jax.nn.one_hot(y, num_classes) - 1.0 num_points, dim = x.shape # Let N be the number of points, D the dimension and C the number of classes. # We have x of shape (N, D) and y of shape (N, C). # For least-squares, we can compute # # (A) when N >= D, (x^T x + l2 Id)^{-1} x^T y # (B) when D > N, x^T (x x^T + l2 Id)^{-1} y # # We pre-compute the eigen-decomposition of either x^T x or x x^T which # becomes q diag(eigs) q^T with q unitary matrix either (D, D) or (N, N) # and eigs a vector (D,) or (N,). # # For any l2 > 0, we can compute (x^T x + l2 Id)^{-1} or (x x^T + l2 Id)^{-1} # by simply computing q (diag(eigs) + l2 Id)^{-1} q^T. # (SVD would be more natural here, but it proved slower, so we use eigh) # # Both cases (A) and (B) can be viewed as lhs (diag(eigs) + l2 Id)^{-1} rhs, # where lhs/rhs are pre-computed left/right-hand sides to specify. # if num_points >= dim: eigs, q = jnp.linalg.eigh(x.T @ x) rhs = q.T @ (x.T @ y) lhs = q else: eigs, q = jnp.linalg.eigh(x @ x.T) rhs = q.T @ y lhs = x.T @ q cache = {'eigs': eigs, 'rhs': rhs, 'lhs': lhs, 'mean': mean, 'std': std} return cache
5,344,096
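# Follow-up sketch (illustrative): given the cache, the ridge solution for a specific
# l2 is lhs @ diag(1 / (eigs + l2)) @ rhs, covering both cases (A) and (B) described
# in the comments above; ridge_from_cache and x_new are hypothetical names.
def ridge_from_cache(cache, l2):
    # Apply (eigs + l2)^{-1} row-wise to the precomputed right-hand side.
    scaled_rhs = cache['rhs'] / (cache['eigs'][:, None] + l2)
    return cache['lhs'] @ scaled_rhs  # shape: (num_features + 1, num_classes)

# Prediction on new raw features x_new would reuse the stored whitening stats:
#   x_w = (x_new - cache['mean']) / cache['std']
#   x_w = jnp.pad(x_w, ((0, 0), (0, 1)), constant_values=BIAS_CONSTANT)
#   logits = x_w @ ridge_from_cache(cache, l2=1.0)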
def Usage():
  """Print usage."""
  print("""Usage:
cros_check_patches [--board=BOARD] [emerge args] package overlay-dir config.json

Given a package name (e.g. 'chromeos') and an overlay directory
(e.g. /usr/local/portage/chromiumos), outputs a list of patches
applied by that overlay, in the course of building the specified
package and all its dependencies. Additional configuration options
are specified in the JSON-format config file named on the command line.

First run? Try this for a starter config:
{
  "ignored_packages": ["chromeos-base/chromeos-chrome"],
  "upstreamed": [],
  "needs_upstreaming": [],
  "not_for_upstream": [],
  "uncategorized": []
}
""")
5,344,097
def test_dispose(websocket_server): """Observable subscriptions can be disposed using Websockets.""" url_thing_01 = websocket_server.pop("url_thing_01") exposed_thing_01 = websocket_server.pop("exposed_thing_01") prop_name = websocket_server.pop("prop_name_01") @tornado.gen.coroutine def test_coroutine(): observe_msg_id = Faker().pyint() dispose_msg_id = Faker().pyint() conn = yield tornado.websocket.websocket_connect(url_thing_01) msg_observe_req = WebsocketMessageRequest( method=WebsocketMethods.ON_PROPERTY_CHANGE, params={"name": prop_name}, msg_id=observe_msg_id) conn.write_message(msg_observe_req.to_json()) msg_observe_resp_raw = yield conn.read_message() msg_observe_resp = WebsocketMessageResponse.from_raw(msg_observe_resp_raw) assert msg_observe_resp.id == observe_msg_id subscription_id = msg_observe_resp.result yield exposed_thing_01.write_property(prop_name, Faker().sentence()) msg_emitted_raw = yield conn.read_message() msg_emitted = WebsocketMessageEmittedItem.from_raw(msg_emitted_raw) assert msg_emitted.subscription_id == subscription_id msg_dispose_req = WebsocketMessageRequest( method=WebsocketMethods.DISPOSE, params={"subscription": subscription_id}, msg_id=dispose_msg_id) conn.write_message(msg_dispose_req.to_json()) msg_dispose_resp_raw = yield conn.read_message() msg_dispose_resp = WebsocketMessageResponse.from_raw(msg_dispose_resp_raw) assert msg_dispose_resp.result == subscription_id conn.write_message(msg_dispose_req.to_json()) msg_dispose_resp_02_raw = yield conn.read_message() msg_dispose_resp_02 = WebsocketMessageResponse.from_raw(msg_dispose_resp_02_raw) assert not msg_dispose_resp_02.result yield exposed_thing_01.write_property(prop_name, Faker().pystr()) yield exposed_thing_01.write_property(prop_name, Faker().pystr()) with pytest.raises(tornado.gen.TimeoutError): yield tornado.gen.with_timeout( timeout=datetime.timedelta(milliseconds=200), future=conn.read_message()) run_test_coroutine(test_coroutine)
5,344,098
def show_user(): """Return page showing details: walks, landmarks rated, scores.""" user = User.query.filter_by(user_id=session.get('user_id')).first() ratings = user.ratings # import pdb; pdb.set_trace() walks = user.walks # for walk in walks: # origin = Landmark.query.filter(Landmark.landmark_id == walk.origin).first() # origin = origin.landmark_name # destination = Landmark.query.filter(Landmark.landmark_id == walk.destination).first() # destination = destination.landmark_name # metaWalks = { # "walk_id": walk.walk_id, # "metadata": { # "origin": origin, # "destination": destination, # "datetime": walk.log_datetime, # "duration": walk.duration, # "distance": walk.distance # } # } saved = UserSaved.query.filter_by(user_id=session.get('user_id')).all() # import pdb; pdb.set_trace() return render_template('profile.html', user=user, ratings=ratings, walks=walks, # metaWalks=metaWalks, saved=saved)
5,344,099