content
stringlengths
22
815k
id
int64
0
4.91M
def log(level, msg):
    """Print *msg* indented one space per *level*.

    level 0 : info
    level 1 : warning
    level 2 : error
    msg : message (str)
    """
    indent = " "
    print("{}{}".format(indent * level, msg))
5,328,500
def esv(value, args=''):
    """Use the ESV API to fetch a Bible passage for ``[[bible PASSAGE]]`` markers.

    http://www.esvapi.org/v2/rest/passageQuery?key=IP&passage=Gen+1:5-10&output-format=plain-text

    Usage::

        {{ text|esv }}
        {{ text|esv:"option1:value,option2:value" }}
    """
    # No marker in the text: nothing to do.
    if BIBLE_RE.search(value) is None:
        return value

    options = ESV_DICT.copy()
    # args is a comma-separated list of "key:value" overrides; unknown keys
    # and malformed entries are silently ignored, as before.
    for raw_arg in args.split(','):
        try:
            key, val = raw_arg.split(':')
        except ValueError:
            continue
        if key in options:
            options[key] = val

    global ESV_QUERY_URL
    ESV_QUERY_URL = ESV_API_URL + '&'.join(
        '%s=%s' % (k, urllib.quote(str(v))) for (k, v) in options.items())
    return BIBLE_RE.sub(_get_esv_txt, value)
5,328,501
def exec_func_shell(func, d, runfile, cwd=None):
    """Execute a shell function from the metadata

    Note on directory behavior.  The 'dirs' varflag should contain a list
    of the directories you need created prior to execution.  The last
    item in the list is where we will chdir/cd to.

    :param func: name of the shell function to emit and run.
    :param d: bitbake data store the function is read from.
    :param runfile: path of the shell script to write and execute.
    :param cwd: optional directory to cd into before running the function.
    """
    # Don't let the emitted shell script override PWD
    d.delVarFlag('PWD', 'export')

    # Emit a standalone shell script: trap prologue, the function body,
    # optional tracing/cd, then a call to the function itself.
    with open(runfile, 'w') as script:
        script.write(shell_trap_code())

        bb.data.emit_func(func, script, d)

        if bb.msg.loggerVerboseLogs:
            script.write("set -x\n")
        if cwd:
            script.write("cd '%s'\n" % cwd)
        script.write("%s\n" % func)
        script.write('''
# cleanup
ret=$?
trap '' 0
exit $ret
''')

    # 0775: Python 2 octal literal -- this module targets Python 2.
    os.chmod(runfile, 0775)

    cmd = runfile
    if d.getVarFlag(func, 'fakeroot', False):
        fakerootcmd = d.getVar('FAKEROOT', True)
        if fakerootcmd:
            cmd = [fakerootcmd, runfile]

    if bb.msg.loggerDefaultVerbose:
        logfile = LogTee(logger, sys.stdout)
    else:
        logfile = sys.stdout

    def readfifo(data):
        # Protocol: NUL-separated lines of "<command> <value>" written by the
        # child into the fifo; dispatch to the matching bb logging call.
        lines = data.split('\0')
        for line in lines:
            splitval = line.split(' ', 1)
            cmd = splitval[0]
            if len(splitval) > 1:
                value = splitval[1]
            else:
                value = ''
            if cmd == 'bbplain':
                bb.plain(value)
            elif cmd == 'bbnote':
                bb.note(value)
            elif cmd == 'bbwarn':
                bb.warn(value)
            elif cmd == 'bberror':
                bb.error(value)
            elif cmd == 'bbfatal':
                # The caller will call exit themselves, so bb.error() is
                # what we want here rather than bb.fatal()
                bb.error(value)
            elif cmd == 'bbfatal_log':
                bb.error(value, forcelog=True)
            elif cmd == 'bbdebug':
                # bbdebug carries an extra leading debug-level field.
                splitval = value.split(' ', 1)
                level = int(splitval[0])
                value = splitval[1]
                bb.debug(level, value)

    tempdir = d.getVar('T', True)
    # One fifo per pid so concurrent tasks don't collide.
    fifopath = os.path.join(tempdir, 'fifo.%s' % os.getpid())
    if os.path.exists(fifopath):
        os.unlink(fifopath)
    os.mkfifo(fifopath)
    with open(fifopath, 'r+') as fifo:
        try:
            bb.debug(2, "Executing shell function %s" % func)

            try:
                with open(os.devnull, 'r+') as stdin:
                    bb.process.run(cmd, shell=False, stdin=stdin, log=logfile, extrafiles=[(fifo,readfifo)])
            except bb.process.CmdError:
                logfn = d.getVar('BB_LOGFILE', True)
                raise FuncFailed(func, logfn)
        finally:
            # Always remove the fifo, even when the command failed.
            os.unlink(fifopath)

    bb.debug(2, "Shell function %s finished" % func)
5,328,502
def books(db_path, auth, username, scrape):
    """Save books for a specified user, e.g. rixx

    :param db_path: path of the SQLite database to write to.
    :param auth: path of the JSON file holding ``goodreads_personal_token``
        and ``goodreads_user_id``.
    :param username: optional user name or numeric id overriding the
        configured user id.
    :param scrape: passed through to ``utils.fetch_books``.
    """
    db = sqlite_utils.Database(db_path)
    try:
        # Context manager so the auth file handle is closed promptly
        # (previously the handle from json.load(open(auth)) was leaked).
        with open(auth) as auth_file:
            data = json.load(auth_file)
        token = data["goodreads_personal_token"]
        user_id = data["goodreads_user_id"]
    except (KeyError, FileNotFoundError):
        # utils.error is expected to terminate the command here; if it ever
        # returned, user_id/token below would be unbound -- TODO confirm
        # utils.error exits.
        utils.error(
            "Cannot find authentication data, please run goodreads_to_sqlite auth!"
        )
    click.secho(f"Read credentials for user ID {user_id}.", fg="green")
    if username:
        user_id = username if username.isdigit() else utils.fetch_user_id(username)
    utils.fetch_user_and_shelves(user_id, token, db=db)
    utils.fetch_books(db, user_id, token, scrape=scrape)
5,328,503
def main() -> None:
    """Make a jazz noise here"""

    args = get_args()

    # Writer for the merged output CSV.
    field_names = [
        'source', 'unit', 'location_name', 'location_type', 'variable_name',
        'variable_desc', 'collected_on', 'value', 'medium'
    ]
    writer = csv.DictWriter(args.outfile, field_names)
    writer.writeheader()

    headers = get_headers(args.headers)

    total_written = 0
    for file_num, fh in enumerate(args.file, start=1):
        print(f'{file_num:3}: {os.path.basename(fh.name)}')
        total_written += process(fh, headers, writer, args)

    print(f'Done, wrote {total_written:,}.')
5,328,504
def box_nms(bboxes, scores, labels, threshold=0.5, mode='union'):
    """Class-aware non maximum suppression.

    source: https://github.com/kuangliu/pytorch-retinanet

    A box is only suppressed by a higher-scoring box with the *same* label.

    Args:
      bboxes: (tensor) bounding boxes, sized [N,4].
      scores: (tensor) bbox scores, sized [N,].
      labels: (tensor) bbox class labels, sized [N,].
      threshold: (float) overlap threshold.
      mode: (str) 'union' or 'min'.

    Returns:
      keep: (LongTensor) selected indices.

    Raises:
      TypeError: if `mode` is neither 'union' nor 'min'.

    Reference:
      https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py
    """
    x1 = bboxes[:, 0]
    y1 = bboxes[:, 1]
    x2 = bboxes[:, 2]
    y2 = bboxes[:, 3]

    # +1 follows the py_cpu_nms convention of inclusive pixel coordinates.
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    _, order = scores.sort(0, descending=True)

    keep = []
    while order.numel() > 0:
        # Use a plain int index: keeps `keep` a list of ints and avoids
        # indexing a 0-dim tensor later on.
        i = order[0].item()
        keep.append(i)

        if order.numel() == 1:
            break

        label = labels[i]
        rest = order[1:]
        xx1 = x1[rest].clamp(min=x1[i].item())
        yy1 = y1[rest].clamp(min=y1[i].item())
        xx2 = x2[rest].clamp(max=x2[i].item())
        yy2 = y2[rest].clamp(max=y2[i].item())

        w = (xx2 - xx1 + 1).clamp(min=0)
        h = (yy2 - yy1 + 1).clamp(min=0)
        inter = w * h

        if mode == 'union':
            ovr = inter / (areas[i] + areas[rest] - inter)
        elif mode == 'min':
            ovr = inter / areas[rest].clamp(max=areas[i].item())
        else:
            raise TypeError('Unknown nms mode: %s.' % mode)

        # Keep boxes that overlap little enough OR belong to another class.
        # Fix: reshape(-1) instead of squeeze() keeps `ids` 1-D when exactly
        # one box survives; with squeeze() `order` degenerated to a 0-dim
        # tensor and `order[0]` raised on modern PyTorch.
        ids = ((ovr <= threshold) | (labels[rest] != label)).nonzero().reshape(-1)
        if ids.numel() == 0:
            break
        order = rest[ids]
    return torch.LongTensor(keep)
5,328,505
def abs_length_diff(trg, pred):
    """Computes absolute length difference between a target sequence and a
    predicted sequence, counting space-separated tokens.

    Args:
        - trg (str): reference
        - pred (str): generated output

    Returns:
        - absolute length difference (int)
    """
    # len(s.split(' ')) == s.count(' ') + 1 for every string, so the
    # difference of token counts equals the difference of space counts.
    return abs(trg.count(' ') - pred.count(' '))
5,328,506
def CDLEVENINGSTAR(data: xr.DataArray, penetration: float = 0.3) -> xr.DataArray:
    """
    Evening Star (Pattern Recognition)

    Inputs:
        data:['open', 'high', 'low', 'close']
    Outputs:
        double series (values are -1, 0 or 1)
    """
    # talib returns -100/0/100; result_divider=100 maps that to -1/0/1.
    price_fields = [f.OPEN, f.HIGH, f.LOW, f.CLOSE]
    return multiple_series_call(
        talib.CDLEVENINGSTAR, data, ds.TIME, ds.FIELD,
        price_fields, [penetration], result_divider=100)
5,328,507
def cern_authorized_signup_handler(resp, remote, *args, **kwargs): """Handle sign-in/up functionality. :param remote: The remote application. :param resp: The response. :returns: Redirect response. """ # Remove any previously stored auto register session key session.pop(token_session_key(remote.name) + "_autoregister", None) # Store token in session # ---------------------- # Set token in session - token object only returned if # current_user.is_authenticated(). token = response_token_setter(remote, resp) handlers = current_oauthclient.signup_handlers[remote.name] # Sign-in/up user # --------------- if not current_user.is_authenticated: account_info = handlers["info"](resp) account_info_received.send(remote, token=token, response=resp, account_info=account_info) user = oauth_get_user( remote.consumer_key, account_info=account_info, access_token=token_getter(remote)[0], ) if user is None: # Auto sign-up if user not found form = create_csrf_disabled_registrationform() form = fill_form(form, account_info["user"]) user = oauth_register(form) # if registration fails ... if user is None: # requires extra information session[token_session_key(remote.name) + "_autoregister"] = True session[token_session_key(remote.name) + "_account_info"] = account_info session[token_session_key(remote.name) + "_response"] = resp db.session.commit() return redirect( url_for( ".signup", remote_app=remote.name, ) ) # Authenticate user if not oauth_authenticate(remote.consumer_key, user, require_existing_link=False): return current_app.login_manager.unauthorized() # Link account # ------------ # Need to store token in database instead of only the session when # called first time. 
token = response_token_setter(remote, resp) # Setup account # ------------- if not token.remote_account.extra_data: account_setup = handlers["setup"](token, resp) account_setup_received.send(remote, token=token, response=resp, account_setup=account_setup) db.session.commit() account_setup_committed.send(remote, token=token) else: db.session.commit() # Redirect to next if current_user.is_authenticated and not is_egroup_admin(): logout_user() return redirect(get_post_logout_redirect()) next_url = get_session_next_url(remote.name) if next_url: return redirect(next_url) return redirect(url_for("invenio_oauthclient_settings.index"))
5,328,508
def cr2_to_pgm(cr2_fname, pgm_fname=None, dcraw='dcraw', clobber=True, **kwargs):  # pragma: no cover
    """ Convert CR2 file to PGM

    Converts a raw Canon CR2 file to a netpbm PGM file via `dcraw`. Assumes
    `dcraw` is installed on the system

    Note:
        This is a blocking call

    Arguments:
        cr2_fname {str} -- Name of CR2 file to convert
        **kwargs {dict} -- Additional keywords to pass to script

    Keyword Arguments:
        pgm_fname {str} -- Name of PGM file to output, if None (default) then
                           use same name as CR2 (default: {None})
        dcraw {str} -- Path to installed `dcraw` (default: {'dcraw'})
        clobber {bool} -- A bool indicating if existing PGM should be
                          clobbered (default: {True})

    Returns:
        str -- Filename of PGM that was created

    Raises:
        error.InvalidSystemCommand -- If `dcraw` cannot be executed or the
                                      conversion command fails.
    """
    assert os.path.exists(cr2_fname), "cr2 file does not exist at {}".format(
        cr2_fname)

    # Verify dcraw is runnable. Fixes two defects in the old check: it
    # tested the literal string 'dcraw' instead of the `dcraw` argument, and
    # a missing binary raises OSError rather than failing the assert.
    try:
        subprocess.call([dcraw], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError as err:
        raise error.InvalidSystemCommand(
            msg="could not execute dcraw in path: {} ({})".format(dcraw, err))

    verbose = kwargs.get('verbose', False)

    if pgm_fname is None:
        pgm_fname = cr2_fname.replace('.cr2', '.pgm')

    if os.path.exists(pgm_fname) and not clobber:
        # Keep the existing conversion; fall through to return it.
        if verbose:
            print("PGM file exists, returning existing file: {}".format(
                pgm_fname))
    else:
        try:
            # Build the command for this file
            command = '{} -t 0 -D -4 {}'.format(dcraw, cr2_fname)
            cmd_list = command.split()
            if verbose:
                print("PGM Conversion command: \n {}".format(cmd_list))

            # Run the command
            if subprocess.check_call(cmd_list) == 0:
                if verbose:
                    print("PGM Conversion command successful")

        except subprocess.CalledProcessError as err:
            raise error.InvalidSystemCommand(msg="File: {} \n err: {}".format(
                cr2_fname, err))

    return pgm_fname
5,328,509
def asset_from_iconomi(symbol: str) -> Asset:
    """Map an ICONOMI ticker symbol to an Asset.

    May raise:
    - DeserializationError
    - UnsupportedAsset
    - UnknownAsset
    """
    if not isinstance(symbol, str):
        raise DeserializationError(f'Got non-string type {type(symbol)} for iconomi asset')

    upper_symbol = symbol.upper()
    if upper_symbol in UNSUPPORTED_ICONOMI_ASSETS:
        raise UnsupportedAsset(upper_symbol)

    # Translate ICONOMI-specific tickers to their global names when known.
    world_name = ICONOMI_TO_WORLD.get(upper_symbol, upper_symbol)
    return symbol_to_asset_or_token(world_name)
5,328,510
def ConvertHashType(value):
    """
    Attempt to convert a space separated series of key=value pairs into a
    dictionary of pairs. If any value fails to split successfully an error
    will be raised.

    :param value: Space delimited string of key-value pairs
    :return: Dictionary of key-value pairs.
    """
    pairs = {}
    for token in value.split():
        parts = token.split('=')
        # Anything other than exactly "key=value" is malformed.
        if len(parts) != 2:
            raise ConversionFailure("Invalid option '{}' for key-value pair: {}"
                                    .format(token, value))
        key, val = parts
        pairs[key] = val.strip()
    return pairs
5,328,511
def ts_dct_from_estsks(pes_idx, es_tsk_lst, rxn_lst, thy_dct, spc_dct,
                       run_prefix, save_prefix):
    """ build a ts queue
    """
    print('\nTasks for transition states requested...')
    print('Identifying reaction classes for transition states...')

    # Pull run/initial theory levels from the first 'ts' or 'all' task.
    for tsk in es_tsk_lst:
        obj, keyword_dct = tsk[0], tsk[-1]
        if obj in ('ts', 'all'):
            # want print for task list
            thy_info = tinfo.from_dct(thy_dct.get(keyword_dct['runlvl']))
            ini_thy_info = tinfo.from_dct(thy_dct.get(keyword_dct['inplvl']))
            break

    # Discern if TS should be reidentified (last find_ts task wins).
    re_id = False
    for tsk in es_tsk_lst:
        if 'find_ts' in tsk[:-1]:
            re_id = tsk[-1].get('re_id', False)

    ts_dct = {}
    for rxn in rxn_lst:
        ts_dct.update(
            ts_dct_sing_chnl(
                pes_idx, rxn, spc_dct, run_prefix, save_prefix,
                thy_info=thy_info, ini_thy_info=ini_thy_info, re_id=re_id))

    return ts_dct
5,328,512
def parse_msig_storage(storage: str):
    """Parse the storage of a multisig contract, returning its counter (as a
    number), threshold (as a number), and the signer keys (as a Micheline
    sequence in a string)."""
    # Collapse the storage onto a single line before matching.
    flattened = ' '.join(storage.split('\n'))
    pattern = r'Pair\s+?([0-9]+)\s+?([0-9]+)\s+?(.*)\s*'
    parsed = re.search(pattern, flattened)
    assert parsed is not None
    counter, threshold, keys = parsed[1], parsed[2], parsed[3]
    return {
        'counter': int(counter),
        'threshold': int(threshold),
        'keys': keys,
    }
5,328,513
def adjust_spines(ax, spines):
    """
    Hide every spine of a matplotlib axes that is not listed in *spines*
    and only draw ticks on the sides that keep a spine.
    (taken from the matplotlib gallery, anonymous author)

    parameters
    ----------
    ax: a matplotlib axes object
        handler of the object to work with
    spines: list of char
        location of the spines to keep
    """
    for location, spine in ax.spines.items():
        if location not in spines:
            spine.set_color('none')  # don't draw this spine

    # turn off ticks where there is no spine
    if 'left' in spines:
        ax.yaxis.set_ticks_position('left')
    else:
        ax.yaxis.set_ticks([])  # no yaxis ticks

    if 'bottom' in spines:
        ax.xaxis.set_ticks_position('bottom')
    else:
        ax.xaxis.set_ticks([])  # no xaxis ticks
5,328,514
def doTestMethods(method, term, parm, options):
    """ update SF using passed data

    Exercises one Salesforce API3 call chosen by *method* ('update',
    'create', 'query', 'search', 'retrieve', 'delete') and *parm*
    (usually 'test1'/'test2'); *term* seeds the test data or search term.
    Returns whatever the exercised sForceApi3 call returns (a default
    "No test found" message when nothing matches).

    NOTE: Python 2 code (``file()`` builtin, ``print`` statement).
    """
    # Trace log for the API session.
    tf = file(options.trace, 'w+')
    sfb = sForceApi3(dlog=tf, debug=options.debug)
    ret = ['No test found for %s %s' %(method,parm)]
    dtup = time.strptime('2004-04-21T12:30:59', ISO_8601_DATETIME)
    dtsec = time.mktime(dtup)
    taskdata = {'Description':'This was updated from api3 %s' %term
               ,'Dont_Send_Empty_Report__c':True
               ,'Frequency__c':'weekly'
               ,'Last_Report__c':dtsec  #dataTime
               ,'My_CRs_in_Development__c':True
               # ref must be valid ID
               ,'id':'00330000001Ud4D'
               ,'LastName':'Vanek'
               }
    if method == 'update':
        if parm == 'test1':
            ret = sfb.updateTest1(entity='Task', data={}, nullEmpty=False, seed=term)
        elif parm == 'test2':
            ret = sfb.updateTest1(entity='Contact', data=taskdata, nullEmpty=False, seed=term)
    elif method == 'create':
        if parm == 'test1':
            ret = sfb.createTest1(entity='Task', data={}, nullEmpty=False, seed=term)
        elif parm == 'test2':
            ret = sfb.createTest1(entity='Contact', data=taskdata, nullEmpty=False, seed=term)
    elif method == 'query':
        if parm == 'test1':
            soql = "Select Business_Unit__c, Department, Dept__c, Email, EmployeeNumber__c, FirstName, Id, LastName from Contact \
where AccountId = '00130000000DWRJ' and EmployeeNumber__c <'10015' and EmployeeNumber__c != '' "
            ret = sfb.queryBase(entity='Contact', soql=soql)
        elif parm == 'test2':
            # should be same as above
            ret = sfb.queryBase(entity='Contact', where='t1', sc='contacts')
        else:
            # sc = mod, case, contacts,
            ret = sfb.queryBase(entity='task', where=term, sc=parm)
    elif method == 'search':
        # An '@' in the term suggests an e-mail address search.
        if term.find('@') != -1:
            scope = 'EMAIL FIELDS'
        else:
            scope = 'ALL FIELDS'
        if parm == 'test1':
            retsc = 'case'
            ret = sfb.searchBase(term, retsc=retsc, scope=scope)
        elif parm == 'test2':
            retsc = 'people'
            ret = sfb.searchBase(term, retsc=retsc, scope=scope)
        else:
            ret = sfb.searchBase(term, retsc=parm, scope=scope)
    elif method == 'retrieve':
        if parm == 'test1':
            ids = ['00T3000000321P8','00T3000000321Nn']
            fieldList = []
            ret = sfb.retrieveBase(ids=ids, entity='Task', fieldList=fieldList )
        elif parm == 'test2':
            ids = ['00330000001Jsyq','00330000001Ud4D']
            fieldList = ['Id','FirstName', 'LastName','Email']
            # NOTE(review): fieldList is built but not passed here -- confirm
            # whether retrieveBase should receive it.
            ret = sfb.retrieveBase(ids=ids, entity='Contact' )
    elif method == 'delete':
        if parm == 'test1':
            ids = ['00T300000033cw3']
            ret = sfb.deleteBase(ids=ids)
        else:
            print 'Only a parm of test1 is supported right now'
    return ret
5,328,515
def do_index(request):
    """Render the index page.

    :param request: the incoming request; ``request.user`` controls which
        projects are listed.
    :returns: ``(status, headers, html)`` tuple.
    """
    # Only projects the requesting user is allowed to open.
    projects = [
        (name, path) for name, path in config.all_projects()
        if request.user.can_open(path)
    ]
    login_block = render_login_block(request)
    # NOTE: the template receives locals(), so the local variable names
    # `projects` and `login_block` are part of the template contract --
    # do not rename them.
    html = util.render_template(
        config.get_path('www.index_template', 'gbd/core/index.html.tpl'),
        locals())
    return 200, [('Content-Type', 'text/html; charset=utf-8')], html
5,328,516
def get_hathi_records(id):
    """
    Deprecated. Yields parsed records from the HathiTrust volumes web API
    for the given HathiTrust item id.

    NOTE: Python 2 code (urllib2/cStringIO); `id` shadows the builtin but is
    kept for interface compatibility.
    """
    handler = XmlHandler()
    url = "http://catalog.hathitrust.org/api/volumes/full/json/htid:" + id
    f = urllib2.urlopen(url)
    data = json.load(f)
    # The JSON payload embeds XML record strings; parse each into the
    # shared handler and yield everything accumulated so far.
    for record in hathi_xml_yielder(data):
        xml = cStringIO.StringIO(record.encode("utf-8"))
        parse_xml(xml,handler)
        # NOTE(review): handler.records accumulates across iterations, so
        # earlier records are yielded again on later passes -- confirm this
        # duplication is intended.
        for m in handler.records:
            yield m
5,328,517
def extract_trajectory_to_file( # pylint: disable=R0912,R0913,R0914,R0915
        nc_path, out_path, ref_system=None, top=None, nc_checkpoint_file=None,
        state_index=None, replica_index=None, start_frame=0, end_frame=-1,
        skip_frame=1, keep_solvent=True, discard_equilibration=False, image_molecules=False,
        ligand_atoms=None, solvent_atoms='auto'):
    """
    Extract trajectory from the NetCDF4 and save to `out_path`.

    The output format is determined by the filename extension.

    Parameters
    ----------
    nc_path : str
        Path to the primary nc_file storing the analysis options
    out_path : str
        Path to output trajectory file.
    ref_system : System object or path to serialized System object, optional
        Reference state System object. Needed if nc file metadata does not
        store the reference_system.
    top : Topography or Topology object path to .pdb file, optional
        Needed if nc file metadata does not store topography.
    nc_checkpoint_file : str or None, Optional
        File name of the checkpoint file housing the main trajectory
        Used if the checkpoint file is differently named from the default
        one chosen by the nc_path file. Default: None
    state_index : int, optional
        The index of the alchemical state for which to extract the
        trajectory. One and only one between state_index and replica_index
        must be not None (default is None).
    replica_index : int, optional
        The index of the replica for which to extract the trajectory. One
        and only one between state_index and replica_index must be not None
        (default is None).
    start_frame : int, optional
        Index of the first frame to include in the trajectory (default is 0).
    end_frame : int, optional
        Index of the last frame to include in the trajectory. If negative,
        will count from the end (default is -1).
    skip_frame : int, optional
        Extract one frame every skip_frame (default is 1).
    keep_solvent : bool, optional
        If False, solvent molecules are ignored (default is True).
    discard_equilibration : bool, optional
        If True, initial equilibration frames are discarded (see the method
        pymbar.timeseries.detectEquilibration() for details, default is
        False).
    image_molecules : bool, optional
        If True, molecules are re-imaged into the periodic box before
        writing (default is False).
    ligand_atoms : iterable or int or str, optional
        The atomic indices of the ligand. A string is interpreted as a
        mdtraj DSL specification. Needed for applying pbc using a receptor
        as anchor.
    solvent_atoms : iterable of int or str, optional
        The atom indices of the solvent. A string is interpreted as an
        mdtraj DSL specification of the solvent atoms. Needed to recenter
        with pbc around solute molecules if image_molecules=True. If 'auto',
        a list of common solvent residue names will be used to
        automatically detect solvent atoms (default is 'auto').
    """
    # Thin wrapper: extract_trajectory does all the work and writes the
    # file itself via to_file; the returned trajectory is discarded.
    _ = extract_trajectory(nc_path=nc_path, ref_system=ref_system, top=top,
                           nc_checkpoint_file=nc_checkpoint_file,
                           state_index=state_index, replica_index=replica_index,
                           start_frame=start_frame, end_frame=end_frame,
                           skip_frame=skip_frame, keep_solvent=keep_solvent,
                           discard_equilibration=discard_equilibration,
                           image_molecules=image_molecules,
                           ligand_atoms=ligand_atoms,
                           solvent_atoms=solvent_atoms,
                           to_file=out_path)

# Forward the cached attributes so both entry points share them.
extract_trajectory_to_file.reference_system = extract_trajectory.reference_system
extract_trajectory_to_file.topology = extract_trajectory.topology
5,328,518
def test_PD015_fail_merge_on_pandas_object():
    """
    Test that using .merge() on the pandas root object generates an error.
    """
    code = "pd.merge(df1, df2)"
    syntax_tree = ast.parse(code)
    findings = list(VetPlugin(syntax_tree).run())
    assert findings == [PD015(1, 0)]
5,328,519
def sample_coll(word, urns=None, after=5, before=5, sample_size=300, limit=1000):
    """Find collocations for word in a sample of set of book URNs.

    :param word: the target word.
    :param urns: list of URNs, either flat ``[s1, s2, ...]`` or nested
        ``[[s1, ...], [s2, ...], ...]`` with the serial as first element.
    :param after, before: collocation window size in words.
    :param sample_size: maximum number of URNs sampled.
    :param limit: maximum number of collocates requested from the service.
    :returns: pandas DataFrame of collocates, sorted by frequency.
    """
    from random import sample

    # Fix: a mutable default argument ([]) is shared across calls; use the
    # None sentinel instead (behaviour for callers is unchanged).
    if urns is None:
        urns = []

    # check if urns is a list of lists, [[s1, ...],[s2, ...]...] then urn
    # serial is the first element, else assume the form [s1, s2, ....]
    if isinstance(urns[0], list):
        urns = [u[0] for u in urns]
    newurns = [x[0] for x in nb.refine_book_urn(words=[word], urns=urns)]

    # Take a sample
    sampleurns = sample(newurns, min(len(newurns), sample_size))

    # run collocation as normal
    r = requests.post(
        "https://api.nb.no/ngram/urncoll",
        json={
            'word': word,
            'urns': sampleurns,
            'after': after,
            'before': before,
            'limit': limit
        }
    )
    res = pd.DataFrame.from_dict(r.json(), orient='index')

    # sort values of resultant set
    if not res.empty:
        res = res.sort_values(by=res.columns[0], ascending=False)
    return res
5,328,520
def roll_array(arr: npt.ArrayLike, shift: int, axis: int = 0) -> np.ndarray:
    """Roll the elements in the array by `shift` positions along the given axis.

    Parameters
    ----------
    arr : :py:obj:`~numpy.typing.ArrayLike`
        input array to roll
    shift : int
        number of bins to shift by
    axis : int
        axis to roll along, by default 0

    Returns
    -------
    :py:obj:`~numpy.ndarray`
        shifted numpy array
    """
    arr = np.asanyarray(arr)
    size = arr.shape[axis]
    # Normalise the shift into [0, size); an empty axis raises
    # ZeroDivisionError, matching the original behaviour.
    offset = shift % size
    indices = (np.arange(size) + offset) % size
    return arr.take(indices, axis)
5,328,521
def remove(favourites_list, ctype, pk, **options):
    """Remove a line from the favourites_list.

    :param favourites_list: the favourites list to operate on.
    :param ctype: content type used to resolve the target instance.
    :param pk: primary key of the target instance.
    :param options: passed through to ``favourites_list.remove``.
    :returns: whatever ``favourites_list.remove`` returns.
    """
    # Resolve (ctype, pk) back into a model instance, then delegate.
    instance = unpack_instance_key(favourites_list, ctype, pk)
    return favourites_list.remove(instance, options=options)
5,328,522
def area_rm(path):
    """
    Compute each polygon's area, write it into the attribute table, then
    filter on the area attribute and delete the feature with the largest
    area. Requires a projected (metre-based) coordinate system.

    :param path: path to the polygon shapefile (opened in update mode).
    """
    driver = ogr.GetDriverByName('ESRI Shapefile')
    poly_DS = driver.Open(path,1)
    poly_lyr = poly_DS.GetLayer(0)
    prosrs = poly_lyr.GetSpatialRef()  # projected spatial reference
    #print("投影坐标系为:",prosrs)
    geosrs = prosrs.CloneGeogCS()  # underlying geographic coordinate system
    #print("地理坐标系为:",geosrs)
    wkt = prosrs.ExportToWkt()  # export the SRS as a WKT string
    # Areas are only meaningful when the linear unit is metres; the WKT
    # spelling of the unit varies, hence the three checks.
    bool1 = wkt.find('UNIT["Metre"')
    bool2 = wkt.find('UNIT["metre"')
    bool3 = wkt.find('UNIT["Meter"')
    if bool1 == -1 and bool2 == -1 and bool3 == -1:
        # Message: "please define a projected coordinate system!"
        print('请定义投影坐标!')
        # time.sleep(5)
        # NOTE(review): os._exit(-1) kills the whole process without
        # cleanup -- consider raising instead.
        os._exit(-1)
    new_field = ogr.FieldDefn("Area", ogr.OFTString)
    # new_field.SetWidth(32)
    # new_field.SetPrecision(2)  # area precision
    poly_lyr.CreateField(new_field)
    featnumber = poly_lyr.GetFeatureCount()
    # Write each area (km^2, 2 decimals) into the attribute table and keep
    # them in a list. NOTE(review): `list` shadows the builtin.
    list = []
    for i in range(featnumber):
        feature = poly_lyr.GetFeature(i)
        geom = feature.GetGeometryRef()
        area = geom.GetArea()
        area = round(area/1000000,2)
        list.append(area)
        feature.SetField("Area", area)  # store the area on the feature
        poly_lyr.SetFeature(feature)
    # Largest area found.
    area_max = max(list)
    # Attribute filter matching the largest area.
    strFilter = "Area = '" + str(area_max) + "'"
    poly_lyr.SetAttributeFilter(strFilter)
    # Delete every feature matching the filter.
    pFeatureDef = poly_lyr.GetLayerDefn()
    # pLayerName = poly_lyr.GetName()
    pFieldName = "Area"
    # pFieldIndex = pFeatureDef.GetFieldIndex(pFieldName)
    for pFeature in poly_lyr:
        pFeatureFID = pFeature.GetFID()
        poly_lyr.DeleteFeature(int(pFeatureFID))
    # REPACK physically removes deleted rows from the shapefile.
    strSQL = "REPACK " + str(poly_lyr.GetName())
    poly_DS.ExecuteSQL(strSQL, None, "")
    pFeatureLayer = None
    poly_DS = None
5,328,523
def eHealthClass_airFlowWave(*args):
    """eHealthClass_airFlowWave(int air)

    SWIG-generated wrapper: delegates directly to the native ``_ehealth``
    extension module. See the eHealth library documentation for semantics.
    """
    return _ehealth.eHealthClass_airFlowWave(*args)
5,328,524
def interactive_plot(outdir='./_output',format='ascii'):
    """
    Convenience function for launching an interactive plotting session.

    :param outdir: directory containing the Clawpack output files to plot.
    :param format: format of the output files (e.g. 'ascii').
    """
    # Imported lazily so importing this module does not require visclaw.
    from visclaw.plotters import Iplotclaw
    ip=Iplotclaw.Iplotclaw()
    ip.plotdata.outdir=outdir
    ip.plotdata.format=format
    # Blocks in the interactive plotting loop until the user quits.
    ip.plotloop()
5,328,525
def build_gtid_ranges(iterator):
    """Yield dicts containing the most compact representation of all
    (timestamp, UUID, GNO) tuples returned by the given iterator.  Ranges
    are yielded in input order and never contain gaps; an uninterrupted
    sequence of events from a single server produces exactly one range."""
    active = None
    for timestamp, server_id, server_uuid, gno, _file_position in iterator:
        if active is not None:
            # A consecutive GNO from the same server extends the open
            # range; anything else closes it.
            if active["server_uuid"] == server_uuid and active["end"] + 1 == gno:
                active["end"] = gno
                active["end_ts"] = timestamp
                continue
            yield active
        active = {
            "end": gno,
            "end_ts": timestamp,
            "server_id": server_id,
            "server_uuid": server_uuid,
            "start": gno,
            "start_ts": timestamp,
        }
    if active is not None:
        yield active
5,328,526
def bp_symm_func(tensors, sf_spec, rc, cutoff_type):
    """ Wrapper for building Behler-style symmetry functions"""
    builders = {'G2': G2_SF, 'G3': G3_SF, 'G4': G4_SF}
    fingerprints = {}
    for idx, spec in enumerate(sf_spec):
        kwargs = {key: val for key, val in spec.items() if key != "type"}
        if spec['type'] == 'G3':
            # Workaround for G3 only: it additionally needs the cutoff.
            kwargs['rc'] = rc
            kwargs['cutoff_type'] = cutoff_type
        fp, jacob, jacob_ind = builders[spec['type']](tensors, **kwargs)
        fingerprints['fp_{}'.format(idx)] = fp
        fingerprints['jacob_{}'.format(idx)] = jacob
        fingerprints['jacob_ind_{}'.format(idx)] = jacob_ind
    return fingerprints
5,328,527
def test_lock_error(request, mocker, runner):
    """
    Lock a configuration and check for exceptions when redis fails.
    """
    if not MOCKED:
        pytest.xfail("need mocking")
    # Use the test's own name as the configuration key.
    key = request.node.name
    with open("pytest.ini", "w") as config:
        config.write("[pytest]\naddopts = --setup-only")
    if MOCKED:
        # Make the redis lock fail so the CLI surfaces the error.
        mocker.patch("cdist.redis.RedisResource.lock",
                     side_effect=cdist.ResourceError())
    # push configuration file
    ret = runner(['lock', key])
    assert ret.exception
    assert ret.exit_code == 1
    if MOCKED:
        cdist.redis.RedisResource.lock.assert_called_with(key)
5,328,528
def create_dummy_window(show_all=True, should_quit=False, fullscreen=False):
    """
    Function to create dummy window which does nothing.

    :param show_all: True if window should be shown immediately
    :param should_quit: True if the Gtk main loop should quit after the
        user closes the window
    :param fullscreen: True if window should be in full screen mode by default
    :return: the newly created Gtk.Window
    """
    window = Gtk.Window()
    if show_all:
        window.show_all()
    if should_quit:
        window.connect("delete-event", Gtk.main_quit)
    if fullscreen:
        window.fullscreen()
    return window
5,328,529
def color_RGB_to_hs(iR: float, iG: float, iB: float) -> tuple[float, float]:
    """Convert an rgb color to its hs representation.

    Delegates to ``color_RGB_to_hsv`` and drops the value component.
    """
    return color_RGB_to_hsv(iR, iG, iB)[:2]
5,328,530
def remove_linked_attachments(root, atts, ns):
    """Remove linked attachments and related linkage."""
    rdf_about = f"{{{ns['rdf']}}}about"
    rdf_res = f"{{{ns['rdf']}}}resource"
    att_ids = {att.get(rdf_about) for att in atts}

    # Strip every link element that points at one of the attachments.
    for bib in root.iterfind("bib:*", ns):
        stale_links = [link for link in bib.findall('link:link', ns)
                       if link.get(rdf_res) in att_ids]
        for link in stale_links:
            bib.remove(link)

    # Finally drop the attachment elements themselves.
    for att in atts:
        root.remove(att)
5,328,531
def drawFighterPath(time, surface, fighterSprite, path):
    """Draws the path of the fighter.

    fighterSprite is the sprite of the fighter.
    path is the series of tiles the fighter will walk through.

    NOTE(review): currently a stub -- draws nothing and returns None.
    """
    return
5,328,532
def select_thread(*args):
    """
    select_thread(tid) -> bool

    Select the given thread as the current debugged thread. All thread
    related execution functions will work on this thread. The process must
    be suspended to select a new thread.

    \sq{Type,
        Synchronous function - available as request,
        Notification, none (synchronous function)}

    @param tid: ID of the thread to select (C++: thid_t)
    @return: false if the thread doesn't exist.
    """
    # SWIG-generated thunk: all work happens in the native _ida_dbg module.
    return _ida_dbg.select_thread(*args)
5,328,533
def f():
    """Redefinition of the function."""
    message = 'second f'
    print(message)
5,328,534
def simulation_aggreation_merge(rankings, baseline, method='od'):
    """Merge rankings by running simulation of existing rankings.

    This would first extract relative position of different ranking results,
    and relative position are considered as simulated games. The game
    results are sent to another ranker that gives merged ranking result.

    NOTE(review): the public name is misspelled ('aggreation'); kept for
    interface compatibility.

    Parameters
    ----------
    rankings: list of rankings returned by rank of rankers.
    baseline: (0, +Inf)
        Since we are using relative position of each game player, one
        should provide a baseline as the least score a team should obtain
        in the simulated match.
    method: {'massey', 'colley', 'keener', 'markov', 'od', 'difference'}
        The final ranker applied on simulated games.

    Returns
    -------
    pandas.DataFrame: ['name', 'rating', 'rank']
    """
    if not isinstance(rankings, list):
        raise ValueError('rankings should be a list of ranker result.')
    if not all([isinstance(x, pd.DataFrame) for x in rankings]):
        raise ValueError('all items in rankings list should be pandas dataframe.')
    vhost = []
    vvisit = []
    vhscore = []
    vvscore = []
    # Turn every ordered pair (i, j) within each ranking into one simulated
    # game: the better-ranked side wins by the rank difference over baseline.
    for it in rankings:
        for i in range(it.shape[0]):
            for j in range(i+1, it.shape[0]):
                host = it.loc[i, 'name']
                visit = it.loc[j, 'name']
                delta = it.loc[j, 'rank'] - it.loc[i, 'rank']
                # host wins delta score over visit
                hscore = baseline if delta<0 else baseline+delta
                vscore = baseline-delta if delta<0 else baseline
                vhost.append(host)
                vvisit.append(visit)
                vhscore.append(hscore)
                vvscore.append(vscore)
    sim = pd.DataFrame(data={
        'host': vhost,
        'visit': vvisit,
        'hscore': vhscore,
        'vscore': vvscore
    }, columns=['host', 'visit', 'hscore', 'vscore'])
    # Columns 0-3 are host/visit/hscore/vscore for the rankers' Table.
    data = Table(data=sim, col = [0, 1, 2, 3])
    if method=='massey':
        ranker = MasseyRanker(table=data)
        return ranker.rank(ascending=False)
    elif method=='colley':
        ranker = ColleyRanker(table=data)
        return ranker.rank(ascending=False)
    elif method=='keener':
        ranker = KeenerRanker(table=data)
        return ranker.rank(ascending=False)
    elif method=='markov':
        ranker = MarkovRanker(table=data)
        return ranker.rank(ascending=False)
    elif method=='od':
        ranker = ODRanker(table=data)
        return ranker.rank(output='summary', ascending=False)
    elif method=='difference':
        ranker = DifferenceRanker(table=data)
        return ranker.rank(ascending=False)
    else:
        raise ValueError('method not available. Available methods are: massey, colley, keener, markov, od and difference.')
5,328,535
def maxOverTime(field,makeTimes=0):
    """Take the max of the values in each time step

    If makeTimes is true (1) then we return a field mapping all of the
    times to the average. Else we just return the max

    NOTE(review): "average" above looks copy-pasted from an avgOverTime
    variant -- presumably the max is mapped; confirm against GridMath.

    Jython wrapper delegating to the Java GridMath helper.
    """
    return GridMath.maxOverTime(field,makeTimes);
5,328,536
async def test_arlo_baby_setup(hass):
    """Test that an Arlo Baby can be correctly setup in HA."""
    # The JSON fixture describes the accessory's HomeKit services; pairing
    # it should create the device plus all entities asserted below.
    accessories = await setup_accessories_from_file(hass, "arlo_baby.json")
    await setup_test_accessories(hass, accessories)

    await assert_devices_and_entities_created(
        hass,
        DeviceTestInfo(
            unique_id=HUB_TEST_ACCESSORY_ID,
            name="ArloBabyA0",
            model="ABC1000",
            manufacturer="Netgear, Inc",
            sw_version="1.10.931",
            hw_version="",
            serial_number="00A0000000000",
            devices=[],
            entities=[
                EntityTestInfo(
                    entity_id="camera.arlobabya0",
                    unique_id="homekit-00A0000000000-aid:1",
                    friendly_name="ArloBabyA0",
                    state="idle",
                ),
                EntityTestInfo(
                    entity_id="binary_sensor.arlobabya0",
                    unique_id="homekit-00A0000000000-500",
                    friendly_name="ArloBabyA0",
                    state="off",
                ),
                EntityTestInfo(
                    entity_id="sensor.arlobabya0_battery",
                    unique_id="homekit-00A0000000000-700",
                    friendly_name="ArloBabyA0 Battery",
                    unit_of_measurement=PERCENTAGE,
                    state="82",
                ),
                EntityTestInfo(
                    entity_id="sensor.arlobabya0_humidity",
                    unique_id="homekit-00A0000000000-900",
                    friendly_name="ArloBabyA0 Humidity",
                    unit_of_measurement=PERCENTAGE,
                    state="60.099998",
                ),
                EntityTestInfo(
                    entity_id="sensor.arlobabya0_temperature",
                    unique_id="homekit-00A0000000000-1000",
                    friendly_name="ArloBabyA0 Temperature",
                    unit_of_measurement=TEMP_CELSIUS,
                    state="24.0",
                ),
                EntityTestInfo(
                    entity_id="sensor.arlobabya0_air_quality",
                    unique_id="homekit-00A0000000000-aid:1-sid:800-cid:802",
                    capabilities={"state_class": SensorStateClass.MEASUREMENT},
                    friendly_name="ArloBabyA0 - Air Quality",
                    state="1",
                ),
                EntityTestInfo(
                    entity_id="light.arlobabya0",
                    unique_id="homekit-00A0000000000-1100",
                    friendly_name="ArloBabyA0",
                    supported_features=SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
                    capabilities={"supported_color_modes": ["hs"]},
                    state="off",
                ),
            ],
        ),
    )
5,328,537
def assemble_docstring(parsed, sig=None):
    """
    Assemble a docstring from an OrderedDict as returned by
    :meth:`nd.utils.parse_docstring()`

    Parameters
    ----------
    parsed : OrderedDict
        A parsed docstring as obtained by ``nd.utils.parse_docstring()``.
        Must contain an ``'indent'`` entry (int); each section value is a
        list of lines, or a list of per-entry line-lists.
    sig : function signature, optional
        If provided, the parameters in the docstring will be ordered
        according to the parameter order in the function signature.

    Returns
    -------
    str
        The assembled docstring.
    """
    parsed = parsed.copy()
    indent = parsed.pop('indent')
    pad = ' ' * indent

    # Sort 'Parameters' section according to signature
    if sig is not None and 'Parameters' in parsed:
        order = tuple(sig.parameters.keys())

        def sort_index(p):
            key = p[0].split(':')[0].strip(' *')
            # Unnamed entries, and parameters that do not appear in the
            # signature, sort last instead of raising ValueError from
            # order.index() (bugfix).
            if key == '' or key not in order:
                return 9999
            return order.index(key)

        parsed['Parameters'] = sorted(parsed['Parameters'], key=sort_index)

    d = []
    for k, v in parsed.items():
        if isinstance(v[0], list):
            # Section stored as a list of line-lists: flatten one level.
            flat_v = reduce(add, v)
        else:
            flat_v = v
        if k is not None:
            # Named section: blank line, heading, dashed underline.
            d.extend(['', pad + k, pad + '-' * len(k)])
        d.extend([(pad + l).rstrip() for l in flat_v])
    return '\n'.join(d)
5,328,538
def retrieve_unscoped_token(os_auth_url, access_token, protocol="openid"):
    """Exchange an OIDC access token for an unscoped Keystone token.

    Posts the bearer token to the Keystone federation endpoint of the
    egi.eu identity provider and returns the issued token id.
    Raises RuntimeError when the server does not answer 201 Created.
    """
    auth_path = (
        "/v3/OS-FEDERATION/identity_providers/egi.eu/protocols/%s/auth"
        % protocol
    )
    url = get_keystone_url(os_auth_url, auth_path)
    response = requests.post(
        url, headers={"Authorization": "Bearer %s" % access_token}
    )
    if response.status_code == requests.codes.created:
        return response.headers["X-Subject-Token"]
    raise RuntimeError("Unable to get an unscoped token")
5,328,539
def load_dataset(csv_path, relative_path):
    """
    Load the driving-log dataset and augment it with horizontal flips.

    For every CSV line the center, left and right camera images are
    loaded; each image is additionally mirrored (with negated steering
    measurement), doubling the data.  The three previously copy-pasted
    per-camera blocks are collapsed into one loop.

    Inputs
    ---
        csv_path: path to training data csv
        relative_path: relative path to training data

    Outputs
    ---
        X: Training data numpy array
        y: Training labels numpy array
    """
    # Read CSV lines
    lines = []
    with open(csv_path) as csvfile:
        reader = csv.reader(csvfile)
        print("Loading CSV File ...")
        for line in tqdm(reader):
            lines.append(line)

    images = []
    measurements = []
    print("Loading Data ...")
    # Read from CSV lines
    for line in tqdm(lines):
        # Camera index 0/1/2 = center/left/right (same order as before).
        for camera in range(3):
            image, measurement = _load_image(line, camera, relative_path)
            images.append(image)
            measurements.append(measurement)
            # Mirrored copy; the steering angle flips sign accordingly.
            images.append(np.fliplr(image))
            measurements.append(-1 * measurement)

    X = np.array(images)
    y = np.array(measurements)
    return X, y
5,328,540
def randomly_replace_a_zone() -> rd.RouteDict:
    """
    Load the historical routes and, for every route scored "High",
    replace one randomly chosen drop-off zone with a new zone id.

    :return: rd.RouteDict: the modified routes
    """
    routeDict = rd.loadOrCreate()
    # Fixed seed so the "random" zone choice is reproducible across runs.
    rng.seed(3)
    new_zone_id = 'zub_fy'
    i: int
    rid: str
    route: rd.Route
    for (i, (rid, route)) in enumerate(routeDict.items()):
        # Only high-quality routes are mutated.
        if route.route_score != "High":
            continue
        # if i < 55:
        #     continue
        # Collect drop-off stops grouped by zone, preserving first-seen
        # zone order so the seeded RNG picks deterministically.
        zone_id2stops: Dict[str, List[rd.Stop]] = {}
        unique_zone_id_list: List[str] = []
        for s in route.stops:
            if s.isDropoff() and s.zone_id is not None:
                # if s.zone_id == 'C-13.1J':
                #     print(f'**** {s.idx=}, {s.zone_id=}')
                if s.zone_id in zone_id2stops:
                    zone_id2stops[s.zone_id].append(s)
                else:
                    zone_id2stops[s.zone_id] = [s]
                    unique_zone_id_list.append(s.zone_id)
        # Pick one zone uniformly at random and relabel all its stops.
        zone_idx = rng.randrange(0, len(unique_zone_id_list))
        selected_zone_id = unique_zone_id_list[zone_idx]
        print(f'route_idx {i}: change zone {selected_zone_id} to {new_zone_id}')
        for s in zone_id2stops[selected_zone_id]:
            # print(f'---- {s.idx}, zone {s.zone_id} ==> {new_zone_id}')
            s.zone_id = new_zone_id
        # Sanity checks: the route must not carry stale cached zone data.
        if hasattr(route, 'zones'):
            raise RuntimeError
        if hasattr(route, 'zones_filled'):
            raise RuntimeError
        # Recompute zones from the (now modified) stops.  Using
        # fill_missing_zone would also pull in nearby stops without a
        # zone_id, which we do not want here.
        zones, zone_id2zones = route.computeZones()
        # The old zone must be gone, the new one must exist, and the new
        # zone must contain exactly the relabeled stops.
        if selected_zone_id in zone_id2zones:
            raise RuntimeError
        if not (new_zone_id in zone_id2zones):
            raise RuntimeError
        if len(zone_id2zones[new_zone_id].stops) != len(zone_id2stops[selected_zone_id]):
            # print(f' new_zone stops: {[s.idx for s in zone_id2zones[new_zone_id].stops]}')
            raise RuntimeError(f'{len(zone_id2zones[new_zone_id].stops)=} != {len(zone_id2stops[selected_zone_id])=}')
    return routeDict
5,328,541
def check_default_make_help(project):
    """Check that we have the expected make targets in the default configuration"""
    res = make(project, 'help')
    stdout = res.stdout
    # some versions of make print additional information that we need to strip
    stdout = re.sub(r'make.*: Entering directory.*\n', '', stdout)
    stdout = re.sub(r'make.*: Leaving directory.*', '', stdout)
    stdout = stdout.strip()
    # NOTE(review): the exact intra-line spacing of this expected text
    # could not be recovered from the mangled source -- confirm against
    # the project's actual `make help` output.
    expected = dedent(
        r'''
        help show this help
        bootstrap verify that tox is available and pre-commit hooks are active
        clean remove all build, docs, test, and coverage artifacts, as well as tox environments
        clean-build remove build artifacts
        clean-tests remove test and coverage artifacts
        clean-venv remove tox virtual environments
        clean-docs remove documentation artifacts
        flake8-check check style with flake8
        pylint-check check style with pylint
        test run tests for all supported Python versions
        test35 run tests for Python 3.5
        test36 run tests for Python 3.6
        test37 run tests for Python 3.7
        test38 run tests for Python 3.8
        test39 run tests for Python 3.9
        docs generate Sphinx HTML documentation, including API docs
        black-check Check all src and test files for complience to "black" code style
        black Apply 'black' code style to all src and test files
        isort-check Check all src and test files for correctly sorted imports
        isort Sort imports in all src and test files
        coverage generate coverage report in ./htmlcov
        test-upload package and upload a release to test.pypi.org
        upload package and upload a release to pypi.org
        release Create a new version, package and upload it
        dist builds source and wheel package
        dist-check Check all dist files for correctness
        '''
    ).strip()
    assert stdout == expected
5,328,542
def import_loop(schema, mutable, raw_data=None, field_converter=None, trusted_data=None,
                mapping=None, partial=False, strict=False, init_values=False,
                apply_defaults=False, convert=True, validate=False, new=False,
                oo=False, recursive=False, app_data=None, context=None):
    """
    The import loop is designed to take untrusted data and convert it into the
    native types, as described in ``schema``. It does this by calling
    ``field_converter`` on every field.

    Errors are aggregated and returned by throwing a ``ModelConversionError``.

    :param schema:
        The Schema to use as source for validation.
    :param mutable:
        A mapping or instance that can be changed during validation by Schema
        functions.
    :param raw_data:
        A mapping to be converted into types according to ``schema``.
    :param field_converter:
        This function is applied to every field found in ``instance_or_dict``.
    :param trusted_data:
        A ``dict``-like structure that may contain already validated data.
    :param partial:
        Allow partial data to validate; useful for PATCH requests.
        Essentially drops the ``required=True`` arguments from field
        definitions. Default: False
    :param strict:
        Complain about unrecognized keys. Default: False
    :param apply_defaults:
        Whether to set fields to their default values when not present in
        input data.
    :param app_data:
        An arbitrary container for application-specific data that needs to
        be available during the conversion.
    :param context:
        A ``Context`` object that encapsulates configuration options and
        ``app_data``. The context object is created upon the initial
        invocation of ``import_loop`` and is then propagated through the
        entire process.
    """
    if raw_data is None:
        raw_data = mutable
    got_data = raw_data is not None

    # Build (or reuse) the Context carrying all options for this import.
    context = Context._make(context)
    try:
        # Already-initialized context: reuse it unchanged on recursion.
        context.initialized
    except:
        # First invocation: wrap a plain function converter and record
        # all options on the context so nested calls inherit them.
        if type(field_converter) is types.FunctionType:
            field_converter = BasicConverter(field_converter)
        context._setdefaults({
            'initialized': True,
            'field_converter': field_converter,
            'trusted_data': trusted_data or {},
            'mapping': mapping or {},
            'partial': partial,
            'strict': strict,
            'init_values': init_values,
            'apply_defaults': apply_defaults,
            'convert': convert,
            'validate': validate,
            'new': new,
            'oo': oo,
            'recursive': recursive,
            'app_data': app_data if app_data is not None else {}
        })

    # Give the converter a chance to pre-process the whole input mapping.
    raw_data = context.field_converter.pre(schema, raw_data, context)

    _field_converter = context.field_converter
    _model_mapping = context.mapping.get('model_mapping')

    # Start from the already-trusted values; converted fields overwrite them.
    data = dict(context.trusted_data) if context.trusted_data else {}
    errors = {}

    if got_data and context.validate:
        errors = _mutate(schema, mutable, raw_data, context)

    if got_data:
        # Determine all acceptable field input names
        all_fields = schema._valid_input_keys
        if context.mapping:
            mapped_keys = (set(itertools.chain(*(
                            listify(input_keys)
                            for target_key, input_keys in context.mapping.items()
                            if target_key != 'model_mapping'))))
            all_fields = all_fields | mapped_keys
        if context.strict:
            # Check for rogues if strict is set
            rogue_fields = set(raw_data) - all_fields
            if rogue_fields:
                for field in rogue_fields:
                    errors[field] = 'Rogue field'

    atoms_filter = None
    if not context.validate:
        # optimization: convert without validate doesn't require to touch setters
        atoms_filter = atom_filter.not_setter

    for field_name, field, value in atoms(schema, raw_data, filter=atoms_filter):
        serialized_field_name = field.serialized_name or field_name

        # Value not under its canonical name: try mapped/aliased input keys.
        if got_data and value is Undefined:
            for key in field.get_input_keys(context.mapping):
                if key and key != field_name and key in raw_data:
                    value = raw_data[key]
                    break

        if value is Undefined:
            # Trusted data already supplied this field; keep it.
            if field_name in data:
                continue
            if context.apply_defaults:
                value = field.default
        if value is Undefined and context.init_values:
            value = None

        if got_data:
            if field.is_compound:
                # Branch the context so nested models convert against
                # their own slice of trusted data and model mapping.
                if context.trusted_data and context.recursive:
                    td = context.trusted_data.get(field_name)
                    # gottfried structure check: anything that quacks like
                    # a mapping is passed through; scalars get wrapped.
                    if not all(hasattr(td, attr) for attr in ('keys', '__getitem__')):
                        td = {field_name: td}
                else:
                    td = {}
                if _model_mapping:
                    submap = _model_mapping.get(field_name)
                else:
                    submap = {}
                field_context = context._branch(trusted_data=td, mapping=submap)
            else:
                field_context = context
            try:
                value = _field_converter(field, value, field_context)
            except (FieldError, CompoundError) as exc:
                errors[serialized_field_name] = exc
                # Fall back to the default so partial data stays usable.
                if context.apply_defaults:
                    value = field.default
                if value is not Undefined:
                    data[field_name] = value
                # Compound errors still carry partially converted data.
                if isinstance(exc, DataError):
                    data[field_name] = exc.partial_data
                continue

        if value is Undefined:
            continue
        data[field_name] = value

    if not context.validate:
        # Setter-backed atoms were skipped above; copy their raw values.
        for field_name, field, value in atoms(schema, raw_data, filter=atom_filter.has_setter):
            data[field_name] = value

    if errors:
        raise DataError(errors, data)

    # Converter post-processing of the fully assembled mapping.
    data = context.field_converter.post(schema, data, context)
    return data
5,328,543
def test_rectangles(): """Test instantiating Shapes layer with a random 2D rectangles.""" # Test a single four corner rectangle shape = (1, 4, 2) np.random.seed(0) data = 20 * np.random.random(shape) layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all(layer.data[0] == data[0]) assert layer.ndim == shape[2] assert np.all([s == 'rectangle' for s in layer.shape_type]) # Test multiple four corner rectangles shape = (10, 4, 2) data = 20 * np.random.random(shape) layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)]) assert layer.ndim == shape[2] assert np.all([s == 'rectangle' for s in layer.shape_type]) # Test a single two corner rectangle, which gets converted into four # corner rectangle shape = (1, 2, 2) data = 20 * np.random.random(shape) layer = Shapes(data) assert layer.nshapes == 1 assert len(layer.data[0]) == 4 assert layer.ndim == shape[2] assert np.all([s == 'rectangle' for s in layer.shape_type]) # Test multiple two corner rectangles shape = (10, 2, 2) data = 20 * np.random.random(shape) layer = Shapes(data) assert layer.nshapes == shape[0] assert np.all([len(ld) == 4 for ld in layer.data]) assert layer.ndim == shape[2] assert np.all([s == 'rectangle' for s in layer.shape_type])
5,328,544
def print_error(fmt_str, value, error_content):
    """
    Send errors to stderr.  This displays errors on the screen.

    (Docstring fix: the previous text claimed stdout, but the code has
    always written to ``sys.stderr``.)

    :param str fmt_str: a Python `format string
       <https://docs.python.org/3/library/string.html#formatspec>`_ for the
       error.  Can use arguments **{value}** and **{error_content}** in the
       format string
    :param Any value: the value that caused the error
    :param str error_content: additional information for the error
    :return: None
    """
    print(fmt_str.format(value=value, error_content=error_content), file=sys.stderr)
5,328,545
def resnet_50(inputs, block_fn=bottleneck_block, is_training_bn=False):
    """ResNetv50 model with classification layers removed.

    Returns the C3, C4 and C5 feature maps (strides 8/16/32 relative to
    the input) for use as a detection backbone.
    """
    # [3, 4, 6, 3] bottleneck blocks per group is the ResNet-50 layout.
    layers = [3, 4, 6, 3]
    data_format = 'channels_last'
    # Stem: 7x7/2 conv + BN/ReLU + 3x3/2 max-pool (overall stride 4).
    inputs = conv2d_fixed_padding(
        inputs=inputs,
        filters=64,
        kernel_size=7,
        strides=2,
        data_format=data_format)
    inputs = tf.identity(inputs, 'initial_conv')
    inputs = batch_norm_relu(inputs, is_training_bn, data_format=data_format)

    inputs = tf.layers.max_pooling2d(
        inputs=inputs,
        pool_size=3,
        strides=2,
        padding='SAME',
        data_format=data_format)
    inputs = tf.identity(inputs, 'initial_max_pool')

    # Four residual groups; groups 2-4 downsample by 2 each.
    inputs = block_group(
        inputs=inputs,
        filters=64,
        blocks=layers[0],
        strides=1,
        block_fn=block_fn,
        is_training_bn=is_training_bn,
        name='block_group1',
        data_format=data_format)
    c3 = block_group(
        inputs=inputs,
        filters=128,
        blocks=layers[1],
        strides=2,
        block_fn=block_fn,
        is_training_bn=is_training_bn,
        name='block_group2',
        data_format=data_format)
    c4 = block_group(
        inputs=c3,
        filters=256,
        blocks=layers[2],
        strides=2,
        block_fn=block_fn,
        is_training_bn=is_training_bn,
        name='block_group3',
        data_format=data_format)
    c5 = block_group(
        inputs=c4,
        filters=512,
        blocks=layers[3],
        strides=2,
        block_fn=block_fn,
        is_training_bn=is_training_bn,
        name='block_group4',
        data_format=data_format)
    return c3, c4, c5
5,328,546
def _sanitize_index_element(ind): """Sanitize a one-element index.""" if isinstance(ind, Number): ind2 = int(ind) if ind2 != ind: raise IndexError(f"Bad index. Must be integer-like: {ind}") else: return ind2 elif ind is None: return None else: raise TypeError("Invalid index type", type(ind), ind)
5,328,547
def overlap_click(original, click_position, sr=44100, click_freq=4000, click_duration=0.05):
    """Overlay a click track onto *original* at the given positions.

    :param original: source waveform
    :param click_position: click onset times; notice that positions should
        be given in seconds
    :param sr: sample rate of the generated click track
    :param click_freq: click tone frequency in Hz
    :param click_duration: length of each click in seconds
    :return: standardized wave mixing the original with the click track

    Bugfix: the keyword arguments were previously ignored — the body
    hard-coded sr=44100, click_freq=4000, click_duration=0.05 regardless
    of what the caller passed.  The defaults now equal those hard-coded
    values, so default calls behave exactly as before while explicit
    arguments are finally honored.
    """
    cwave = librosa.clicks(np.array(click_position), sr=sr,
                           click_freq=click_freq,
                           click_duration=click_duration) / 2
    # Match lengths (mono, pad/truncate) before mixing.
    original, click_track = mono_pad_or_truncate(original, cwave)
    return standardize(original + click_track)
5,328,548
def vigenere(plaintext: str, *, key: str) -> str:
    """Vigenère cipher (page 48)

    Encrypts *plaintext* by cycling through one Caesar alphabet per key
    letter.

    - `plaintext` is the message to be encrypted
    - `key` defines the series of interwoven Caesar ciphers to be used
    """
    plaintext = validate_plaintext(plaintext)
    key = validate_key(key)
    # One shifted alphabet per key character, repeated for the whole text.
    alphabets = itertools.cycle(_shifted_alphabet(ord(ch) - 65) for ch in key)
    return "".join(
        alphabet[ord(ch) - 97] for ch, alphabet in zip(plaintext, alphabets)
    )
5,328,549
def generateVtTick(row, symbol):
    """Build a VtTickData object from one market-data row.

    (The old docstring said "生成K线" / "generate bar", but this function
    builds a tick, not a bar.)

    :param row: data row with price/volume/depth fields; ``row.name`` is
        the timestamp
    :param symbol: contract symbol stamped onto the tick
    :return: populated VtTickData
    """
    tick = VtTickData()
    tick.symbol = symbol
    tick.vtSymbol = symbol

    tick.lastPrice = row['last']
    tick.volume = row['volume']
    tick.openInterest = row['open_interest']
    tick.datetime = row.name

    tick.openPrice = row['open']
    tick.highPrice = row['high']
    tick.lowPrice = row['low']
    tick.preClosePrice = row['prev_close']

    tick.upperLimit = row['limit_up']
    tick.lowerLimit = row['limit_down']

    # Five levels of order-book depth on each side (replaces twenty
    # copy-pasted assignments).
    for level in range(1, 6):
        setattr(tick, 'bidPrice%d' % level, row['b%d' % level])
        setattr(tick, 'bidVolume%d' % level, row['b%d_v' % level])
        setattr(tick, 'askPrice%d' % level, row['a%d' % level])
        setattr(tick, 'askVolume%d' % level, row['a%d_v' % level])

    return tick
5,328,550
def hartigan_map_mutations(tree, genotypes, alleles, ancestral_state=None):
    """
    Returns a Hartigan parsimony reconstruction for the specified set of
    genotypes. The reconstruction is specified by returning the ancestral
    state and a list of mutations on the tree. Each mutation is a
    (node, parent, state) triple, where node is the node over which the
    transition occurs, the parent is the index of the parent transition
    above it on the tree (or -1 if there is none) and state is the new state.
    """
    # The python version of map_mutations allows the ancestral_state to be a string
    # from the alleles list, so we implement this at the top of this function although
    # it doesn't need to be in the C equivalent of this function
    if isinstance(ancestral_state, str):
        ancestral_state = alleles.index(ancestral_state)

    # equivalent C implementation can start here
    genotypes = np.array(genotypes)
    not_missing = genotypes != -1
    if np.sum(not_missing) == 0:
        raise ValueError("Must have at least one non-missing genotype")
    num_alleles = np.max(genotypes[not_missing]) + 1
    if ancestral_state is not None:
        if ancestral_state < 0 or ancestral_state >= len(alleles):
            raise ValueError("ancestral_state must be a number from 0..(num_alleles-1)")
        if ancestral_state >= num_alleles:
            num_alleles = ancestral_state + 1
    num_nodes = tree.tree_sequence.num_nodes

    # use a numpy array of 0/1 values to represent the set of states
    # to make the code as similar as possible to the C implementation.
    optimal_set = np.zeros((num_nodes + 1, num_alleles), dtype=np.int8)
    for allele, u in zip(genotypes, tree.tree_sequence.samples()):
        if allele != -1:
            optimal_set[u, allele] = 1
        else:
            # Missing genotype: any allele is acceptable at this sample.
            optimal_set[u] = 1

    # Bottom-up pass: each internal node's optimal set is the alleles
    # achieving the maximum count among its children's optimal sets.
    allele_count = np.zeros(num_alleles, dtype=int)
    for u in tree.nodes(tree.virtual_root, order="postorder"):
        allele_count[:] = 0
        for v in tree.children(u):
            for j in range(num_alleles):
                allele_count[j] += optimal_set[v, j]
        if not tree.is_sample(u):
            max_allele_count = np.max(allele_count)
            optimal_set[u, allele_count == max_allele_count] = 1

    if ancestral_state is None:
        # Pick the most parsimonious root state from the virtual root.
        ancestral_state = np.argmax(optimal_set[tree.virtual_root])
    else:
        # Root state was imposed by the caller; allow all states at the
        # virtual root so the top-down pass keeps the imposed one.
        optimal_set[tree.virtual_root] = 1

    @dataclasses.dataclass
    class StackElement:
        node: int
        state: int
        mutation_parent: int

    # Top-down pass: emit a mutation wherever the inherited state is not
    # in a node's optimal set.
    mutations = []
    stack = [StackElement(tree.virtual_root, ancestral_state, -1)]
    while len(stack) > 0:
        s = stack.pop()
        if optimal_set[s.node, s.state] == 0:
            s.state = np.argmax(optimal_set[s.node])
            mutation = tskit.Mutation(
                node=s.node,
                derived_state=alleles[s.state],
                parent=s.mutation_parent,
            )
            s.mutation_parent = len(mutations)
            mutations.append(mutation)
        for v in tree.children(s.node):
            stack.append(StackElement(v, s.state, s.mutation_parent))
    return alleles[ancestral_state], mutations
5,328,551
def padArray(ori_array, pad_size):
    """
    Pads out an array to a larger size by mirroring its edges.

    ori_array - A 2D numpy array.
    pad_size - The number of elements to add to each of the "sides" of
               the array.

    Returns the padded 2D numpy array (float64), or the original array
    object unchanged when pad_size is not positive.
    """
    if pad_size <= 0:
        return ori_array
    # numpy.pad(mode='symmetric') reproduces exactly the edge mirroring the
    # previous hand-rolled flipud/fliplr code performed (edge row/column
    # included in the reflection, corners mirrored on both axes), and it
    # additionally fills correctly when pad_size exceeds an array
    # dimension, where the old code left stray 1.0 filler values.
    return numpy.pad(ori_array.astype(numpy.float64), pad_size, mode='symmetric')
5,328,552
def finish_subprocess(proc, cmdline, cmd_input=None, ok_exit_codes=None):
    """Ensure that the process exited with an acceptable status code.

    Feeds *cmd_input* to the process, waits for it, and returns its
    stdout.  If the return code is not in *ok_exit_codes* (default:
    just 0) the failure is logged and SubprocessException is raised.
    """
    acceptable = [0] if ok_exit_codes is None else ok_exit_codes
    out, err = proc.communicate(cmd_input)
    ret = proc.returncode
    if ret in acceptable:
        return out
    LOG.error("Command '%(cmdline)s' with process id '%(pid)s' expected "
              "return code in '%(ok)s' but got '%(rc)s': %(err)s",
              {'cmdline': cmdline, 'pid': proc.pid, 'ok': acceptable,
               'rc': ret, 'err': err})
    raise SubprocessException(' '.join(cmdline), ret, out, err)
5,328,553
def _gmapsusage(filename): """ Construct a usage string. """ usage = "{} <mapps_image_dump_file> [path_to_mapps_config_file]".format( filename) # Print usage string. print("\n[Usage]: python {}\n".format(usage))
5,328,554
def browse():
    """
    A simple browser that doesn't deal with queries at all

    Renders a paginated host listing (optionally including historical
    records) together with next/previous pagination URLs.
    """
    page = int(request.args.get("page", 1))
    # Any non-empty query value is treated as truthy here; absence -> False.
    includeHistory = request.args.get("includeHistory", False)
    results_per_page, search_offset = results_offset(page)

    # Search the full history index when requested, otherwise only the
    # latest snapshot of each host.
    searchIndex = "history" if includeHistory else "latest"
    count, hostdata = current_app.elastic.search(
        results_per_page, search_offset, searchIndex=searchIndex
    )
    totalHosts = current_app.elastic.total_hosts()
    if includeHistory:
        next_url, prev_url = build_pagination_urls(
            "main.browse", page, count, includeHistory=includeHistory
        )
    else:
        # By using the if/else we can avoid putting includeHistory=False into the url that gets constructed
        next_url, prev_url = build_pagination_urls("main.browse", page, count)
    return render_template(
        "main/browse.html",
        numresults=count,
        totalHosts=totalHosts,
        page=page,
        hosts=hostdata,
        next_url=next_url,
        prev_url=prev_url,
    )
5,328,555
def argmax(pda: pdarray) -> np.int64:
    """
    Return the index of the first occurrence of the array max value.

    Parameters
    ----------
    pda : pdarray
        Values for which to calculate the argmax

    Returns
    -------
    np.int64
        The index of the argmax calculated from the pda

    Raises
    ------
    TypeError
        Raised if pda is not a pdarray instance
    RuntimeError
        Raised if there's a server-side error thrown
    """
    # Ask the server to run the "argmax" reduction on this array.
    args = "{} {}".format("argmax", pda.name)
    rep_msg = generic_msg(cmd="reduction", args=args)
    return parse_single_value(cast(str, rep_msg))
5,328,556
def log_cumsum(probs, dim=1, eps=1e-8):
    """Return the log of the inclusive cumulative sum along *dim*.

    *eps* is added before taking the log for numerical stability.
    """
    cumulative = torch.cumsum(probs, dim=dim)
    return torch.log(cumulative + eps)
5,328,557
def iou_coe_Slice_by_Slice(output, target, threshold=0.5, axis=(2, 3, 4), smooth=1e-5):
    """Non-differentiable Intersection over Union (IoU) for comparing the similarity

    Both tensors are binarized at *threshold*; IoU is computed per sample
    over *axis* with additive smoothing, then averaged over the batch.
    """
    pred_bin = tf.cast(output > threshold, dtype=tf.float32)
    target_bin = tf.cast(target > threshold, dtype=tf.float32)
    # AND: voxels active in both masks.
    intersection = tf.reduce_sum(tf.multiply(pred_bin, target_bin), axis=axis)
    # OR: voxels active in at least one mask.
    union = tf.reduce_sum(
        tf.cast(tf.add(pred_bin, target_bin) >= 1, dtype=tf.float32), axis=axis)
    # Additive smoothing keeps empty-mask samples well defined.
    per_sample_iou = (intersection + smooth) / (union + smooth)
    return tf.reduce_mean(per_sample_iou, axis=0, name='iou_coe')
5,328,558
def simulation(num_simulations: int, st: np.ndarray, ct: np.ndarray,
               pt: np.ndarray, st_ind: np.ndarray):
    """
    Compute a Monte Carlo simulation generating stock paths for basket
    option pricing; both call and put option payoffs are computed.

    Bugfix: the annotations were ``np.array()`` — a zero-argument call
    evaluated at definition time, which raises TypeError (unless lazy
    annotations are enabled).  They are now the ``np.ndarray`` type.

    Args:
        num_simulations: number of simulation paths to run
        st: output array for simulated basket prices
        ct: output array for simulated call option payoffs
        pt: output array for simulated put option payoffs
        st_ind: 2-D output array of simulated prices per individual stock

    Returns:
        None (results are written into st, ct, pt and st_ind in place)
    """
    # spot_prices, sigmas, T, cor_random_nums, W and strike are read from
    # module-level globals (typical for numba prange kernels) -- TODO confirm.
    for i in prange(num_simulations):
        # Terminal spot prices under GBM with rate 0.026 and correlated draws.
        stock_prices = spot_prices * np.exp(
            (0.026 - 0.5 * sigmas ** 2) * T
            + cor_random_nums[i] * sigmas * np.sqrt(T))
        st_ind[i, :] = stock_prices
        # Basket price is the weighted combination of the stocks.
        st_basket = np.dot(stock_prices, W)
        st[i] = st_basket
        # Compute call payoff.
        ct[i] = max(st_basket - strike, 0)
        # Compute put payoff.
        pt[i] = max(strike - st_basket, 0)
5,328,559
def main_timing_test_blackbox(datapoints, model: CompressionModel, bbstream: BlackBoxBitstream):
    """ Timing test for the black box algorithm

    Encodes every datapoint onto the bitstream (timing each encode and
    comparing achieved bits/dim against the model's expected rate), then
    decodes everything back in reverse order and checks a lossless
    round-trip.
    """
    enc_times = []
    for single_x_raw in tqdm(datapoints, desc='encoding (black box)'):
        init_bits = len(bbstream)
        tstart = time.time()
        bbstream.encode(single_x_raw)
        enc_times.append(time.time() - tstart)
        # Expected rate: model negative log-likelihood in bits per dimension;
        # actual rate: bits the stream actually grew by, per dimension.
        print('expected {expected:.05f} actual {actual:.05f} enc_time {enc_time:.05f}'.format(
            expected=model.forward(single_x_raw[None].to(dtype=torch.float64))['total_logd'][0].item() / (
                -math.log(2.) * int(np.prod(single_x_raw.shape))),
            actual=(len(bbstream) - init_bits) / int(np.prod(single_x_raw.shape)),
            enc_time=enc_times[-1]
        ))
    dec_times = []
    decoded_datapoints = []
    # Stack decoding: last encoded is first decoded.
    for _ in tqdm(reversed(datapoints), desc='decoding (black box)'):
        tstart = time.time()
        decoded_datapoints.append(bbstream.decode().cpu())
        dec_times.append(time.time() - tstart)
    # First measurement skipped: it typically includes warm-up overhead.
    print('enc: {} +/- {} sec'.format(np.mean(enc_times[1:]), np.std(enc_times[1:])))
    print('dec: {} +/- {} sec'.format(np.mean(dec_times[1:]), np.std(dec_times[1:])))
    # Decoded points come out reversed; restore order and verify round-trip.
    assert torch.allclose(datapoints.cpu(), torch.stack(decoded_datapoints[::-1], 0))
5,328,560
def plot_stat(data_source, fig_location=None, show_figure=False):
    """
    Processes input data and plots bar graphs displaying the number of
    accidents per region, one subplot per year.

    :param data_source: Data used to plot the graph
    :param fig_location: File path where graph will be saved (parent
        folder is created if missing); nothing is saved when None
    :param show_figure: If set to True shows graph
    :return: None
    """
    # Bail out early when no data was given -- previously only a message
    # was printed and execution fell through into process_data.
    if not data_source:
        print("Missing data to plot", file=sys.stderr)
        return

    # Getting needed information from data
    data = process_data(data_source)
    num_years = len(data)
    data = dict(sorted(data.items()))

    # Creating the graphs (A4 portrait canvas)
    fig = plt.figure(figsize=(8.27, 11.69))
    plt.subplots_adjust(hspace=0.8)
    sub_plot_id = 1

    # Find ylim shared by all yearly subplots
    max_y = find_ylim(data)
    for year in data:
        plt.subplot(num_years, 1, sub_plot_id)
        X = [2 * pos for pos in np.arange(len(data[year]))]
        plt.ylim(top=max_y)
        plt.yticks([tick for tick in range(0, max_y, max_y // 5)])
        plt.xlabel("Kraje")
        plt.ylabel("Počet nehôd")
        plt.title(year)
        plt.xticks(X, data[year].keys())

        vals = [entry[0] for entry in data[year].values()]
        # Wide translucent background bars behind the value bars.
        plt.bar(X, vals, color=(0.0, 0.63, 0.92, 0.4))
        bar_plot = plt.bar(X, vals, width=1.4)
        # Absolute accident counts rendered at the bar bases.
        for idx, rect in enumerate(bar_plot):
            plt.text(rect.get_x() + rect.get_width() / 2., 0, vals[idx],
                     fontdict={"size": "7"}, ha='center', va='bottom', rotation=0)
        # Per-year rank labels rendered on top of the bars.
        ranks = [entry[1] for entry in data[year].values()]
        for idx, rect in enumerate(bar_plot):
            height = rect.get_height()
            plt.text(rect.get_x() + rect.get_width() / 2., height, ranks[idx],
                     fontdict={"size": "7"}, ha='center', va='bottom', rotation=0)
        sub_plot_id += 1

    # Saving and/or showing figure
    if fig_location:
        folder = fig_location[0:fig_location.rfind('/')]
        check_folder(folder)
        plt.savefig(fig_location)
    if show_figure:
        plt.show()
5,328,561
def fetch_step(step_key, stdio_url, parse_gtest, parse_suppression):
    """Fetches data about a single build step.

    Downloads the step's stdio log, stores it in Cloud Storage, and then
    transactionally marks the step fetched, deferring gtest/suppression
    parsing tasks when requested.
    """
    step = BuildStep.get(step_key)
    if step.is_fetched:
        return
    step.fetch_timestamp = datetime.datetime.now()
    step.put()
    try:
        stdio_response = urlfetch.fetch(stdio_url, deadline=URLFETCH_DEADLINE)
    except urlfetch.ResponseTooLargeError:
        # Workaround http://code.google.com/p/googleappengine/issues/detail?id=5686
        step.is_fetched = True
        step.is_too_large = True
        step.put()
        return
    if not stdio_response or stdio_response.status_code != 200:
        # Transient fetch failure: leave the step unfetched for a retry.
        return
    # Log path is partitioned by fetch year/month plus the datastore key.
    gs_filename = '/chromium-build-logs/logs/%d/%d/%s' % (
        step.fetch_timestamp.year, step.fetch_timestamp.month, str(step_key))
    with cloudstorage.open(gs_filename, 'w',
                           content_type='text/html') as gs_file:
        gs_file.write(stdio_response.content)
    def tx_step():
        # Re-read inside the transaction; another task may have finished
        # this step concurrently.
        step = BuildStep.get(step_key)
        if step.is_fetched:
            return
        step.log_gs = gs_filename
        step.is_fetched = True
        step.put()
        # Deferred parsers enqueue transactionally so they only run if
        # this state change commits.
        if parse_gtest and step.status in PARSEABLE_STATUSES:
            deferred.defer(insert_gtest_results,
                           step.key(),
                           _transactional=True,
                           _queue='slow')
        if parse_suppression and step.status in PARSEABLE_STATUSES:
            deferred.defer(reparse_suppression_results,
                           step.key(),
                           step.step_name,
                           _transactional=True,
                           _queue='slow')
    db.run_in_transaction_custom_retries(10, tx_step)
5,328,562
def load_ipython_extension(ipython: 'IPython.InteractiveShell') -> None:
    """Register the magic commands and bootstrap the ``abcjs`` library.

    ``abcjs`` is initialized by loading the Javascript library and
    assigning it to the window global ``ABCJS``.

    :param ipython: The active IPython instance
    """
    from IPython.core.display import display, HTML
    from jupyter_abc.magics import JupyterAbcMagics

    ipython.register_magics(JupyterAbcMagics)
    bootstrap_html = INIT_JAVASCRIPT_TEMPLATE.substitute(
        requirejs_config=get_requirejs_configuration())
    display(HTML(bootstrap_html))
5,328,563
def pprint_int_dict(int_dict, indent=4, descending=False):
    """Pretty-print a dict with int values, one entry per line sorted by value.

    Parameters
    ----------
    int_dict : dict
        A dict object mapping each key to an int value.
    indent : int
        Number of spaces printed before each key.
    descending : bool
        If True, entries are printed from largest to smallest value.
    """
    # Sort ascending first, then reverse in place, so tie ordering stays
    # identical to the previous behavior.
    ordered = sorted(int_dict.items(), key=lambda kv: kv[1])
    if descending:
        ordered.reverse()
    print('{')
    for key, count in ordered:
        print('{}{}: {}'.format(' ' * indent, key, count))
    print('}')
5,328,564
def splitTrainTestDataList(list_data, test_fraction=0.2, sample_size=None, replace=False, seed=None):
    """Split a list of data arrays into train and test portions.

    Every array in *list_data* must index samples along its first axis;
    one shuffled (and optionally subsampled) index split is applied to
    all of them, so corresponding rows stay paired across arrays.

    :param list_data: list/tuple of arrays shaped [n_samples, ...], or a
        single array
    :param test_fraction: fraction of the sample used for test [0-1]
    :param sample_size: number of samples to keep when subsampling; None
        keeps everything
    :param replace: whether indices are drawn with replacement
    :param seed: RNG seed for reproducibility; None leaves it unseeded
    :return: (list of train arrays, list of test arrays)
    """
    # Normalize the input to a plain list of arrays.
    if isinstance(list_data, np.ndarray):
        list_data = [list_data]
    elif isinstance(list_data, tuple):
        list_data = list(list_data)

    n_total = len(list_data[0])
    # None means "use everything"; otherwise cap at the available count.
    sample_size = n_total if sample_size is None else int(min(sample_size, n_total))

    # One shared shuffled index order drives the split for every array.
    np.random.seed(seed)
    shuffled = np.random.choice(np.arange(n_total), n_total, replace=replace)

    n_train = int(sample_size * (1. - test_fraction))
    train_idx = shuffled[:n_train]
    test_idx = shuffled[n_train:sample_size]

    train_out, test_out = list_data.copy(), list_data.copy()
    for pos in range(len(list_data)):
        train_out[pos] = list_data[pos][train_idx]
        test_out[pos] = list_data[pos][test_idx]
    return train_out, test_out
5,328,565
def create_hyperbounds(hyperparameters):
    """
    Gets the bounds of each hyperspace for sampling.

    Parameters
    ----------
    * `hyperparameters` [list, shape=(n_hyperparameters,)]

    Returns
    -------
    * `hyperspace_bounds` [list of lists, shape(n_spaces, n_hyperparameters)]
        - All combinations of hyperspace bounds, matching the hyperspaces
          produced by create_hyperspace.
    """
    lower_bounds = []
    upper_bounds = []
    for hparam in hyperparameters:
        low, high = check_hyperbounds(hparam)
        lower_bounds.append(low)
        upper_bounds.append(high)
    # fold_spaces yields every bounds combination; materialize as a list.
    return list(fold_spaces(lower_bounds, upper_bounds))
5,328,566
def race_from_string(value):
    """Convert a raw race string to one of ['white', 'black', 'other', None].

    "Unknown" and the empty string map to None; any other unrecognized
    value maps to 'other'.  (The parameter was renamed from ``str``,
    which shadowed the builtin; the docstring also omitted the 'other'
    outcome.)
    """
    race_dict = {
        "White/Caucasian": 'white',
        "Black/African American": 'black',
        "Unknown": None,
        "": None
    }
    return race_dict.get(value, 'other')
5,328,567
def _handling_alias_parameters(lgbm_params): # type: (Dict[str, Any]) -> None """Handling alias parameters.""" for alias_group in _ALIAS_GROUP_LIST: param_name = alias_group["param_name"] alias_names = alias_group["alias_names"] for alias_name in alias_names: if alias_name in lgbm_params: lgbm_params[param_name] = lgbm_params[alias_name] del lgbm_params[alias_name]
5,328,568
def test_client_disarm(server, client): """Should call the API and disarm the system.""" html = """[ { "Poller": {"Poller": 1, "Panel": 1}, "CommandId": 5, "Successful": true } ]""" server.add( responses.POST, "https://example.com/api/panel/syncSendCommand", body=html, status=200, ) client._session_id = "test" client._lock.acquire() assert client.disarm() is True assert len(server.calls) == 1 body = server.calls[0].request.body.split("&") assert "CommandType=2" in body assert "ElementsClass=1" in body assert "ElementsIndexes=1" in body assert "sessionId=test" in body
5,328,569
def test_wrap_coordinates(coords, origin, wgs84): """ Test whether coordinates wrap around the antimeridian in wgs84 """ lon_under_minus_170 = False lon_over_plus_170 = False if isinstance(coords[0], list): for c in coords[0]: c = list(transform(origin, wgs84, *c)) if c[0] < -170: lon_under_minus_170 = True elif c[0] > 170: lon_over_plus_170 = True else: return False return lon_under_minus_170 and lon_over_plus_170
5,328,570
def filter_dfg_contain_activity(dfg0, start_activities0, end_activities0, activities_count0, activity, parameters=None): """ Filters the DFG keeping only nodes that can reach / are reachable from activity Parameters --------------- dfg0 Directly-follows graph start_activities0 Start activities end_activities0 End activities activities_count0 Activities count activity Activity that should be reachable / should reach all the nodes of the filtered graph parameters Parameters Returns --------------- dfg Filtered DFG start_activities Filtered start activities end_activities Filtered end activities activities_count Filtered activities count """ if parameters is None: parameters = {} # since the dictionaries/sets are modified, a deepcopy is the best option to ensure data integrity dfg = deepcopy(dfg0) start_activities = deepcopy(start_activities0) end_activities = deepcopy(end_activities0) activities_count = deepcopy(activities_count0) changed = True while changed: changed = False predecessors = dfg_utils.get_predecessors(dfg, activities_count) successors = dfg_utils.get_successors(dfg, activities_count) predecessors_act = predecessors[activity].union({activity}) successors_act = successors[activity].union({activity}) start_activities1 = {x: y for x, y in start_activities.items() if x in predecessors_act} end_activities1 = {x: y for x, y in end_activities.items() if x in successors_act} if start_activities != start_activities1 or end_activities != end_activities1: changed = True start_activities = start_activities1 end_activities = end_activities1 reachable_nodes = predecessors_act.union(successors_act) if reachable_nodes != set(activities_count.keys()): changed = True activities_count = {x: y for x, y in activities_count.items() if x in reachable_nodes} dfg = {x: y for x, y in dfg.items() if x[0] in reachable_nodes and x[1] in reachable_nodes} return dfg, start_activities, end_activities, activities_count
5,328,571
def test_sigma_dut_suite_b_rsa(dev, apdev, params): """sigma_dut controlled STA Suite B (RSA)""" check_suite_b_192_capa(dev) logdir = params['logdir'] with open("auth_serv/rsa3072-ca.pem", "r") as f: with open(os.path.join(logdir, "suite_b_ca_rsa.pem"), "w") as f2: f2.write(f.read()) with open("auth_serv/rsa3072-user.pem", "r") as f: with open("auth_serv/rsa3072-user.key", "r") as f2: with open(os.path.join(logdir, "suite_b_rsa.pem"), "w") as f3: f3.write(f.read()) f3.write(f2.read()) dev[0].flush_scan_cache() params = suite_b_192_rsa_ap_params() hapd = hostapd.add_ap(apdev[0], params) ifname = dev[0].ifname sigma = start_sigma_dut(ifname, cert_path=logdir) cmd = "sta_set_security,type,eaptls,interface,%s,ssid,%s,PairwiseCipher,AES-GCMP-256,GroupCipher,AES-GCMP-256,GroupMgntCipher,BIP-GMAC-256,keymgmttype,SuiteB,PMF,Required,clientCertificate,suite_b_rsa.pem,trustedRootCA,suite_b_ca_rsa.pem,CertType,RSA" % (ifname, "test-suite-b") tests = [ "", ",TLSCipher,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", ",TLSCipher,TLS_DHE_RSA_WITH_AES_256_GCM_SHA384" ] for extra in tests: sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname) sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname) sigma_dut_cmd_check(cmd + extra) sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-suite-b")) sigma_dut_wait_connected(ifname) sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname) sigma_dut_cmd_check("sta_disconnect,interface," + ifname) sigma_dut_cmd_check("sta_reset_default,interface," + ifname) stop_sigma_dut(sigma)
5,328,572
def get_header_keys(files_dict,keys): """ Save important keywords from pre-swarp input files headers. """ for i in files_dict.keys(): cont = 1 with open('%s.hdr'%i,'w') as out_hdr: for _file in files_dict[i]: output = sp.check_output("dfits {} | egrep '{}' ".format(_file,'|'.join(keys)),shell=True).decode('UTF-8') output = reduce(lambda s, kv: s.replace(kv,kv+'00{}'.format(cont),),keys,output) out_hdr.write(output) cont+=1 out_hdr.close()
5,328,573
def generate_secret_key(length=16): """ Generates a key of the given length. :param length: Length of the key to generate, in bytes. :type length: :class:`int` :returns: :class:`str` -- The generated key, in byte string. """ return get_random_bytes(length)
5,328,574
def fetch_data(ctx, remote, startpoint, aset, nbytes, all_): """Get data from REMOTE referenced by STARTPOINT (short-commit or branch). The default behavior is to only download a single commit's data or the HEAD commit of a branch. Please review optional arguments for other behaviors """ from hangar.records.heads import get_branch_head_commit, get_staging_branch_head from hangar.utils import parse_bytes P = os.getcwd() repo = Repository(path=P) if startpoint is None: branch = get_staging_branch_head(repo._env.branchenv) commit = get_branch_head_commit(repo._env.branchenv, branch) click.echo(f'No startpoint supplied, fetching data of HEAD: {commit} for BRANCH: {branch}') elif startpoint in repo.list_branches(): commit = get_branch_head_commit(repo._env.branchenv, startpoint) click.echo(f'Fetching data for HEAD: {commit} of STARTPOINT BRANCH: {startpoint}') else: commit = expand_short_commit_digest(repo._env.refenv, startpoint) click.echo(f'Fetching data for STARTPOINT HEAD: {commit}') click.echo(f'aset argument: {aset}') try: max_nbytes = parse_bytes(nbytes) click.echo(f'nbytes argument: {max_nbytes}') except AttributeError: max_nbytes = None if len(aset) == 0: aset = None commits = repo.remote.fetch_data(remote=remote, commit=commit, arrayset_names=aset, max_num_bytes=max_nbytes, retrieve_all_history=all_) click.echo(f'completed data for commits: {commits}')
5,328,575
def nouveau_flux(title: str, link: str, description: str) -> parse: """ Crée un nouveau flux RSS. Parameters ---------- title : str Titre du flux RSS. link : str Lien vers le flux RSS. description : str Description générale du contenu. Returns ------- parse Arbre XML (ElementTree). """ flux = rss.RSS2(title, link, description, pubDate=Dt.now()) # rss.RSS2.write_xml ne retourne pas de valeur, il fait juste écrire dans # un fichier ou similaire f = io.StringIO() flux.write_xml(f) f.seek(0) return parse(f)
5,328,576
def benchmark(repo, runscript, wrapcmd, tag): """Run benchmark build.""" f = "bench.%s.%s.sh" % (repo, tag) u.verbose(1, "... running %s" % f) if os.path.exists(f): rmfile(f) try: with open(f, "w") as wf: wf.write("#!/bin/sh\n") wf.write("set -x\n") if flag_gomaxprocs: wf.write("export GOMAXPROCS=%s\n" % flag_gomaxprocs) wf.write("export LD_LIBRARY_PATH=%s/lib64\n" % gccgo_install) wf.write("go clean -cache\n") wf.write("cd %s/src/cmd/compile\n" % repo) wf.write("%s sh %s\n" % (wrapcmd, runscript)) wf.write("if [ $? != 0 ]; then\n") wf.write(" echo '*** FAIL ***'\n") wf.write(" exit 1\n") wf.write("fi\n") wf.write("exit 0\n") except IOError: u.error("unable to open %s for writing" % f) outfile = "err.benchrun.%s.%s.txt" % (repo, tag) docmderrout("sh %s" % f, outfile)
5,328,577
def select_all_rows_from_table(context, table): """Select number of all rows from given table.""" cursor = context.connection.cursor() try: cursor.execute("SELECT count(*) as cnt from {}".format(table)) results = cursor.fetchone() assert len(results) == 1, "Wrong number of records returned: {}".format(len(results)) context.query_count = results[0] except Exception as e: raise e
5,328,578
def get_open_strain_data( name, start_time, end_time, outdir, cache=False, buffer_time=0, **kwargs): """ A function which accesses the open strain data This uses `gwpy` to download the open data and then saves a cached copy for later use Parameters ---------- name: str The name of the detector to get data for start_time, end_time: float The GPS time of the start and end of the data outdir: str The output directory to place data in cache: bool If true, cache the data buffer_time: float Time to add to the begining and end of the segment. **kwargs: Passed to `gwpy.timeseries.TimeSeries.fetch_open_data` Returns ------- strain: gwpy.timeseries.TimeSeries The object containing the strain data. If the connection to the open-data server fails, this function retruns `None`. """ filename = '{}/{}_{}_{}.txt'.format(outdir, name, start_time, end_time) if buffer_time < 0: raise ValueError("buffer_time < 0") start_time = start_time - buffer_time end_time = end_time + buffer_time if os.path.isfile(filename) and cache: logger.info('Using cached data from {}'.format(filename)) strain = TimeSeries.read(filename) else: logger.info('Fetching open data from {} to {} with buffer time {}' .format(start_time, end_time, buffer_time)) try: strain = TimeSeries.fetch_open_data(name, start_time, end_time, **kwargs) logger.info('Saving cache of data to {}'.format(filename)) strain.write(filename) except Exception as e: logger.info("Unable to fetch open data, see debug for detailed info") logger.info("Call to gwpy.timeseries.TimeSeries.fetch_open_data returned {}" .format(e)) strain = None return strain
5,328,579
def extract_text(path): """ Extract the text of all txt files in the path, and store it in a dictionary. Return the dic with the structure: key : 'filename' (without .txt extension and without the page number) value : 'text' """ from tqdm import tqdm files_to_search = os.path.join(path,'*.txt') data_dic = {}# key:filename',value:'text' data_index = {} # Assuming the file to be made of one digit page number appended to the name like this: file.1.txt: set_of_text_files = set([remove_extensions(item) for item in glob.glob(files_to_search)]) nb_of_texts = len(set_of_text_files) # progress bar to display pbar = tqdm(total=nb_of_texts) for idx,file in enumerate(set_of_text_files): pbar.update(1) nb_of_pages = find_nb_of_pages(file) full_text,error_code = singlepdf_extract_text(file,nb_of_pages) path,fname = os.path.split(file) data_dic[fname] = {} data_dic[fname]['text'] = full_text data_dic[fname]['error'] = error_code data_dic[fname]['id'] = idx data_index[idx] = fname pbar.close() return data_dic,data_index
5,328,580
def extract_app_name_key(): """ Extracts the application name redis key and hash from the request The key should be of format: <metrics_prefix>:<metrics_application>:<ip>:<rounded_date_time_format> ie: "API_METRICS:applications:192.168.0.1:2020/08/04:14" The hash should be of format: <app_name> ie: "audius_dapp" """ application_name = request.args.get(app_name_param, type=str, default=None) ip = request.headers.get('X-Forwarded-For', request.remote_addr) date_time = get_rounded_date_time().strftime(datetime_format) application_key = f"{metrics_prefix}:{metrics_application}:{ip}:{date_time}" return (application_key, application_name)
5,328,581
def load(filename, fs, duration, flipud = True, display=False, **kwargs):
    """
    Load an image from a file or an URL and rescale it into a
    spectrogram-like matrix.

    Parameters
    ----------
    filename : string
        Image file name, e.g. ``test.jpg`` or URL.
    fs : scalar
        Sampling frequency of the audiogram (in Hz)
    duration : scalar
        Duration of the audiogram (in s)
    flipud : boolean, optional, default is True
        Vertical flip of the matrix (image)
    display : boolean, optional, default is False
        if True, display the image
    **kwargs, optional. This parameter is used by plt.plot
        figsize : tuple of integers, optional, default: (4,10)
            width, height in inches.
        title : string, optional, default : 'Spectrogram'
            title of the figure
        xlabel : string, optional, default : 'Time [s]'
            label of the horizontal axis
        ylabel : string, optional, default : 'Amplitude [AU]'
            label of the vertical axis
        cmap : string or Colormap object, optional, default is 'gray'
            See https://matplotlib.org/examples/color/colormaps_reference.html
            in order to get all the existing colormaps
            examples: 'hsv', 'hot', 'bone', 'tab20c', 'jet', 'seismic',
            'viridis'...
        vmin, vmax : scalar, optional, default: None
            `vmin` and `vmax` are used in conjunction with norm to normalize
            luminance data.  Note if you pass a `norm` instance, your settings
            for `vmin` and `vmax` will be ignored.
        ext : scalars (left, right, bottom, top), optional, default: None
            The location, in data-coordinates, of the lower-left and
            upper-right corners. If `None`, the image is positioned such that
            the pixel centers fall on zero-based (row, column) indices.
        dpi : integer, optional, default is 96
            Dot per inch.
            For printed version, choose high dpi (i.e. dpi=300) => slow
            For screen version, choose low dpi (i.e. dpi=96) => fast
        format : string, optional, default is 'png'
            Format to save the figure
        ... and more, see matplotlib

    Returns
    -------
    im : ndarray
        The different color bands/channels are stored in the third dimension,
        such that a gray-image is MxN, an RGB-image MxNx3 and an RGBA-image
        MxNx4.
    ext : list of scalars [left, right, bottom, top]
        The location, in data-coordinates, of the lower-left and upper-right
        corners.
    dt : scalar
        Time resolution of the spectrogram (horizontal x-axis)
    df : scalar
        Frequency resolution of the spectrogram (vertical y-axis)
    """
    print(72 * '_' )
    print("loading %s..." %filename)

    # Load image (as_gray collapses color information to a single channel)
    im = imread(filename, as_gray=True)

    # if 3D, convert into 2D by keeping only the first channel
    if len(im.shape) == 3:
        im = im[:,:,0]

    # Rescale the image between 0 to 1
    im = linear_scale(im, minval= 0.0, maxval=1.0)

    # Get the resolution
    # NOTE(review): df uses fs/(rows-1) while the extent spans [0, fs/2]
    # vertically -- confirm the intended frequency-axis convention.
    df = fs/(im.shape[0]-1)
    dt = duration/(im.shape[1]-1)

    # Extent of the matrix in (time, frequency) data coordinates
    ext = [0, duration, 0, fs/2]

    # flip the image vertically (image row 0 is the top; spectrograms
    # conventionally put frequency 0 at the bottom)
    if flipud:
        im = np.flip(im, 0)

    # Display
    if display :
        ylabel =kwargs.pop('ylabel','Frequency [Hz]')
        xlabel =kwargs.pop('xlabel','Time [sec]')
        title =kwargs.pop('title','loaded spectrogram')
        cmap =kwargs.pop('cmap','gray')
        figsize=kwargs.pop('figsize',(4, 13))
        vmin=kwargs.pop('vmin',0)
        vmax=kwargs.pop('vmax',1)
        _, fig = plot2D (im, extent=ext, figsize=figsize,title=title,
                         ylabel = ylabel, xlabel = xlabel,vmin=vmin, vmax=vmax,
                         cmap=cmap, **kwargs)

    return im, ext, dt, df
5,328,582
def get_status(lib, device_id): """ A function of reading status information from the device You can use this function to get basic information about the device status. :param lib: structure for accessing the functionality of the libximc library. :param device_id: device id. """ x_status = status_t() result = lib.get_status(device_id, byref(x_status)) if result == Result.Ok: return x_status else: return None
5,328,583
def query_by_image_objects(image_path, weights_path, cfg_path, names_path,
                           confidence_threshold=0.5, save=False):
    """Processes user-uploaded image to retrieve similar images from database.

    First, all the objects in the image are detected using the
    :method: ``rubrix.images.detect.detect_objects``. Next, the image
    descriptor array for the user-uploaded image is compared with that of
    all pruned images so as to retrieve the top-5 results.

    Arguments:
    ----------
        image_path (numpy.ndarray):
            Path for user-uploaded image, for reverse-image search.
        weights_path (pathlib.Path):
            Path to YOLOv4 pretrained weights file.
        cfg_path (pathlib.Path):
            Path to darknet configuration file.
        names_path (pathlib.Path):
            Path to darknet names file.
        confidence_threshold (float):
            Minimum detection confidence for an object to be kept.
        save (bool):
            If True, save predictions to /assets/predictions.

    Returns:
    --------
        results (list of pathlib.Path objects):
            List of paths to images retrieved for user query.
    """
    # Retrieve image descriptor vector for user-uploaded image.
    array = extract_image_descriptors(image_path, 'inception', TARGET_SIZE)
    array = array.reshape(-1)

    # Retrieve YOLOv4 model related variables to detect objects in an image.
    net = get_yolo_net(cfg_path, weights_path)
    labels = get_labels(names_path)
    image = cv2.imread(str(image_path))
    objects = detect_objects(net, labels, image, confidence_threshold)

    # The index maps object labels to candidate image paths; only candidates
    # sharing at least one detected object with the query are compared.
    index_path = pathfinder.get('assets', 'index.json')
    with open(index_path, 'r') as json_file:
        index = json.load(json_file)
    paths_to_images = set([])
    for object in objects:
        paths_to_images |= set(index[object])

    # Precomputed descriptors are stored as one .npy file per image stem.
    descriptors_path = pathfinder.get('assets', 'data', 'descriptors')
    results = []
    for path in paths_to_images:
        path = Path(path)
        other_array = np.load(descriptors_path / f'{path.stem}.npy')
        score = dot_product(array, other_array)
        results.append(ReverseSearchResultObject(
            name=path.name,
            path_to_image=path,
            score=score,
            )
        )

    # Using heaps to extract N largest results from a list of n elements
    # is recommended, as the time complexity to do so is O(n * logN), which
    # is approximately O(n) if N is relatively small.
    results = heapq.nlargest(5, results)
    results = [result.path_to_image for result in results]

    if save:
        # Save predictions to /assets/predictions.
        save_predictions(results)

    return results
5,328,584
def suspend_supplier_services(client, logger, framework_slug, supplier_id, framework_info, dry_run):
    """
    The supplier ID list should have been flagged by CCS as requiring action, but double check that the supplier:
     - has some services on the framework
     - has `agreementReturned: false`
     - has not `agreementReturned: on-hold

    :param client: API client instance
    :param logger: logger used for progress and error reporting
    :param framework_slug: slug of the framework the services belong to
    :param supplier_id: ID of the supplier whose services should be suspended
    :param framework_info: JSON
    :param dry_run: don't suspend if True
    :return: suspended_service_count
    :rtype: int
    """
    suspended_service_count = 0
    # Ignore any 'private' services that the suppliers have removed themselves
    new_service_status, old_service_status = 'disabled', 'published'

    # Guard clauses: skip suppliers that are off-framework, have already
    # returned their agreement, or whose agreement is on hold.
    if not framework_info['frameworkInterest']['onFramework']:
        logger.error(f'Supplier {supplier_id} is not on the framework.')
        return suspended_service_count
    if framework_info['frameworkInterest']['agreementReturned']:
        logger.error(f'Supplier {supplier_id} has returned their framework agreement.')
        return suspended_service_count
    if framework_info['frameworkInterest']['agreementStatus'] == 'on-hold':
        logger.error(f"Supplier {supplier_id}'s framework agreement is on hold.")
        return suspended_service_count

    # Find the supplier's non-private services on this framework
    services = client.find_services(
        supplier_id=supplier_id, framework=framework_slug, status=old_service_status
    )
    if not services['services']:
        logger.error(f'Supplier {supplier_id} has no {old_service_status} services on the framework.')
        return suspended_service_count

    # Suspend all services for each supplier (the API will de-index the services from search results)
    logger.info(
        f"Setting {services['meta']['total']} services to '{new_service_status}' for supplier {supplier_id}."
    )
    for service in services['services']:
        if dry_run:
            logger.info(f"[DRY RUN] Would suspend service {service['id']} for supplier {supplier_id}")
        else:
            client.update_service_status(service['id'], new_service_status, "Suspend services script")
            # Count only services actually suspended (dry runs return 0).
            suspended_service_count += 1

    # Return suspended service count (i.e. if > 0, some emails need to be sent)
    return suspended_service_count
5,328,585
def get_events(): """Get events from meetup website, parse them and return a list of dicts""" group = os.environ['MEETUP_GROUP'] url = f'https://www.meetup.com/{group}/events/' resp = HTMLSession().get(url, timeout=10) if not resp.ok: raise ConnectionError( f"Received http {resp.status_code} when connecting to {url}" ) events = resp.html.find('.flex.flex--column.flex--spaceBetween') events = [parse_event(event) for event in events] return events
5,328,586
def get_campaigns_with_goal_id(campaigns, goal_identifer): """Returns campaigns having the same goal_identifier passed in the args from the campaigns list Args: campaigns (list): List of campaign objects gaol_identifier (str): Global goal identifier Returns: tuple (campaign_goal_list, campaigns_without_goal): campaign_goal_list is a tuple of (campaign, campaign_goal) """ campaign_goal_list = [] campaigns_without_goal = [] for campaign in campaigns: campaign_goal = get_campaign_goal(campaign, goal_identifer) if campaign_goal: campaign_goal_list.append((campaign, campaign_goal)) else: campaigns_without_goal.append(campaign) return campaign_goal_list, campaigns_without_goal
5,328,587
def test_training_cli(generator_args, training_args, network_args, monkeypatch): """this tests that the training CLI validates the schemas and executes its logic. Calls to generator, network and training are minimally mocked. """ args = { "run_uid": "test_uid", "training_params": training_args, "generator_params": generator_args, "test_generator_params": generator_args, "network_params": network_args, "output_full_args": True } monkeypatch.setattr(cli, "ClassLoader", MockClassLoader) training = cli.Training(input_data=args, args=[]) training.run() model_path = os.path.join(args["training_params"]["output_dir"], args["run_uid"] + "_" + args["training_params"]["model_string"] + "_model.h5") assert Path(model_path).exists()
5,328,588
def tautologically_define_state_machine_transitions(state_machine): """Create a mapping of all transitions in ``state_machine`` Parameters ---------- state_machine : super_state_machine.machines.StateMachine The state machine you want a complete map of Returns ------- dict Dictionary of all transitions in ``state_machine`` Structured as {from_state1: [(to_state, allowed), ...], from_state2: [(to_state, allowed), ...], } where - ``allowed`` is a boolean - ``from_stateN`` is a string - ``to_state`` is a string """ transitions_as_enum = state_machine.__class__._meta['transitions'] transitions_as_names = { to_state.value: [from_state.value for from_state in from_states] for to_state, from_states in transitions_as_enum.items()} transition_map = defaultdict(list) all_states = set(state_machine.States.states()) for to_state, from_states in transitions_as_names.items(): for from_state in all_states: allowed = True if from_state not in from_states: allowed = False transition_map[to_state].append((from_state, allowed)) return transition_map
5,328,589
def test_load_labware_result(well_plate_def: LabwareDefinition) -> None: """It should have a LoadLabwareResult model.""" result = LoadLabwareResult( labwareId="labware-id", definition=well_plate_def, calibration=(1, 2, 3), ) assert result.labwareId == "labware-id" assert result.definition == well_plate_def assert result.calibration == (1, 2, 3)
5,328,590
def test_check_name_typographicsubfamilyname():
    """ Check name table: TYPOGRAPHIC_SUBFAMILY_NAME entries. """
    check = CheckTester(googlefonts_profile,
                        "com.google.fonts/check/name/typographicsubfamilyname")

    # RIBBI = Regular/Italic/Bold/BoldItalic styles; non-RIBBI styles have
    # different naming requirements for nameID 17.
    RIBBI = "montserrat/Montserrat-BoldItalic.ttf"
    NON_RIBBI = "montserrat/Montserrat-ExtraLight.ttf"

    # Add incorrect TYPOGRAPHIC_SUBFAMILY_NAME entries to a RIBBI font
    ttFont = TTFont(TEST_FILE(RIBBI))
    ttFont['name'].setName("FOO",
                           NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
                           PlatformID.WINDOWS,
                           WindowsEncodingID.UNICODE_BMP,
                           WindowsLanguageID.ENGLISH_USA)
    ttFont['name'].setName("BAR",
                           NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
                           PlatformID.MACINTOSH,
                           MacintoshEncodingID.ROMAN,
                           MacintoshLanguageID.ENGLISH)
    assert_results_contain(check(ttFont),
                           FAIL, 'mismatch',
                           f'with a RIBBI that has got incorrect'
                           f' nameid={NameID.TYPOGRAPHIC_SUBFAMILY_NAME} entries...')
    assert_results_contain(check(ttFont),
                           FAIL, 'bad-win-name')
    assert_results_contain(check(ttFont),
                           FAIL, 'bad-mac-name')

    # non-RIBBI fonts must have a TYPOGRAPHIC_SUBFAMILY_NAME entry
    ttFont = TTFont(TEST_FILE(NON_RIBBI))
    assert_PASS(check(ttFont),
                f'with a non-RIBBI containing a nameid={NameID.TYPOGRAPHIC_SUBFAMILY_NAME} entry...')

    # set bad values on the win TYPOGRAPHIC_SUBFAMILY_NAME entry:
    ttFont = TTFont(TEST_FILE(NON_RIBBI))
    ttFont['name'].setName("Generic subfamily name",
                           NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
                           PlatformID.WINDOWS,
                           WindowsEncodingID.UNICODE_BMP,
                           WindowsLanguageID.ENGLISH_USA)
    assert_results_contain(check(ttFont),
                           FAIL, 'bad-typo-win',
                           f'with a non-RIBBI with bad nameid={NameID.TYPOGRAPHIC_SUBFAMILY_NAME} entries...')

    # set bad values on the mac TYPOGRAPHIC_SUBFAMILY_NAME entry:
    ttFont = TTFont(TEST_FILE(NON_RIBBI))
    ttFont['name'].setName("Generic subfamily name",
                           NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
                           PlatformID.MACINTOSH,
                           MacintoshEncodingID.ROMAN,
                           MacintoshLanguageID.ENGLISH)
    assert_results_contain(check(ttFont),
                           FAIL, 'bad-typo-mac',
                           f'with a non-RIBBI with bad nameid={NameID.TYPOGRAPHIC_SUBFAMILY_NAME} entries...')

    # remove all TYPOGRAPHIC_SUBFAMILY_NAME entries
    ttFont = TTFont(TEST_FILE(NON_RIBBI))
    win_name = ttFont['name'].getName(NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
                                      PlatformID.WINDOWS,
                                      WindowsEncodingID.UNICODE_BMP,
                                      WindowsLanguageID.ENGLISH_USA)
    mac_name = ttFont['name'].getName(NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
                                      PlatformID.MACINTOSH,
                                      MacintoshEncodingID.ROMAN,
                                      MacintoshLanguageID.ENGLISH)
    # Retarget the entries to unused name IDs, effectively deleting them.
    win_name.nameID = 254
    if mac_name:
        mac_name.nameID = 255
    assert_results_contain(check(ttFont),
                           FAIL, 'missing-typo-win',
                           f'with a non-RIBBI lacking a nameid={NameID.TYPOGRAPHIC_SUBFAMILY_NAME} entry...')
    # note: the check must not complain
    #       about the lack of a mac entry!
5,328,591
def jump_pfd(data, *args, **kwargs):
    """
    Uses refined searching stategies to greedily jump to a node in the
    search lattice.

    AG's feature_permutation returns feature importances that aren't additive
    and thus don't sum up to equal the model's loss function. Which is why I
    normalize the permutation feature importances to the model's
    measured_performance. This is not theoretically sound though -- there is
    no reason to think that FIs calculated for features [x_1, x_2, x_3, x_4]
    are the same FIs calculated for features [x_2, x_3, x_4] when using
    feature_permutation.

    Note that in AutoGluon, higher performance metrics are always better.
    This leads to the circumstance that the MSE is negative!

    Also, shap.TreeExplainer is capable of explaining a model's log_loss,
    which might be worthwhile investigating in future versions.
    """
    logger = logging.getLogger('pfd')
    logger.debug(f"Start automatical search of PFDs for dataset {data.title}")

    df_train, df_validate, df_test = helps.load_splits(data)
    df_imp, measured_performance, metric, rhs = global_predictor_explained(
        data, df_train, df_validate, df_test)

    include_cols = list(df_train.columns)
    lhs = [c for c in include_cols if c != rhs]

    logger.info(f"Trained a predictor with {metric} "
                f"{measured_performance}.")
    print("These are the feature importances found via "
          "feature permutation:")
    print(df_imp.loc[lhs, ['description', 'importance']].sort_values(
        'importance', ascending=False))

    print(f"What's your threshold for {metric}?")
    threshold = float(input(''))
    # BUGFIX: this debug line was missing its f-prefix, logging the literal
    # "{threshold}" instead of the value.
    logger.debug(f"User set threshold of {threshold}")

    # The margin is how much performance we can shave off. A negative margin
    # means the full model already misses the requested threshold, so no
    # column subset can reach it either.
    margin = measured_performance - threshold
    if margin < 0:
        # BUGFIX: both of the original checks (margin < 0 and
        # measured_performance < threshold are the same condition) announced
        # "Stopping the search." but never actually stopped; also the second
        # string fragment lacked its f-prefix.
        logger.info(f"The set threshold of {threshold} is above "
                    f"the measured performance of {measured_performance}. "
                    "Stopping the search.")
        return

    # Normalize the (non-additive) permutation importances so that they sum
    # to the measured performance -- see the docstring caveat.
    df_imp['normalized_importance'] = (df_imp.loc[:, 'importance']
                                       / df_imp.loc[:, 'importance'].sum()) * measured_performance
    df_importance_cumsum = df_imp.sort_values(
        'normalized_importance', ascending=True).cumsum()
    importance_distance = df_importance_cumsum.loc[:, 'normalized_importance'] - margin
    logger.debug("Calculated the following importance distance "
                 f"{importance_distance}")

    # Drop every column whose cumulative importance still fits within the
    # margin, then retrain to verify the threshold is actually met.
    # NOTE: Series.iteritems() was removed in pandas 2.0; items() is the
    # drop-in replacement.
    exclude_cols = [int(x[0]) for x in importance_distance.items() if x[1] < 0]
    include_cols = [c for c in include_cols if c not in exclude_cols]
    lhs = [c for c in include_cols if c != rhs]

    logger.info("Training a predictor next to check threshold.")
    logger.info("Begin predictor training")
    measured_performance = opt.iterate_pfd(include_cols, df_train,
                                           df_validate, df_test, rhs)
    logger.info("Using Feature Permutation Importances, jumped "
                f"to LHS {lhs}, resulting in a Model with {metric} "
                f"{round(measured_performance, 3)}. The threshold aimed for "
                f"was a {metric} of {threshold}.")
5,328,592
def topk_errors(preds: Tensor, labels: Tensor, ks: List[int]): """ Computes the top-k error for each k. Args: preds (array): array of predictions. Dimension is N. labels (array): array of labels. Dimension is N. ks (list): list of ks to calculate the top accuracies. """ num_topks_correct = topks_correct(preds, labels, ks) return [(1.0 - x / preds.size(0)) for x in num_topks_correct]
5,328,593
def extract_data_from_inspect(network_name, network_data): """ :param network_name: str :param network_data: dict :return: dict: { "ip_address4": "12.34.56.78" "ip_address6": "ff:fa:..." } """ a4 = None if network_name == "host": a4 = "127.0.0.1" n = {} a4 = graceful_chain_get(network_data, "IPAddress") or a4 if a4: n["ip_address4"] = a4 a6 = graceful_chain_get(network_data, "GlobalIPv6Address") if a6: n["ip_address4"] = a6 return n
5,328,594
def solveSudoku(fileName = "", showResults = False, showTime = False, matrix = []):
    """
    Solves a Sudoku by prompting the sudoku or reading a text file containing
    the sudoku or by directly taking the matrix as a variable and either shows
    the solution or returns it. Can also tell the execution time
    (Any one of the arguments 'fileName' or 'matrix' should be given.
    Else rises ValueError)

    args:
        -fileName - Name of the text file in which sudoku is present (optional)
        -showResults - Prints the solution(s) if set true. Else returns the
                       first solution (optional)
        -showTime - Calculates and shows the execution time only if set true
                    (optional)
        -matrix - 9x9 sudoku matrix (optional)

    returns:
        If 'showResults' is false, returns the first 9x9 solved sudoku list
        (with the timing string appended when 'showTime' is true).
        If 'showResults' is true, prints every solution found instead of
        returning.
    """
    # NOTE(review): `matrix=[]` is a mutable default; it is only read and
    # compared here, never mutated, so it is harmless in practice.
    # Select the sudoku source: interactive prompt, file, or in-memory matrix.
    if fileName == "" and matrix == []:
        rows = prompt_sudoku()
    elif fileName != "" and matrix == []:
        rows = get_sudoku(fileName)
    elif fileName == "" and matrix != []:
        rows = matrix
    elif fileName != "" and matrix !=[]:
        raise ValueError("Please give any of the arguments, 'fileName' or 'matrix' (Both are given)")

    st = time.perf_counter()
    all_combo = []
    # Column view and 3x3 block view of the grid, used for candidate pruning.
    vert = vertical(rows)
    blocks = blockify(rows)
    # For each row, enumerate every candidate completion consistent with the
    # current columns and blocks.
    for i in rows:
        all_combo.append(insert_combos(i, vert, blocks,rows.index(i)))
    a = all_combo.copy()
    # Brute-force search over candidate rows, pruning a partial grid as soon
    # as it contains a column conflict, or a block conflict once a full band
    # of three rows is placed.
    for r1 in a[0]:
        for r2 in a[1]:
            if vertically_has_duplicates(r1,r2):
                continue
            for r3 in a[2]:
                if vertically_has_duplicates(r1,r2,r3) or blocks_has_duplicates([r1,r2,r3]):
                    continue
                for r4 in a[3]:
                    if vertically_has_duplicates(r1,r2,r3,r4):
                        continue
                    for r5 in a[4]:
                        if vertically_has_duplicates(r1,r2,r3,r4,r5):
                            continue
                        for r6 in a[5]:
                            if vertically_has_duplicates(r1,r2,r3,r4,r5,r6) or blocks_has_duplicates([r4,r5,r6]):
                                continue
                            for r7 in a[6]:
                                if vertically_has_duplicates(r1,r2,r3,r4,r5,r6,r7):
                                    continue
                                for r8 in a[7]:
                                    if vertically_has_duplicates(r1,r2,r3,r4,r5,r6,r7,r8):
                                        continue
                                    for r9 in a[8]:
                                        try_sol = [r1,r2,r3,r4,r5,r6,r7,r8,r9]
                                        if vertically_has_duplicates(r1,r2,r3,r4,r5,r6,r7,r8,r9) or blocks_has_duplicates([r7,r8,r9]):
                                            continue
                                        # A fully consistent grid was found.
                                        time_taken = 'Time Taken: '+str(round(time.perf_counter()-st, 4))+'s'
                                        if showResults:
                                            # Print and keep searching for
                                            # further solutions.
                                            for row in try_sol:
                                                print(row)
                                            if showTime:
                                                print(time_taken)
                                        else:
                                            if showTime:
                                                try_sol.append(time_taken)
                                            return try_sol
5,328,595
def set_callback_timeout(timeout):
    """
    Set the default timeout (in seconds) for DSWaitedCallback.

    If a deadlock occurs, the wait function exits once this timeout
    period elapses.

    Args:
        timeout (int): timeout in seconds used to end the wait in
            DSWaitedCallback in case of a deadlock.

    Raises:
        ValueError: If timeout is invalid (<= 0 or > MAX_INT_32).

    Examples:
        >>> import mindspore.dataset as ds
        >>> # sets the new timeout value.
        >>> ds.config.set_callback_timeout(100)
    """
    # Reject anything outside the half-open range (0, INT32_MAX].
    if not 0 < timeout <= INT32_MAX:
        raise ValueError("timeout given is not within the required range.")
    _config.set_callback_timeout(timeout)
5,328,596
def test_create_metadata_from_parsed_metadatafil(_innhold):
    """
    GIVEN the content(XML) of a METS/XML file
    WHEN calling the method create_metadata_from_parsed_metadatafil()
    THEN check that the returned Arkivuttrekk domain object is correct
    """
    # Single binding for the id shared by the expected object and the call.
    metadatafil_id = 1
    expected = Metadata(
        obj_id=UUID("df53d1d8-39bf-4fea-a741-58d472664ce2"),
        status=ArkivuttrekkStatus.OPPRETTET,
        arkivutrekk_type=ArkivuttrekkType.NOARK5,
        tittel="The Lewis Caroll Society -- Wonderland (1862 - 1864) - 1234",
        sjekksum_sha256="2afeec307b0573339b3292e27e7971b5b040a5d7e8f7432339cae2fcd0eb936a",
        avgiver_navn="Lewis Caroll",
        avgiver_epost="lewis@caroll.net",
        metadatafil_id=metadatafil_id,
        arkiv_startdato=date.fromisoformat("1863-01-01"),
        arkiv_sluttdato=date.fromisoformat("1864-12-31"),
        storrelse=440320,
        avtalenummer="01/12345",
    )

    actual = create_metadata_from_parsed_metadatafil(metadatafil_id, _innhold)

    # Compare attribute dicts so mismatching fields show up in the diff.
    assert vars(actual) == vars(expected)
5,328,597
def vertexval(val, size):
    """Convert a GTP vertex string to (row, col), or raise GTPError.

    Args:
        val: vertex such as 'd4' or 'pass' (case-insensitive).
        size: board size (rows/columns).

    Returns:
        (row, col) zero-based board coordinates, or None for 'pass'.

    Raises:
        GTPError: if the letter or number is invalid, or the vertex is
            off the board.
        ValueError: if the number part is not a decimal integer (from int()).
    """
    val = val.lower()
    if val == 'pass':
        return None
    letter = val[0]
    number = int(val[1:], 10)
    # GTP column letters skip 'i'; accepting it here would silently
    # collide with 'h' (both would map to column 7).
    if not 'a' <= letter <= 'z' or letter == 'i':
        raise GTPError('invalid vertex letter: {!r}'.format(val))
    if number < 1:
        raise GTPError('invalid vertex number: {!r}'.format(val))
    row = size - number
    # Columns after 'i' shift down by one because 'i' is not used.
    col = ord(letter) - (ord('a') if letter < 'i' else ord('b'))
    if not (0 <= row < size and 0 <= col < size):
        raise GTPError('off board')
    return row, col
5,328,598
def v0abs(x_ratio, distance_source, lr_angle, br_angle):
    """Return the norm of the velocity reference v_0.

    Computes the y and z components as the difference between the
    vl* terms and the (1 - x_ratio)-weighted vo* terms, then takes
    the Euclidean norm.
    """
    weight = 1. - x_ratio
    dy = vly(x_ratio, distance_source, lr_angle, br_angle) - weight * voy(lr_angle)
    dz = vlz(x_ratio, distance_source, lr_angle, br_angle) - weight * voz(lr_angle, br_angle)
    return np.sqrt(dy * dy + dz * dz)
5,328,599