content: string (lengths 22 to 815k)
id: int64 (values 0 to 4.91M)
def version_callback(
    value: Optional[bool],
) -> None:  # pylint: disable=unsubscriptable-object
    """Provides a version option for the CLI"""
    if value:
        console.print(
            f"{app.info.name.title()} "  # type: ignore[union-attr]
            f"CLI version: {__version__}"
        )
        raise typer.Exit()
11,700
def create_schema(hostname='localhost', username=None, password=None,
                  dbname=None, port=None, schema_name=None):
    """Create test schema."""
    cn = create_cn(hostname, password, username, dbname, port)
    with cn.cursor() as cr:
        # Note: the schema is named after dbname; the schema_name parameter is unused.
        cr.execute('DROP SCHEMA IF EXISTS %s CASCADE' % dbname)
        cr.execute('CREATE SCHEMA %s' % dbname)
    cn.close()
    cn = create_cn(hostname, password, username, dbname, port)
    return cn
11,701
def test_load_no_project():
    """Loading a project that does not exist throws an error"""
    assert_raises(Exception, inventory.load, PROJECT_NAME)
11,702
def set_color_in_session(intent, session): """ Sets the color in the session and prepares the speech to reply to the user. """ card_title = intent['name'] session_attributes = {} should_end_session = False if 'Color' in intent['slots']: favorite_color = intent['slots']['Color']['value'] session_attributes = create_favorite_color_attributes(favorite_color) speech_output = "I now know the bus stop you are in is " + \ favorite_color + \ ". You can ask me where your bus stop is by asking, " \ "what bus stop am I on?" reprompt_text = "You can ask me where your bus stop is by asking, " \ "what bus stop am I on?" else: speech_output = "I'm not sure what bus stop you are in. " \ "Please try again." reprompt_text = "I'm not sure what bus stop you are in " \ "You can ask me where your bus stop is by asking, " \ "what bus stop am I on?" return build_response(session_attributes, build_speechlet_response( card_title, speech_output, reprompt_text, should_end_session))
11,703
def fpath_to_pgn(fpath):
    """Slices the pgn string from the file path."""
    return fpath.split('/')[-1].split('.jpeg')[0]
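A quick usage sketch of the slicing above; the path is hypothetical and only illustrates the expected input shape.

# Hypothetical path; the stem before '.jpeg' is taken as the PGN string.
assert fpath_to_pgn('data/games/abc123.jpeg') == 'abc123'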
11,704
def scheduler():
    """The scheduler operation command group."""
    pass
11,705
def draw_kinetics_plots(rxn_list, path=None, t_min=(300, 'K'), t_max=(3000, 'K'), t_count=50): """ Draws plots of calculated rates and RMG's best values for reaction rates in rxn_list `rxn_list` has a .kinetics attribute calculated by ARC and an .rmg_reactions list with RMG rates """ plt.style.use(str('seaborn-talk')) t_min = ScalarQuantity(value=t_min[0], units=str(t_min[1])) t_max = ScalarQuantity(value=t_max[0], units=str(t_max[1])) temperature = np.linspace(t_min.value_si, t_max.value_si, t_count) pressure = 1e7 # Pa (=100 bar) pp = None if path is not None: path = os.path.join(path, str('rate_plots.pdf')) if os.path.exists(path): os.remove(path) pp = PdfPages(path) for rxn in rxn_list: reaction_order = len(rxn.reactants) units = '' conversion_factor = {1: 1, 2: 1e6, 3: 1e12} if reaction_order == 1: units = r' (s$^-1$)' elif reaction_order == 2: units = r' (cm$^3$/(mol s))' elif reaction_order == 3: units = r' (cm$^6$/(mol$^2$ s))' arc_k = list() for t in temperature: arc_k.append(rxn.kinetics.getRateCoefficient(t, pressure) * conversion_factor[reaction_order]) rmg_rxns = list() for rmg_rxn in rxn.rmg_reactions: rmg_rxn_dict = dict() rmg_rxn_dict['rmg_rxn'] = rmg_rxn rmg_rxn_dict['t_min'] = rmg_rxn.kinetics.Tmin if rmg_rxn.kinetics.Tmin is not None else t_min rmg_rxn_dict['t_max'] = rmg_rxn.kinetics.Tmax if rmg_rxn.kinetics.Tmax is not None else t_max k = list() temp = np.linspace(rmg_rxn_dict['t_min'].value_si, rmg_rxn_dict['t_max'].value_si, t_count) for t in temp: k.append(rmg_rxn.kinetics.getRateCoefficient(t, pressure) * conversion_factor[reaction_order]) rmg_rxn_dict['k'] = k rmg_rxn_dict['T'] = temp if rmg_rxn.kinetics.isPressureDependent(): rmg_rxn.comment += str(' (at {0} bar)'.format(int(pressure / 1e5))) rmg_rxn_dict['label'] = rmg_rxn.comment rmg_rxns.append(rmg_rxn_dict) _draw_kinetics_plots(rxn.label, arc_k, temperature, rmg_rxns, units, pp) pp.close()
11,706
def create_germline_samples_file(germline_samples_filepath, inputdata_symlinks): """ Writes a file to disk mapping patient IDs to the directories containing their data. Side effects: Writes a file to disk :param germline_samples_filepath: str Path to write out germline samples file :param inputdata_symlinks: list Relative paths to all symlinks linked to the original data """ # Create a dictionary mapping the relative path of the parent folder with each file inputdata_symlinks_dir_file_map = { os.path.dirname(k) + '/': os.path.basename(k) for k in inputdata_symlinks } with open(germline_samples_filepath, 'w') as germline_samples: # Write out the header germline_samples.write('ID dir\n') # Write out entry for each patient path for inputdata_dir, inputdata_file in six.iteritems(inputdata_symlinks_dir_file_map): # TODO Remove trailing slash after debugging germline_samples.write('{id} {dir}/\n'.format( id=os.path.splitext(inputdata_file)[ROOT], dir=inputdata_dir ))
11,707
def convert_to_constant(num):
    """
    Convert one float argument to Constant, returning the converted object.

    :param float num: Float number to be converted to Constant
    :return: Float number converted to a Constant object
    :rtype: object
    """
    return Constant(name=str(num), units=null_dimension, value=float(num))
11,708
def data_zip(data):
    """
    Take input data and return a list that pairs up the corresponding sub-items,
    e.g. ([1,2,3], [4,5,6]) -> [[1,4], [2,5], [3,6]]
         {"a":[1,2],"b":[3,4]} -> [{"a":1,"b":3}, {"a":2,"b":4}]
    :param data: an array, a tuple (x, y, ...), or a dict {"a": data1, "b": data2, ...}
    :return: a list, or the original array
    """
    if isinstance(data, tuple):
        return [list(d) for d in zip(*data)]
    if isinstance(data, dict):
        data_list = []
        keys = data.keys()
        for i in range(len(data[list(keys)[0]])):  # iterate over the data in the dict values
            data_dict = {}
            for key in keys:
                data_dict[key] = data[key][i]
            data_list.append(data_dict)
        return data_list
    return data
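A minimal standalone sketch of the same zipping behaviour, using only the standard library; the name zip_rows is illustrative and not taken from the snippet above.

def zip_rows(data):
    # Zip parallel sequences into per-index records, mirroring data_zip above.
    if isinstance(data, tuple):
        return [list(row) for row in zip(*data)]
    if isinstance(data, dict):
        keys = list(data.keys())
        return [{k: data[k][i] for k in keys} for i in range(len(data[keys[0]]))]
    return data

assert zip_rows(([1, 2, 3], [4, 5, 6])) == [[1, 4], [2, 5], [3, 6]]
assert zip_rows({"a": [1, 2], "b": [3, 4]}) == [{"a": 1, "b": 3}, {"a": 2, "b": 4}]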
11,709
def _days_in_leap_and_common_years(i_date, f_date):
    """Return a tuple with the number of days in leap and common years
    (respectively) between the initial and final dates.
    """
    iy = i_date.year
    fy = f_date.year
    days_in_leap = 0
    days_in_common = 0
    if iy == fy:  # same year
        delta = f_date - i_date
        if _isleap(iy):
            days_in_leap += delta.days
        else:
            days_in_common += delta.days
    elif fy - iy >= 1:  # different year
        delta1 = i_date.replace(year=iy+1, month=1, day=1) - i_date  # days in initial year
        delta2 = f_date - f_date.replace(month=1, day=1)  # days in final year
        if _isleap(iy):
            days_in_leap += delta1.days
        else:
            days_in_common += delta1.days
        if _isleap(fy):
            days_in_leap += delta2.days
        else:
            days_in_common += delta2.days
        leaps_in_between = [y for y in range(iy+1, fy) if _isleap(y)]
        commons_in_between = [y for y in range(iy+1, fy) if not _isleap(y)]
        days_in_leap += len(leaps_in_between) * 366
        days_in_common += len(commons_in_between) * 365
    # else:
    #     raise InputError(expr="Error in days_in_years(), f_date.year must be greater than i_date.year")
    return (days_in_leap, days_in_common)
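For comparison, a compact standalone sketch that computes the same split by walking year by year; leap_common_days is an illustrative name, not the function above, and it uses the standard-library isleap instead of the module's _isleap.

from calendar import isleap
from datetime import date

def leap_common_days(start, end):
    # Count days falling in leap vs. common years in the half-open range [start, end).
    leap = common = 0
    d = start
    while d < end:
        year_end = date(d.year + 1, 1, 1)
        span = (min(end, year_end) - d).days
        if isleap(d.year):
            leap += span
        else:
            common += span
        d = year_end
    return leap, common

print(leap_common_days(date(2019, 6, 1), date(2021, 6, 1)))  # (366, 365)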
11,710
async def save_on_shutdown(app: aiohttp.web.Application) -> None:
    """Close the database connection on shutdown."""
    if app["db_conn"] is not None:
        await app["db_conn"].close()
11,711
def make_space_kernel(data, background_kernel, trigger_kernel, time, time_cutoff=None, space_cutoff=None): """Produce a kernel object which evaluates the background kernel, and the trigger kernel based on the space locations in the data, always using the fixed time as passed in. :param data: An array of shape `(3,N)` giving the space-time locations events. Used when computing the triggered / aftershock events. :param background_kernel: The kernel object giving the background risk intensity. We assume this has a method `space_kernel` which gives just the two dimensional spacial kernel. :param trigger_kernel: The kernel object giving the trigger / aftershock risk intensity. :param time: The fixed time coordinate to evaluate at. :param time_cutoff: Optional; if set, then we assume the trigger_kernel is zero for times greater than this value (to speed up evaluation). :param space_cutoff: Optional; if set, then we assume the trigger_kernel is zero for space distances greater than this value (to speed up evaluation). :return: A kernel object which can be called on arrays of (2 dimensional space) points. """ mask = data[0] < time if time_cutoff is not None: mask = mask & (data[0] > time - time_cutoff) data_copy = _np.array(data[:, mask]) return SpaceKernel(time, background_kernel, trigger_kernel, data_copy, space_cutoff)
11,712
def send_gmail(from_addr, id_rsa_file, passwd_rsa_file, msg):
    """Send email via gmail"""
    try:
        s = smtplib.SMTP('smtp.gmail.com', 587)
        s.ehlo()
        s.starttls()
        s.ehlo()
        # login to gmail with gmail account 'from_addr' and with decrypted password file
        s.login(from_addr, dec_pass(id_rsa_file, passwd_rsa_file))
        s.send_message(msg)
        s.close()
    except Exception:
        print('Email error occurred. Email could not be sent.')
11,713
def get_dim_act_curv(args):
    """
    Helper function to get dimension and activation at every layer.
    :param args:
    :return:
    """
    if not args.act:
        act = lambda x: x
    else:
        act = getattr(F, args.act)
    acts = [act] * (args.num_layers - 1)
    dims = [args.feat_dim]
    # Check that num_layers and hidden_dim match
    if args.num_layers > 1:
        hidden_dim = [int(h) for h in args.hidden_dim.split(',')]
        if args.num_layers != len(hidden_dim) + 1:
            raise RuntimeError('Check dimension hidden:{}, num_layers:{}'.format(
                args.hidden_dim, args.num_layers))
        dims = dims + hidden_dim
    dims += [args.dim]
    acts += [act]
    n_curvatures = args.num_layers
    if args.c_trainable == 1:  # NOTE: changed from `if args.c is None:`
        # create list of trainable curvature parameters
        curvatures = [nn.Parameter(torch.Tensor([args.c]).to(args.device)) for _ in range(n_curvatures)]
    else:
        # fixed curvature
        curvatures = [torch.tensor([args.c]) for _ in range(n_curvatures)]
        if not args.cuda == -1:
            curvatures = [curv.to(args.device) for curv in curvatures]
    return dims, acts, curvatures
11,714
def debug():
    """Function to return exported resources with types as dict."""
    return exported_res_dict
11,715
def write_charset_executable(mysql_charset_script_name, here):
    """Write to disk as an executable the file that will be used to issue the
    MySQL statements that change the character set to UTF-8 -- return the
    absolute path.
    """
    mysql_charset_script = os.path.join(here, mysql_charset_script_name)
    if not os.path.exists(mysql_charset_script):
        with open(mysql_charset_script, 'w') as f:
            pass
    os.chmod(mysql_charset_script, 0o744)  # octal literal (written as 0744 in the Python 2 original)
    return mysql_charset_script
11,716
def readTemperature(file):
    """Returns the temperature of the one wire sensor.
    Pass in the file containing the one wire data (ds18b20+)
    """
    lines = read_temp_raw(file)
    while lines[0].strip()[-3:] != "YES":
        time.sleep(0.2)
        lines = read_temp_raw(file)
    equals_pos = lines[1].find("t=")
    if equals_pos != -1:
        temp_string = lines[1][equals_pos + 2:]
        # convert temperature to C
        temp_c = float(temp_string) / 1000.0
        return temp_c
    return -273.15
11,717
def test_receipt(): """ Test event receipt message and attached couplets """ # manual process to generate a list of secrets # root = pysodium.randombytes(pysodium.crypto_pwhash_SALTBYTES) # secrets = generateSecrets(root=root, count=8) # Direct Mode coe is controller, val is validator # set of secrets (seeds for private keys) coeSecrets = [ 'ArwXoACJgOleVZ2PY7kXn7rA0II0mHYDhc6WrBH8fDAc', 'A6zz7M08-HQSFq92sJ8KJOT2cZ47x7pXFQLPB0pckB3Q', 'AcwFTk-wgk3ZT2buPRIbK-zxgPx-TKbaegQvPEivN90Y', 'Alntkt3u6dDgiQxTATr01dy8M72uuaZEf9eTdM-70Gk8', 'A1-QxDkso9-MR1A8rZz_Naw6fgaAtayda8hrbkRVVu1E', 'AKuYMe09COczwf2nIoD5AE119n7GLFOVFlNLxZcKuswc', 'AxFfJTcSuEE11FINfXMqWttkZGnUZ8KaREhrnyAXTsjw', 'ALq-w1UKkdrppwZzGTtz4PWYEeWm0-sDHzOv5sq96xJY' ] # create signers coeSigners = [Signer(qb64=secret) for secret in coeSecrets] assert [signer.qb64 for signer in coeSigners] == coeSecrets # set of secrets (seeds for private keys) valSecrets = ['AgjD4nRlycmM5cPcAkfOATAp8wVldRsnc9f1tiwctXlw', 'AKUotEE0eAheKdDJh9QvNmSEmO_bjIav8V_GmctGpuCQ', 'AK-nVhMMJciMPvmF5VZE_9H-nhrgng9aJWf7_UHPtRNM', 'AT2cx-P5YUjIw_SLCHQ0pqoBWGk9s4N1brD-4pD_ANbs', 'Ap5waegfnuP6ezC18w7jQiPyQwYYsp9Yv9rYMlKAYL8k', 'Aqlc_FWWrxpxCo7R12uIz_Y2pHUH2prHx1kjghPa8jT8', 'AagumsL8FeGES7tYcnr_5oN6qcwJzZfLKxoniKUpG4qc', 'ADW3o9m3udwEf0aoOdZLLJdf1aylokP0lwwI_M2J9h0s'] # create signers valSigners = [Signer(qb64=secret) for secret in valSecrets] assert [signer.qb64 for signer in valSigners] == valSecrets # create receipt signer prefixer default code is non-transferable valSigner = Signer(qb64=valSecrets[0], transferable=False) valPrefixer = Prefixer(qb64=valSigner.verfer.qb64) assert valPrefixer.code == MtrDex.Ed25519N valpre = valPrefixer.qb64 assert valpre == 'B8KY1sKmgyjAiUDdUBPNPyrSz_ad_Qf9yzhDNZlEKiMc' with openDB(name="controller") as coeLogger, openDB(name="validator") as valLogger: coeKevery = Kevery(db=coeLogger) valKevery = Kevery(db=valLogger) event_digs = [] # list of event digs in sequence to verify against database # create event stream kes = bytearray() sn = esn = 0 # sn and last establishment sn = esn # create receipt msg stream res = bytearray() # Event 0 Inception Transferable (nxt digest not empty) serder = incept(keys=[coeSigners[esn].verfer.qb64], nxt=Nexter(keys=[coeSigners[esn + 1].verfer.qb64]).qb64) assert sn == int(serder.ked["s"], 16) == 0 coepre = serder.ked["i"] assert coepre == 'DSuhyBcPZEZLK-fcw5tzHn2N46wRCG_ZOoeKtWTOunRA' event_digs.append(serder.said) # create sig counter counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1 # sign serialization siger = coeSigners[esn].sign(serder.raw, index=0) # return Siger if index # attach to key event stream kes.extend(serder.raw) kes.extend(counter.qb64b) kes.extend(siger.qb64b) # make copy of kes so can use again for valKevery parsing.Parser().parse(ims=bytearray(kes), kvy=coeKevery) # coeKevery.process(ims=bytearray(kes)) # create Kever using Kevery coeKever = coeKevery.kevers[coepre] assert coeKever.prefixer.qb64 == coepre assert coeKever.serder.raw == serder.raw parsing.Parser().parse(ims=kes, kvy=valKevery) # valKevery.process(ims=kes) # process by Val assert coepre in valKevery.kevers valKever = valKevery.kevers[coepre] assert len(kes) == 0 # create receipt from val to coe reserder = receipt(pre=coeKever.prefixer.qb64, sn=coeKever.sn, said=coeKever.serder.saider.qb64) # sign event not receipt valCigar = valSigner.sign(ser=serder.raw) # returns Cigar cause no index assert valCigar.qb64 == \ '0BbUeX7VXSTUMbR3f5nPRqVZTJ04RuzzbgyE6780JATE9dS2xxPDk2piRMkNzanS6NXP8TioMMiGELLsSGIV87CA' recnt 
= Counter(code=CtrDex.NonTransReceiptCouples, count=1) assert recnt.qb64 == '-CAB' res.extend(reserder.raw) res.extend(recnt.qb64b) res.extend(valPrefixer.qb64b) res.extend(valCigar.qb64b) assert res == bytearray(b'{"v":"KERI10JSON000091_","t":"rct","d":"EG4EuTsxPiRM7soX10XXzNsS' b'1KqXKUp8xsQ-kW_tWHoI","i":"DSuhyBcPZEZLK-fcw5tzHn2N46wRCG_ZOoeKt' b'WTOunRA","s":"0"}-CABB8KY1sKmgyjAiUDdUBPNPyrSz_ad_Qf9yzhDNZlEKiM' b'c0BbUeX7VXSTUMbR3f5nPRqVZTJ04RuzzbgyE6780JATE9dS2xxPDk2piRMkNzan' b'S6NXP8TioMMiGELLsSGIV87CA') parsing.Parser().parse(ims=res, kvy=coeKevery) # coeKevery.process(ims=res) # coe process the receipt from val # check if in receipt database result = coeKevery.db.getRcts(key=dgKey(pre=coeKever.prefixer.qb64, dig=coeKever.serder.saider.qb64)) assert bytes(result[0]) == valPrefixer.qb64b + valCigar.qb64b assert len(result) == 1 # create invalid receipt to escrow use invalid dig and sn so not in db fake = reserder.said # some other dig reserder = receipt(pre=coeKever.prefixer.qb64, sn=2, said=fake) # sign event not receipt valCigar = valSigner.sign(ser=serder.raw) # returns Cigar cause no index recnt = Counter(code=CtrDex.NonTransReceiptCouples, count=1) # attach to receipt msg stream res.extend(reserder.raw) res.extend(recnt.qb64b) res.extend(valPrefixer.qb64b) res.extend(valCigar.qb64b) parsing.Parser().parse(ims=res, kvy=coeKevery) # coeKevery.process(ims=res) # coe process the escrow receipt from val # check if in escrow database result = coeKevery.db.getUres(key=snKey(pre=coeKever.prefixer.qb64, sn=2)) assert bytes(result[0]) == fake.encode("utf-8") + valPrefixer.qb64b + valCigar.qb64b # create invalid receipt stale use valid sn so in database but invalid dig # so bad receipt fake = coring.Diger(qb64="E-dapdcC6XR1KWmWDsNl4J_OxcGxNZw1Xd95JH5a34fI").qb64 reserder = receipt(pre=coeKever.prefixer.qb64, sn=coeKever.sn, said=fake) # sign event not receipt valCigar = valSigner.sign(ser=serder.raw) # returns Cigar cause no index recnt = Counter(code=CtrDex.NonTransReceiptCouples, count=1) # attach to receipt msg stream res.extend(reserder.raw) res.extend(recnt.qb64b) res.extend(valPrefixer.qb64b) res.extend(valCigar.qb64b) parsing.Parser().parseOne(ims=res, kvy=coeKevery) # coeKevery.processOne(ims=res) # coe process the escrow receipt from val # no new receipt at valid dig result = coeKevery.db.getRcts(key=dgKey(pre=coeKever.prefixer.qb64, dig=coeKever.serder.saider.qb64)) assert len(result) == 1 # no new receipt at invalid dig result = coeKevery.db.getRcts(key=dgKey(pre=coeKever.prefixer.qb64, dig=fake)) assert not result # Next Event Rotation Transferable sn += 1 esn += 1 assert sn == esn == 1 serder = rotate(pre=coeKever.prefixer.qb64, keys=[coeSigners[esn].verfer.qb64], dig=coeKever.serder.saider.qb64, nxt=Nexter(keys=[coeSigners[esn + 1].verfer.qb64]).qb64, sn=sn) event_digs.append(serder.said) # create sig counter counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1 # sign serialization siger = coeSigners[esn].sign(serder.raw, index=0) # returns siger # extend key event stream kes.extend(serder.raw) kes.extend(counter.qb64b) kes.extend(siger.qb64b) parsing.Parser().parse(ims=bytearray(kes), kvy=coeKevery) # coeKevery.process(ims=bytearray(kes)) # update key event verifier state parsing.Parser().parse(ims=kes, kvy=valKevery) # valKevery.process(ims=kes) # Next Event Interaction sn += 1 # do not increment esn assert sn == 2 assert esn == 1 serder = interact(pre=coeKever.prefixer.qb64, dig=coeKever.serder.saider.qb64, sn=sn) event_digs.append(serder.said) # create sig 
counter counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1 # sign serialization siger = coeSigners[esn].sign(serder.raw, index=0) # extend key event stream kes.extend(serder.raw) kes.extend(counter.qb64b) kes.extend(siger.qb64b) parsing.Parser().parse(ims=bytearray(kes), kvy=coeKevery) # coeKevery.process(ims=bytearray(kes)) # update key event verifier state parsing.Parser().parse(ims=kes, kvy=valKevery) # valKevery.process(ims=kes) # Next Event Rotation Transferable sn += 1 esn += 1 assert sn == 3 assert esn == 2 serder = rotate(pre=coeKever.prefixer.qb64, keys=[coeSigners[esn].verfer.qb64], dig=coeKever.serder.saider.qb64, nxt=Nexter(keys=[coeSigners[esn + 1].verfer.qb64]).qb64, sn=sn) event_digs.append(serder.said) # create sig counter counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1 # sign serialization siger = coeSigners[esn].sign(serder.raw, index=0) # extend key event stream kes.extend(serder.raw) kes.extend(counter.qb64b) kes.extend(siger.qb64b) parsing.Parser().parse(ims=bytearray(kes), kvy=coeKevery) # coeKevery.process(ims=bytearray(kes)) # update key event verifier state parsing.Parser().parse(ims=kes, kvy=valKevery) # valKevery.process(ims=kes) # Next Event Interaction sn += 1 # do not increment esn assert sn == 4 assert esn == 2 serder = interact(pre=coeKever.prefixer.qb64, dig=coeKever.serder.saider.qb64, sn=sn) event_digs.append(serder.said) # create sig counter counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1 # sign serialization siger = coeSigners[esn].sign(serder.raw, index=0) # extend key event stream kes.extend(serder.raw) kes.extend(counter.qb64b) kes.extend(siger.qb64b) parsing.Parser().parse(ims=bytearray(kes), kvy=coeKevery) # coeKevery.process(ims=bytearray(kes)) # update key event verifier state parsing.Parser().parse(ims=kes, kvy=valKevery) # valKevery.process(ims=kes) # Next Event Interaction sn += 1 # do not increment esn assert sn == 5 assert esn == 2 serder = interact(pre=coeKever.prefixer.qb64, dig=coeKever.serder.saider.qb64, sn=sn) event_digs.append(serder.said) # create sig counter counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1 # sign serialization siger = coeSigners[esn].sign(serder.raw, index=0) # extend key event stream kes.extend(serder.raw) kes.extend(counter.qb64b) kes.extend(siger.qb64b) parsing.Parser().parse(ims=bytearray(kes), kvy=coeKevery) # coeKevery.process(ims=bytearray(kes)) # update key event verifier state parsing.Parser().parse(ims=kes, kvy=valKevery) # valKevery.process(ims=kes) # Next Event Interaction sn += 1 # do not increment esn assert sn == 6 assert esn == 2 serder = interact(pre=coeKever.prefixer.qb64, dig=coeKever.serder.saider.qb64, sn=sn) event_digs.append(serder.said) # create sig counter counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1 # sign serialization siger = coeSigners[esn].sign(serder.raw, index=0) # extend key event stream kes.extend(serder.raw) kes.extend(counter.qb64b) kes.extend(siger.qb64b) parsing.Parser().parse(ims=bytearray(kes), kvy=coeKevery) # coeKevery.process(ims=bytearray(kes)) # update key event verifier state parsing.Parser().parse(ims=kes, kvy=valKevery) # valKevery.process(ims=kes) assert coeKever.verfers[0].qb64 == coeSigners[esn].verfer.qb64 db_digs = [bytes(val).decode("utf-8") for val in coeKever.db.getKelIter(coepre)] assert len(db_digs) == len(event_digs) == 7 assert valKever.sn == coeKever.sn assert valKever.verfers[0].qb64 == coeKever.verfers[0].qb64 == coeSigners[esn].verfer.qb64 assert not 
os.path.exists(valKevery.db.path) assert not os.path.exists(coeKever.db.path) """ Done Test """
11,718
def layernorm_backward(dout, cache): """ Backward pass for layer normalization. For this implementation, you can heavily rely on the work you've done already for batch normalization. Inputs: - dout: Upstream derivatives, of shape (N, D) - cache: Variable of intermediates from layernorm_forward. Returns a tuple of: - dx: Gradient with respect to inputs x, of shape (N, D) - dgamma: Gradient with respect to scale parameter gamma, of shape (D,) - dbeta: Gradient with respect to shift parameter beta, of shape (D,) """ dx, dgamma, dbeta = None, None, None ########################################################################### # TODO: Implement the backward pass for layer norm. # # # # HINT: this can be done by slightly modifying your training-time # # implementation of batch normalization. The hints to the forward pass # # still apply! # ########################################################################### # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** x, x_norm, mu, sigma2, gamma = cache D, N = x_norm.shape x_mean0 = x - mu dgamma = sum(dout*x_norm) dbeta = sum(dout) dx_norm = dout * gamma dx_norm = dx_norm.T x_norm = x_norm.T #dsigma2 = -0.5*sum(dx_norm*x_norm/sigma2) dsigma2 = -0.5*sum(dx_norm * x_mean0)* (sigma2**-1.5) dmu = - sum(dx_norm / np.sqrt(sigma2)) - 2* dsigma2 * np.mean(x_mean0) dx = (dx_norm/np.sqrt(sigma2)) + (dsigma2*2*x_mean0/N) + (dmu/N) dx =dx.T # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ########################################################################### # END OF YOUR CODE # ########################################################################### return dx, dgamma, dbeta
11,719
def energyAct( grid, deltaE, xA, yA, zA, xB, yB, zB, temp, eList, i, dimensions): """Perform swap or not, based on deltaE value""" kB = 8.617332e-5 # boltzmann constant, w/ ~eV units kTemp = kB * temp if deltaE <= 0: # Swap lowers energy, therefore is favourable, # so perform swap in grid grid = performSwap(grid, xA, yA, zA, xB, yB, zB, dimensions) eList[i + 1] = eList[i] + deltaE else: # i.e. deltaE > 0: if temp == 0: thermalEnergy = 0 else: thermalEnergy = exp((-1 * deltaE) / (kTemp)) R = randint(0, 1000) / 1000 if thermalEnergy > R: grid = performSwap(grid, xA, yA, zA, xB, yB, zB, dimensions) eList[i + 1] = eList[i] + deltaE else: eList[i + 1] = eList[i] return grid, eList
11,720
def submit_job(name, index, script_path):
    """
    Function that writes the submission settings in the sge files
    inputs:
    - name: Names of the computations to run (list)
    - index: Index of the computation to launch (int)
    - script_path: Path to the created SGE scripts (string)
    outputs:
    - The function has no outputs
    """
    print("Launching computation: %s" % (name[index]))
11,721
def lambda_handler(event, context): """ Lambda function that transforms input data and stores inital DB entry Parameters ---------- event: dict, required context: object, required Lambda Context runtime methods and attributes Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html Returns ------ Lambda Output Format: dict """ log.log_request_and_context(event, context) labeling_jobs = event["labelingJobs"] batch_id = event["batchId"] error_message = "" """ Example database entry input for batch { "BatchCurrentStep": "INPUT", "BatchId": "notebook-test-08f874a7", "BatchMetadataType": "INPUT", "BatchStatus": "INTERNAL_ERROR", "LabelingJobs": [ { "inputConfig": { "inputManifestS3Uri": "s3://smgt-qa-batch-input-468814823616-us-east-1/two-frame-manifest.manifest" }, "jobLevel": 1, "jobModality": "PointCloudObjectDetectionAudit", "jobName": "notebook-test-08f874a7-first-level", "jobType": "BATCH", "labelCategoryConfigS3Uri": "s3://smgt-qa-batch-input-468814823616-us-east-1/first-level-label-category-file.json", "maxConcurrentTaskCount": 1, "taskAvailabilityLifetimeInSeconds": 864000, "taskTimeLimitInSeconds": 604800, "workteamArn": "arn:aws:sagemaker:us-east-1:468814823616:workteam/private-crowd/first-level" }, { "inputConfig": { "chainFromJobName": "notebook-test-08f874a7-first-level" }, "jobLevel": 2, "jobModality": "PointCloudObjectDetectionAudit", "jobName": "notebook-test-08f874a7-second-level", "jobType": "BATCH", "maxConcurrentTaskCount": 1, "taskAvailabilityLifetimeInSeconds": 864000, "taskTimeLimitInSeconds": 604800, "workteamArn": "arn:aws:sagemaker:us-east-1:468814823616:workteam/private-crowd/first-level" } ] } """ db.insert_transformed_input_batch_metadata( batch_id=batch_id, batch_current_step=BatchCurrentStep.INPUT, batch_status=BatchStatus.IN_PROGRESS, batch_metadata_type=BatchMetadataType.INPUT, error_message=error_message, labeling_jobs=labeling_jobs, ) return { "batch_id": batch_id, }
11,722
def test_well_rows(experiment):
    """Test experiment well rows."""
    rows = [0]
    assert experiment.well_rows == rows
11,723
def getModCase(s, mod):
    """Checks the state of the shift and caps lock keys, and switches
    the case of the s string if needed."""
    if bool(mod & KMOD_RSHIFT or mod & KMOD_LSHIFT) ^ bool(mod & KMOD_CAPS):
        return s.swapcase()
    else:
        return s
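A standalone sketch of the same shift/caps-lock case logic, assuming pygame is installed (the KMOD_* bitmask constants come from pygame.locals); mod_case is an illustrative name.

from pygame.locals import KMOD_LSHIFT, KMOD_RSHIFT, KMOD_CAPS

def mod_case(s, mod):
    # Swap case when exactly one of shift / caps lock is active.
    shift = bool(mod & (KMOD_LSHIFT | KMOD_RSHIFT))
    caps = bool(mod & KMOD_CAPS)
    return s.swapcase() if shift ^ caps else s

assert mod_case("abc", KMOD_LSHIFT) == "ABC"
assert mod_case("abc", KMOD_LSHIFT | KMOD_CAPS) == "abc"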
11,724
def plot_D_dt_histogram(all_samples, lens_i=0, true_D_dt=None, save_dir='.'): """Plot the histogram of D_dt samples, overlaid with a Gaussian fit and truth D_dt all_samples : np.array D_dt MCMC samples """ bin_heights, bin_borders, _ = plt.hist(all_samples, bins=200, alpha=0.5, density=True, edgecolor='k', color='tab:blue', range=[0.0, 15000.0]) bin_centers = bin_borders[:-1] + np.diff(bin_borders) / 2 # Compute the mode and std for lognormal lognorm_stats = h0_utils.get_lognormal_stats(all_samples) mu = lognorm_stats['mu'] sigma = lognorm_stats['sigma'] mode = lognorm_stats['mode'] std = lognorm_stats['std'] popt = [mu, sigma] #x_interval_for_fit = np.linspace(bin_borders[0], bin_borders[-1], 10000) x_interval_for_fit = np.linspace(bin_centers[0], bin_centers[-1], 1000) # Overlay the fit gaussian pdf plt.plot(x_interval_for_fit, lognormal(x_interval_for_fit, *popt), color='k', label='fit: mode={:0.1f}, std={:0.1f}'.format(mode, std)) if save_dir is not None: if true_D_dt is not None: plt.axvline(x=true_D_dt, linestyle='--', color='red', label='truth') plt.xlabel(r'$D_{{\Delta t}}$ (Mpc)') plt.ylabel('density') plt.title(r'$D_{{\Delta t}}$ posterior for lens {0:04d}'.format(lens_i)) plt.legend() save_path = os.path.join(save_dir, 'D_dt_histogram_{0:04d}.png'.format(lens_i)) plt.savefig(save_path) plt.close() return mu, sigma
11,725
def should_see_link(self, link_url):
    """Assert a link with the provided URL is visible on the page."""
    elements = ElementSelector(
        world.browser,
        str('//a[@href="%s"]' % link_url),
        filter_displayed=True,
    )
    if not elements:
        raise AssertionError("Expected link not found.")
11,726
def try_provider(package, provider, domain):
    """Try using a provider."""
    downloaded_file = None
    data = None
    apk_name = f'{package}.apk'
    temp_file = Path(gettempdir()) / apk_name
    link = find_apk_link(provider, domain)
    if link:
        downloaded_file = download_file(link, temp_file)
    if downloaded_file:
        data = add_apk(downloaded_file, apk_name)
    if data:
        return data
    return None
11,727
def acquire_patents(): """ from search terms get search results as a dataframe save to program_generated """ work_completed('acquire_patents', 0) f = os.path.join(retrieve_path('search_terms')) print('f = ' + str(f)) df_search_terms = pd.read_csv(f) search_terms = list(df_search_terms['term']) for term in search_terms: name_dataset = 'patents' result_limits = [5, 10, 7000, 8000, 9000, 10000, 15000, 20000] #result_limits = retrieve_format('patent_result_limits') query_patents(name_dataset, term, result_limits) work_completed('acquire_patents', 1)
11,728
def AcceptGroupApplication(request, callback, customData = None, extraHeaders = None): """ Accepts an outstanding invitation to to join a group https://docs.microsoft.com/rest/api/playfab/groups/groups/acceptgroupapplication """ if not PlayFabSettings._internalSettings.EntityToken: raise PlayFabErrors.PlayFabException("Must call GetEntityToken before calling this method") def wrappedCallback(playFabResult, error): if callback: callback(playFabResult, error) PlayFabHTTP.DoPost("/Group/AcceptGroupApplication", request, "X-EntityToken", PlayFabSettings._internalSettings.EntityToken, wrappedCallback, customData, extraHeaders)
11,729
def test_parse_steps(): """Simple tests for the PartialParse.parse_step function Warning: these are not exhaustive """ _test_parse_step('shift', PartialParse.shift_id, 'tingle', [0, 1], 2, [], [0, 1, 2], 3, []) _test_parse_step('left-arc', PartialParse.left_arc_id, 'tingle', [0, 1, 2], 3, [], [0, 2], 3, [(2, 1, 'tingle')]) _test_parse_step('right-arc', PartialParse.right_arc_id, 'koolimpah', [0, 1, 2], 3, [], [0, 1], 3, [(1, 2, 'koolimpah')])
11,730
def _plot_line(axis, origin, angle, size_hi, size_lo=0.0, **kwargs): """Plot a straight line into ``axis``. The line is described through the ``origin`` and the ``angle``. It is drawn from ``size_lo`` to ``size_hi``, where both parameters are passed as fractions of said line. ``kwargs`` are passed to :py:meth:`pylab.plot`. """ src = (origin[0] + np.cos(angle) * size_lo, origin[1] + np.sin(angle) * size_lo) trg = (origin[0] + np.cos(angle) * size_hi, origin[1] + np.sin(angle) * size_hi) axis.plot((src[0], trg[0]), (src[1], trg[1]), **kwargs)
11,731
def _get_list(sline):
    """Takes a list of strings and converts them to floats."""
    try:
        sline2 = convert_to_float(sline)
    except ValueError:
        print("sline = %s" % sline)
        raise SyntaxError('cannot parse %s' % sline)
    return sline2
11,732
def test_unionfind_sim(size, Code, errors, faulty, max_rate, extra_keys): """Test initialize function for all configurations.""" Decoder_module = getattr(oss.decoders, "unionfind").sim if hasattr(Decoder_module, Code.capitalize()): decoder_module = getattr(Decoder_module, Code.capitalize()) Code_module = getattr(oss.codes, Code).sim code_module = getattr(Code_module, "FaultyMeasurements") if faulty else getattr(Code_module, "PerfectMeasurements") code = code_module(size) code.initialize(*errors) decoder = decoder_module(code) error_keys = get_error_keys(errors) + extra_keys trivial = 0 for _ in range(ITERS): error_rates = {key: random.random() * max_rate for key in error_keys} code.random_errors(**error_rates) decoder.decode() trivial += code.trivial_ancillas assert trivial == ITERS else: assert True
11,733
def add_number(partitions, number):
    """Adds to the partitions the provided `number`, in all its combinations."""
    # Prepend a 1 to each partition key
    prods = partitions.values()
    nKeys = [(1,) + x for x in partitions.keys()]
    # Apply sum_ones on each partition, and add the results to the partitions.
    # Don't use reduce; the repeated list creation is just too slow:
    # partitions = reduce(lambda acc, x: acc + sum_ones(x), partitions, [])
    newParts = []
    newProds = []
    for part, prod in zip(nKeys, prods):
        npart, nprod = sum_ones(part, prod)
        newParts.extend(npart)
        newProds.extend(nprod)
    # Remove duplicates
    return dict(zip(newParts, newProds))
11,734
def copy_log_init(chron, new_direct_long): """ Function to incrementally update new init.csv file Args: chron (list): sorted list of temporally related log directories new_direct_long (str): path of new log directory """ for dr in chron: local = glob.glob(dr + "/*csv") src = glob.glob(new_direct_long + "/*csv") if len(src) == 0: [shutil.copy(loc, new_direct_long) for loc in local] src = glob.glob(new_direct_long + "/*csv") # pipeline to merge log.csv src_init_file = [fl for fl in src if "init.csv" in fl][0] src_init_df = pd.read_csv(src_init_file) if "until" not in src_init_df.columns: src_log_file = [fl for fl in src if "log.csv" in fl][0] src_log_df = pd.read_csv(src_log_file) max_epoch = max(src_log_df["epoch"]) src_init_df["until"] = max_epoch src_init_df.to_csv(src_init_file, index=False) else: # pipeline to merge log.csv src_log_file = [fl for fl in src if "log.csv" in fl][0] src_log_df = pd.read_csv(src_log_file) max_epoch = max(src_log_df["epoch"]) local_log_file = [fl for fl in local if "log.csv" in fl][0] local_log_df = pd.read_csv(local_log_file) local_log_df["epoch"] = local_log_df["epoch"] + max_epoch # write combined log.csv to file src_log_df = pd.concat([src_log_df, local_log_df]) src_log_df.to_csv(src_log_file, index=False) max_epoch = max(src_log_df["epoch"]) src_init_file = [fl for fl in src if "init.csv" in fl][0] src_init_df = pd.read_csv(src_init_file) local_init_file = [fl for fl in local if "init.csv" in fl][0] local_init_df = pd.read_csv(local_init_file) local_init_df["until"] = max_epoch pd.concat([src_init_df, local_init_df]).to_csv(src_init_file, index=False)
11,735
def _color_to_rgb(color, input):
    """Add some more flexibility to color choices."""
    if input == "hls":
        color = colorsys.hls_to_rgb(*color)
    elif input == "husl":
        color = husl.husl_to_rgb(*color)
        color = tuple(np.clip(color, 0, 1))
    elif input == "xkcd":
        color = xkcd_rgb[color]
    return color
11,736
def run():
    """
    Create the output files if they do not exist.
    :return:
    """
    if not os.path.exists('./res'):
        os.makedirs('res')
    config = get_config()
    # note: the original condition had misplaced parentheses
    # (os.path.exists(config['title'] or ...)); check each path separately
    if (not os.path.exists(config['url'])
            or not os.path.exists(config['title'])
            or not os.path.exists(config['content'])):
        data_load(config)
    if not os.path.exists(config['content_clean']):
        data_clean_content(config)
    if not os.path.exists(config['content_filter']):
        filter_stop_word(config)
    if not os.path.exists(config['content_stemming']):
        stemming(config)
    if not os.path.exists(config['term_list']):
        create_term_list(config)
    documents = get_content(config)
    tf_documents = get_tf(documents)
    if not os.path.exists(config['idf']):
        create_idf(config, documents)
    idf_documents = get_idf(config)
    if not os.path.exists(config['tf_idf']):
        create_tf_idf(config, tf_documents, idf_documents, documents)
11,737
def in_days(range_in_days): """ Generate time range strings between start and end date where each range is range_in_days days long :param range_in_days: number of days :return: list of strings with time ranges in the required format """ delta = observation_period_end - observation_period_start # timedelta period_starts = [] for d in range(0, delta.days + 1, range_in_days): # print(observation_period_start + timedelta(days=d)) period_starts.append(observation_period_start + timedelta(days=d)) start_end = [] for i, start in enumerate(period_starts[:-1]): start_end.append((start, period_starts[i+1] - timedelta(days=1))) time_periods = [start.strftime("%Y%m%d") + ":" + end.strftime("%Y%m%d") for start, end in start_end] return time_periods
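A standalone sketch of the same windowing idea, with the observation window passed in explicitly instead of read from module-level globals; day_ranges is an illustrative name.

from datetime import date, timedelta

def day_ranges(start, end, days):
    # Split [start, end] into consecutive ranges of `days` days,
    # formatted as "YYYYMMDD:YYYYMMDD" strings.
    starts = [start + timedelta(days=d) for d in range(0, (end - start).days + 1, days)]
    return [
        f"{a:%Y%m%d}:{b - timedelta(days=1):%Y%m%d}"
        for a, b in zip(starts, starts[1:])
    ]

print(day_ranges(date(2020, 1, 1), date(2020, 1, 15), 7))
# ['20200101:20200107', '20200108:20200114']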
11,738
def tags_in_file(path: Path) -> List[str]:
    """Return all tags in a file."""
    matches = re.findall(r'@([a-zA-Z1-9\-]+)', path.read_text())
    return matches
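A small usage sketch of the same tag regex applied to an in-memory string instead of a file; tags_in_text is an illustrative name. Note the character class matches digits 1-9 but not 0.

import re
from typing import List

def tags_in_text(text: str) -> List[str]:
    # Same pattern as above: '@' followed by letters, digits 1-9, or hyphens.
    return re.findall(r'@([a-zA-Z1-9\-]+)', text)

assert tags_in_text("@smoke and @slow-test scenarios") == ["smoke", "slow-test"]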
11,739
def pad_to_longest_in_one_batch(batch): """According to the longest item to pad dataset in one batch. Notes: usage of pad_sequence: seq_list = [(L_1, dims), (L_2, dims), ...] item.size() must be (L, dims) return (longest_len, len(seq_list), dims) Args: batch: [ (noisy_mag_1, noise_mag_1, clean_mag_1, n_frames_1), (noisy_mag_2, noise_mag_2, clean_mag_2, n_frames_2), ... ] """ noisy_mag_list = [] mask_mag_list = [] clean_mag_list = [] n_frames_list = [] for noisy_mag, mask, clean_mag, n_frames in batch: noisy_mag_list.append(torch.t(torch.tensor(noisy_mag))) # the shape of tensor is (T, F). mask_mag_list.append(torch.t(torch.tensor(mask))) clean_mag_list.append(torch.t(torch.tensor(clean_mag))) n_frames_list.append(n_frames) noisy_mag_one_batch = pad_sequence(noisy_mag_list) # the shape is (longest T, len(seq_list), F) mask_one_batch = pad_sequence(mask_mag_list) clean_mag_one_batch = pad_sequence(clean_mag_list) noisy_mag_one_batch = noisy_mag_one_batch.permute(1, 0, 2) # the shape is (len(seq_list), longest T, F) mask_one_batch = mask_one_batch.permute(1, 0, 2) clean_mag_one_batch = clean_mag_one_batch.permute(1, 0, 2) # (batch_size, longest T, F) return noisy_mag_one_batch, mask_one_batch, clean_mag_one_batch, n_frames_list
11,740
def main(): """ Process the tokenized text of each review by adding a part-of-speech tag, removing stop words, performing lemmatization, and finally making bigrams. These bigrams will be used to classify reviews as incentivized or non-incentivized. """ # NLTK corpora are not present on the other nodes of the cluster, # we can either load them into memory on the master node or send the zip # file to the other nodes with sc.addFile(). The first approach worked so # we went with that. This also avoids using NLTK's lazy loader which ran into # infinite recursion after being unpickled on the slave nodes by Spark. root = nltk.data.find('corpora/omw') reader = nltk.corpus.CorpusReader(root, r'.*/wn-data-.*\.tab', encoding='utf8') # WordNet corpus for lemmatization wn = nltk.corpus.reader.WordNetCorpusReader( nltk.data.find('corpora/wordnet'), reader) sc = SparkContext() # Add the NLTK library sc.addPyFile('nltk.zip') sqlContext = SQLContext(sc) sqlContext.setConf('spark.sql.parquet.compression.codec', 'snappy') df = sqlContext.read.parquet(DATA_PATH) # List of English stop words, use set for better search performance stops = sc.broadcast(set(stopwords.words('english'))) wn = sc.broadcast(wn) # NLTK's default part-of-speech tagger tagger = sc.broadcast(PerceptronTagger()) # PoS tagging tag = UserDefinedFunction( lambda t: tagger.value.tag(t), ArrayType(ArrayType(StringType())) ) # Remove stop words remstops = UserDefinedFunction( lambda t: remove_stops(t, stops), ArrayType(ArrayType(StringType())) ) # Lemmatize lem = UserDefinedFunction( lambda t: lemmatize_tagged_tokens(t, wn), ArrayType(StringType()) ) # Make bigrams mk_bg = UserDefinedFunction(lambda t: list(bigrams(t)), ArrayType(ArrayType(StringType()))) # One function at a time or Spark complains about UDFs # not being callable df = df.withColumn('bg', tag(df.tokenized_text)) df = df.withColumn('bg', remstops(df.bg)) df = df.withColumn('bg', lem(df.bg)) df = df.withColumn('bg', mk_bg(df.bg)) # Save the results to HDFS df.write.mode('overwrite').parquet(BIGRAMS_PATH)
11,741
def create_instance(c_instance):
    """Creates and returns the Twister script"""
    return Twister(c_instance)
11,742
def command_handler(command_type: Type[CommandAPI], *, name: str = None) -> Callable[[CommandHandlerFn], Type[CommandHandler]]: """ Decorator that can be used to construct a CommandHandler from a simple function. .. code-block:: python @command_handler(Ping) def handle_ping(connection, msg): connection.get_base_protocol().send_pong() """ if name is None: name = f'handle_{command_type.__name__}' def decorator(fn: CommandHandlerFn) -> Type[CommandHandler]: return type( name, (CommandHandler,), { 'cmd_type': command_type, 'handle': staticmethod(fn), }, ) return decorator
11,743
def check_ip_in_lists(ip, db_connection, penalties): """ Does an optimized ip lookup with the db_connection. Applies only the maximum penalty. Args: ip (str): ip string db_connection (DBconnector obj) penalties (dict): Contains tor_penalty, vpn_penalty, blacklist_penalty keys with integer values Returns: :int: penalty_added """ penalties = {'tor': int(penalties['tor_penalty']), 'vpn': int(penalties['vpn_penalty']), 'blacklist': int(penalties['ip_blacklist_penalty'])} penalties = sorted(penalties.items(), key=lambda x: x[1]) # sort by penalty value to check in that order and perform early stopping penalty_added = 0 for penalty_type, penalty_value in penalties: if penalty_value == 0: continue if penalty_type == 'tor': if db_connection.set_exists('tor_ips', ip): penalty_added = penalty_value elif penalty_type == 'blacklist': if db_connection.set_exists('blacklist_ips', ip): penalty_added = penalty_value elif db_connection.set_exists('blacklist_ips', '.'.join(ip.split('.')[:3])): penalty_added = penalty_value elif db_connection.set_exists('blacklist_ips', '.'.join(ip.split('.')[:2])): penalty_added = penalty_value elif penalty_type == 'vpn': if db_connection.set_exists('vpn_ips', ip): penalty_added = penalty_value elif db_connection.set_exists('vpn_ips', '.'.join(ip.split('.')[:3])): penalty_added = penalty_value elif db_connection.set_exists('vpn_ips', '.'.join(ip.split('.')[:2])): penalty_added = penalty_value if penalty_added > 0: break return penalty_added
11,744
def scan(fn, sequences=None, outputs_info=None, non_sequences=None, n_steps=None, truncate_gradient=-1, go_backwards=False, mode=None, name=None, options=None, profile=False): """ This function constructs and applies a Scan op to the provided arguments. :param fn: ``fn`` is a function that describes the operations involved in one step of ``scan``. ``fn`` should construct variables describing the output of one iteration step. It should expect as input theano variables representing all the slices of the input sequences and previous values of the outputs, as well as all other arguments given to scan as ``non_sequences``. The order in which scan passes these variables to ``fn`` is the following : * all time slices of the first sequence * all time slices of the second sequence * ... * all time slices of the last sequence * all past slices of the first output * all past slices of the second otuput * ... * all past slices of the last output * all other arguments (the list given as `non_sequences` to scan) The order of the sequences is the same as the one in the list `sequences` given to scan. The order of the outputs is the same as the order of ``outputs_info``. For any sequence or output the order of the time slices is the same as the one in which they have been given as taps. For example if one writes the following : .. code-block:: python scan(fn, sequences = [ dict(input= Sequence1, taps = [-3,2,-1]) , Sequence2 , dict(input = Sequence3, taps = 3) ] , outputs_info = [ dict(initial = Output1, taps = [-3,-5]) , dict(initial = Output2, taps = None) , Output3 ] , non_sequences = [ Argument1, Argument 2]) ``fn`` should expect the following arguments in this given order: #. ``Sequence1[t-3]`` #. ``Sequence1[t+2]`` #. ``Sequence1[t-1]`` #. ``Sequence2[t]`` #. ``Sequence3[t+3]`` #. ``Output1[t-3]`` #. ``Output1[t-5]`` #. ``Output3[t-1]`` #. ``Argument1`` #. ``Argument2`` The list of ``non_sequences`` can also contain shared variables used in the function, though ``scan`` is able to figure those out on its own so they can be skipped. For the clarity of the code we recommend though to provide them to scan. To some extend ``scan`` can also figure out other ``non sequences`` (not shared) even if not passed to scan (but used by `fn`). A simple example of this would be : .. code-block:: python import theano.tensor as TT W = TT.matrix() W_2 = W**2 def f(x): return TT.dot(x,W_2) The function is expected to return two things. One is a list of outputs ordered in the same order as ``outputs_info``, with the difference that there should be only one output variable per output initial state (even if no tap value is used). Secondly `fn` should return an update dictionary (that tells how to update any shared variable after each iteration step). The dictionary can optionally be given as a list of tuples. There is no constraint on the order of these two list, ``fn`` can return either ``(outputs_list, update_dictionary)`` or ``(update_dictionary, outputs_list)`` or just one of the two (in case the other is empty). To use ``scan`` as a while loop, the user needs to change the function ``fn`` such that also a stopping condition is returned. To do so, he/she needs to wrap the condition in an ``until`` class. The condition should be returned as a third element, for example: .. code-block:: python ... 
return [y1_t, y2_t], {x:x+1}, theano.scan_module.until(x < 50) Note that a number of steps (considered in here as the maximum number of steps ) is still required even though a condition is passed (and it is used to allocate memory if needed). = {}): :param sequences: ``sequences`` is the list of Theano variables or dictionaries describing the sequences ``scan`` has to iterate over. If a sequence is given as wrapped in a dictionary, then a set of optional information can be provided about the sequence. The dictionary should have the following keys: * ``input`` (*mandatory*) -- Theano variable representing the sequence. * ``taps`` -- Temporal taps of the sequence required by ``fn``. They are provided as a list of integers, where a value ``k`` impiles that at iteration step ``t`` scan will pass to ``fn`` the slice ``t+k``. Default value is ``[0]`` Any Theano variable in the list ``sequences`` is automatically wrapped into a dictionary where ``taps`` is set to ``[0]`` :param outputs_info: ``outputs_info`` is the list of Theano variables or dictionaries describing the initial state of the outputs computed recurrently. When this initial states are given as dictionary optional information can be provided about the output corresponding to these initial states. The dictionary should have the following keys: * ``initial`` -- Theano variable that represents the initial state of a given output. In case the output is not computed recursively (think of a map) and does not require a initial state this field can be skiped. Given that only the previous time step of the output is used by ``fn`` the initial state should have the same shape as the output. If multiple time taps are used, the initial state should have one extra dimension that should cover all the possible taps. For example if we use ``-5``, ``-2`` and ``-1`` as past taps, at step 0, ``fn`` will require (by an abuse of notation) ``output[-5]``, ``output[-2]`` and ``output[-1]``. This will be given by the initial state, which in this case should have the shape (5,)+output.shape. If this variable containing the initial state is called ``init_y`` then ``init_y[0]`` *corresponds to* ``output[-5]``. ``init_y[1]`` *correponds to* ``output[-4]``, ``init_y[2]`` corresponds to ``output[-3]``, ``init_y[3]`` coresponds to ``output[-2]``, ``init_y[4]`` corresponds to ``output[-1]``. While this order might seem strange, it comes natural from splitting an array at a given point. Assume that we have a array ``x``, and we choose ``k`` to be time step ``0``. Then our initial state would be ``x[:k]``, while the output will be ``x[k:]``. Looking at this split, elements in ``x[:k]`` are ordered exactly like those in ``init_y``. * ``taps`` -- Temporal taps of the output that will be pass to ``fn``. They are provided as a list of *negative* integers, where a value ``k`` implies that at iteration step ``t`` scan will pass to ``fn`` the slice ``t+k``. ``scan`` will follow this logic if partial information is given: * If an output is not wrapped in a dictionary, ``scan`` will wrap it in one assuming that you use only the last step of the output (i.e. it makes your tap value list equal to [-1]). * If you wrap an output in a dictionary and you do not provide any taps but you provide an initial state it will assume that you are using only a tap value of -1. * If you wrap an output in a dictionary but you do not provide any initial state, it assumes that you are not using any form of taps. 
* If you provide a ``None`` instead of a variable or a empty dictionary ``scan`` assumes that you will not use any taps for this output (like for example in case of a map) If ``outputs_info`` is an empty list or None, ``scan`` assumes that no tap is used for any of the outputs. If information is provided just for a subset of the outputs an exception is raised (because there is no convention on how scan should map the provided information to the outputs of ``fn``) :param non_sequences: ``non_sequences`` is the list of arguments that are passed to ``fn`` at each steps. One can opt to exclude variable used in ``fn`` from this list as long as they are part of the computational graph, though for clarity we encourage not to do so. :param n_steps: ``n_steps`` is the number of steps to iterate given as an int or Theano scalar. If any of the input sequences do not have enough elements, scan will raise an error. If the *value is 0* the outputs will have *0 rows*. If the value is negative, ``scan`` will run backwards in time. If the ``go_backwards`` flag is already set and also ``n_steps`` is negative, ``scan`` will run forward in time. If n stpes is not provided, ``scan`` will figure out the amount of steps it should run given its input sequences. :param truncate_gradient: ``truncate_gradient`` is the number of steps to use in truncated BPTT. If you compute gradients through a scan op, they are computed using backpropagation through time. By providing a different value then -1, you choose to use truncated BPTT instead of classical BPTT, where you go for only ``truncate_gradient`` number of steps back in time. :param go_backwards: ``go_backwards`` is a flag indicating if ``scan`` should go backwards through the sequences. If you think of each sequence as indexed by time, making this flag True would mean that ``scan`` goes back in time, namely that for any sequence it starts from the end and goes towards 0. :param name: When profiling ``scan``, it is crucial to provide a name for any instance of ``scan``. The profiler will produce an overall profile of your code as well as profiles for the computation of one step of each instance of ``scan``. The ``name`` of the instance appears in those profiles and can greatly help to disambiguate information. :param mode: It is recommended to leave this argument to None, especially when profiling ``scan`` (otherwise the results are not going to be accurate). If you prefer the computations of one step of ``scan`` to be done differently then the entire function, you can use this parameter to describe how the computations in this loop are done (see ``theano.function`` for details about possible values and their meaning). :param profile: Flag or string. If true, or different from the empty string, a profile object will be created and attached to the inner graph of scan. In case ``profile`` is True, the profile object will have the name of the scan instance, otherwise it will have the passed string. Profile object collect (and print) information only when running the inner graph with the new cvm linker ( with default modes, other linkers this argument is useless) :rtype: tuple :return: tuple of the form (outputs, updates); ``outputs`` is either a Theano variable or a list of Theano variables representing the outputs of ``scan`` (in the same order as in ``outputs_info``). ``updates`` is a subclass of dictionary specifying the update rules for all shared variables used in scan This dictionary should be passed to ``theano.function`` when you compile your function. 
The change compared to a normal dictionary is that we validate that keys
    are SharedVariable and that additions to the dictionary are validated to
    be consistent.
    """
    # Note : see the internal documentation of the scan op for naming
    # conventions and all other details
    if options is None:
        options = {}
    rvals = scan_utils.canonical_arguments(sequences,
                                           outputs_info,
                                           non_sequences,
                                           go_backwards,
                                           n_steps)
    inputs, states_and_outputs_info, parameters, T = rvals
    # If we provided a known number of steps (before compilation)
    # and if that number is 1 or -1, then we can skip the Scan Op,
    # and just apply the inner function once
    # To do that we check here to see the nature of n_steps
    T_value = None
    if isinstance(n_steps, (float, int)):
        T_value = int(n_steps)
    else:
        try:
            T_value = opt.get_scalar_constant_value(n_steps)
        except (TypeError, AttributeError):
            T_value = None

    if T_value in (1, -1):
        return one_step_scan(fn,
                             inputs,
                             states_and_outputs_info,
                             parameters,
                             truncate_gradient)

    # 1. Variable representing the current time step
    t = scalar_shared(numpy.int64(0), name='t')

    # 2. Allocate memory for the states of scan.
    mintaps = []
    lengths = []
    for pos, arg_info in enumerate(states_and_outputs_info):
        if arg_info.get('taps', None) == [-1]:
            mintaps.append(1)
            lengths.append(scalar_shared(numpy.int64(0),
                                         name='l%d' % pos))
            arg_info['initial'] = scan_utils.expand(tensor.unbroadcast(
                tensor.shape_padleft(arg_info['initial']), 0), T)
        elif arg_info.get('taps', None):
            if numpy.any(numpy.array(arg_info.get('taps', [])) > 0):
                # Make sure we do not have requests for future values of a
                # sequence; we cannot provide such values
                raise ValueError('Can not use future taps of outputs',
                                 arg_info)
            mintap = abs(numpy.min(arg_info['taps']))
            lengths.append(scalar_shared(numpy.int64(0),
                                         name='l%d' % pos))
            mintaps.append(mintap)
            arg_info['initial'] = scan_utils.expand(
                arg_info['initial'][:mintap], T)
        else:
            mintaps.append(0)
            lengths.append(scalar_shared(numpy.int64(0),
                                         name='l%d' % pos))

    # 3. Generate arguments for the function passed to scan. This
    # function will return the outputs that need to be computed at every
    # time step
    inputs_slices = [input[t] for input in inputs]
    states_slices = []
    for n, state in enumerate(states_and_outputs_info):
        # Check if it is actually a state and not an output
        if mintaps[n] != 0:
            for k in state['taps']:
                states_slices.append(
                    state['initial'][(t + mintaps[n] + k) % lengths[n]])

    # 4. Construct outputs that are to be computed by the inner
    # function of scan
    args = inputs_slices + states_slices + parameters
    cond, states_and_outputs, updates = \
        scan_utils.get_updates_and_outputs(fn(*args))

    # User is allowed to provide no information if it only behaves like a
    # map
    if (len(states_and_outputs) != len(states_and_outputs_info) and
            len(states_and_outputs_info) == 0):
        mintaps = [0] * len(states_and_outputs)

    # 5. Construct the scan op
    # 5.1 Construct list of shared variables with updates (those that
    # can be treated as states (i.e. of TensorType) and those that can not
    # (like Random States)
    if cond is not None:
        _cond = [cond]
    else:
        _cond = []
    rvals = rebuild_collect_shared(states_and_outputs + _cond,
                                   updates=updates,
                                   rebuild_strict=True,
                                   copy_inputs_over=True,
                                   no_default_updates=False)
    # extracting the arguments
    input_variables, cloned_outputs, other_rval = rvals
    clone_d, update_d, update_expr, shared_inputs = other_rval
    additional_input_states = []
    additional_output_states = []
    additional_lengths = []
    additional_mintaps = []
    original_numeric_shared_variables = []

    non_numeric_input_states = []
    non_numeric_output_states = []
    original_non_numeric_shared_variables = []
    pos = len(lengths)
    for sv in shared_inputs:
        if sv in update_d:
            if isinstance(sv, (TensorVariable, TensorSharedVariable)):
                # We can treat it as a sit sot
                nw_state = scan_utils.expand(
                    tensor.unbroadcast(tensor.shape_padleft(sv), 0), T)
                additional_lengths.append(scalar_shared(numpy.int64(0),
                                                        name='l%d' % pos))
                pos = pos + 1
                additional_mintaps.append(1)
                additional_input_states.append(nw_state)
                additional_output_states.append(
                    scan_utils.clone(tensor.set_subtensor(
                        nw_state[(t + 1) % additional_lengths[-1]],
                        update_d[sv])))
                original_numeric_shared_variables.append(sv)
            else:
                non_numeric_input_states.append(sv)
                non_numeric_output_states.append(update_d[sv])
                original_non_numeric_shared_variables.append(sv)

    # Replace shared variables in the update
    _additional_output_states = []
    replace = {}
    for sv, buf in zip(original_numeric_shared_variables,
                       additional_input_states):
        replace[sv] = buf[t]
    for out in additional_output_states:
        _additional_output_states.append(
            scan_utils.clone(out, replace=replace))
    additional_output_states = _additional_output_states

    # 5.2 Collect inputs/outputs of the inner function
    inputs = []
    outputs = []
    for n, mintap in enumerate(mintaps):
        if mintap != 0:
            input_state = states_and_outputs_info[n]['initial']
            inputs.append(input_state)
            outputs.append(
                tensor.set_subtensor(
                    input_state[(t + mintap) % lengths[n]],
                    states_and_outputs[n]))
        else:
            mem_buffer = scan_utils.allocate_memory(
                T, states_and_outputs_info[n], states_and_outputs[n])
            inputs.append(mem_buffer)
            outputs.append(
                tensor.set_subtensor(mem_buffer[t % lengths[n]],
                                     states_and_outputs[n]))
    inputs.extend(additional_input_states)
    outputs.extend(additional_output_states)
    lengths.extend(additional_lengths)
    mintaps.extend(additional_mintaps)
    inputs.extend(non_numeric_input_states)
    outputs.extend(non_numeric_output_states)
    all_other_inputs = gof.graph.inputs(outputs)
    parameters = [x for x in all_other_inputs
                  if (x not in inputs and x not in lengths and x is not t
                      and isinstance(x, gof.Variable) and
                      not isinstance(x, gof.Constant))]
    inputs.extend(parameters)
    # 5.3 Construct the options dictionary
    options['name'] = name
    options['profile'] = profile
    options['mode'] = mode
    options['inplace'] = False
    options['gpu'] = False
    options['truncate_gradient'] = truncate_gradient
    options['hash_inner_graph'] = 0
    # 5.4 Construct the ScanOp instance
    local_op = scan_op.ScanOp(inputs=inputs,
                              outputs=outputs,
                              lengths=lengths,
                              switches=[],
                              mintaps=mintaps,
                              index=t,
                              options=options,
                              as_repeatUntil=cond)
    # Note that we get here all the outputs followed by the update rules to
    # the shared variables we had in our scan.
    # We know that we have (in this given order):
    #   * len(states_and_outputs) real outputs
    #   * len(additional_input_states) updates for numeric shared variables
    #   * len(non_numeric_input_states) updates for non numeric shared
    #     variables
    scan_inputs = [T] + inputs
    scan_outputs_update_rules = scan_utils.to_list(local_op(*scan_inputs))
    # 5.5 Collect outputs and add permutation object
    scan_outputs = []
    for pos in xrange(len(states_and_outputs)):
        out = scan_utils.ScanPermutation(mintaps[pos])(
            scan_outputs_update_rules[pos], t)
        scan_outputs.append(out[mintaps[pos]:])
    # 5.6 Construct updates dictionary
    update_rules = scan_outputs_update_rules[len(states_and_outputs):]
    updates = {}
    for v, u in izip(original_numeric_shared_variables,
                     update_rules[:len(additional_input_states)]):
        updates[v] = u[-1]
    for v, u in izip(original_non_numeric_shared_variables,
                     update_rules[len(additional_input_states):]):
        updates[v] = u
    # Step 5.7 We are done and can return everything back to the user
    return scan_outputs, updates
11,745
def stock_analyst(stock_list):
    """This function accepts a list of stock prices and outputs the best
    day to buy (B) and sell (S) stock.

    Args:
        stock_list: expects a list of stock prices as a parameter

    Returns:
        A string prompting to buy stock first if stock has not been bought,
        i.e. the minimum value in the list is not greater than 1.
        Otherwise it returns [B, S]: the best day to buy stock at its
        minimum value and the best later day to sell it at its maximum
        value.
    """
    B = stock_list.index(min(stock_list))
    buy_value = min(stock_list)
    sell_value = -1
    if buy_value > 1:
        for sell_indx in range(B, len(stock_list)):
            if sell_value < stock_list[sell_indx]:
                sell_value = stock_list[sell_indx]
                S = sell_indx
    else:
        return 'Buy stock first'
    return [B, S]
11,746
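# A minimal usage sketch for stock_analyst above; the price lists here are
# illustrative, not taken from the source.
print(stock_analyst([17, 3, 6, 9, 15, 8, 6, 10]))
# -> [1, 4]: buy at the minimum (index 1), sell at the later maximum (index 4)
print(stock_analyst([17, 3, 6, 9, 15, 8, 6, 1, 10]))
# -> 'Buy stock first', because the minimum price (1) is not greater than 1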
def is_sync_available(admin_id):
    """Method to check the synchronization's availability over the network
    connection.

    Args:
        admin_id (str):     Admin privileges flag.
    """

    return r_synchronizer.is_sync_available()
11,747
def test_boolean005_1861_boolean005_1861_v(mode, save_output, output_format): """ TEST :Facet Schemas for string : value=0 """ assert_bindings( schema="msData/datatypes/boolean.xsd", instance="msData/datatypes/boolean005.xml", class_name="Root", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
11,748
def on_second_thought(divider):
    """Sort the characters of the module-level `unsorted_string` according
    to the number of times they appear in it, drop duplicates, and return
    everything before the given divider character as a string.
    """
    unsorted_list = list(unsorted_string)

    # character occurrence determines the order
    occurence = collections.Counter(unsorted_list)

    # sort by character frequency in descending order
    occurences_list = sorted(unsorted_list, key=occurence.get, reverse=True)

    # already sorted, duplicates would provide no value
    reduced_list = list(collections.OrderedDict.fromkeys(occurences_list))

    divider_position = reduced_list.index(divider)

    # everything behind (and including) the divider is irrelevant
    return ''.join(reduced_list[:divider_position])
11,749
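# Usage sketch for on_second_thought above. It reads the module-level
# variable unsorted_string, so this assumes the call happens in the same
# module with that global set; the sample text and divider are illustrative.
unsorted_string = "aaabbc!"
# Counts: a=3, b=2, c=1, !=1; the sort is stable, so the deduplicated order
# is a, b, c, ! and everything from the divider onward is dropped.
print(on_second_thought('!'))   # -> 'abc'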
def aiohttp_unused_port(loop, aiohttp_unused_port, socket_enabled): """Return aiohttp_unused_port and allow opening sockets.""" return aiohttp_unused_port
11,750
def os_specific_command_line(command_line): """ Gets the operating system specific command string. :param command_line: command line to execute. :type command_line: str """ current_os = os.environ["TEMPLATE_OS"] command = "/bin/bash -c '{}'" if current_os.lower() == "linux" else "cmd.exe /c \"{}\"" return command.format(command_line)
11,751
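# Usage sketch for os_specific_command_line above; TEMPLATE_OS is read from
# the environment, so it is set explicitly here for illustration.
import os

os.environ["TEMPLATE_OS"] = "linux"
print(os_specific_command_line("echo hello"))   # -> /bin/bash -c 'echo hello'

os.environ["TEMPLATE_OS"] = "windows"
print(os_specific_command_line("echo hello"))   # -> cmd.exe /c "echo hello"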
def test_compare_methods(fake_data): """Test that fast and "slow" methods yield same answer.""" data, is_random = fake_data # define model parameters zmean = 8.0 alpha = 0.9 k0 = 1.0 boxsize = 10.0 rsmooth = 1.0 deconvolve = True # use slow function zre1 = zreion.apply_zreion(data, zmean, alpha, k0, boxsize, rsmooth, deconvolve) # use fast function zre2 = zreion.apply_zreion_fast( data, zmean, alpha, k0, boxsize, rsmooth, deconvolve ) assert np.allclose(zre1, zre2) return
11,752
def multi_voters_example(): """ Example of using a combination of many types of voters, which may be seen as multi-kernel learning (MKL). This particular dataset is easy to solve and combining voters degrades performance. However, it might be a good idea for more complex datasets. """ # MinCq parameters, fixed to a given value as this is a simple example. mu = 0.001 # We load iris dataset, We convert the labels to be -1 or 1, and we split it in two parts: train and test. dataset = load_iris() dataset.target[dataset.target == 0] = -1 dataset.target[dataset.target == 2] = -1 X_train, X_test, y_train, y_test = train_test_split(dataset.data, dataset.target, random_state=42) # We create a set of voters of different kind. voters = voter.KernelVotersGenerator(rbf_kernel, gamma=0.01).generate(X_train) voters = np.append(voters, voter.KernelVotersGenerator(rbf_kernel, gamma=0.1).generate(X_train)) voters = np.append(voters, voter.KernelVotersGenerator(rbf_kernel, gamma=1).generate(X_train)) voters = np.append(voters, voter.KernelVotersGenerator(rbf_kernel, gamma=10).generate(X_train)) voters = np.append(voters, voter.KernelVotersGenerator(rbf_kernel, gamma=100).generate(X_train)) voters = np.append(voters, voter.KernelVotersGenerator(polynomial_kernel, degree=2).generate(X_train)) voters = np.append(voters, voter.KernelVotersGenerator(polynomial_kernel, degree=3).generate(X_train)) voters = np.append(voters, voter.KernelVotersGenerator(linear_kernel).generate(X_train)) # We train MinCq using these voters, on the training set. learner = MinCqLearner(mu, voters_type='manual') learner.fit(X_train, y_train, voters) # We predict the train and test labels and print the risk. predictions_train = learner.predict(X_train) predictions_test = learner.predict(X_test) print("\nMultiVotersMinCq") print("-----------") print("Training set risk: {:.4f}".format(zero_one_loss(y_train, predictions_train))) print("Testing set risk: {:.4f}\n".format(zero_one_loss(y_test, predictions_test)))
11,753
def ProgrammingDebug(obj, show_all=False) -> None:
    """Log all attributes of a given object (debugging helper)."""
    try:
        _LOGGER.debug("%s - ProgrammingDebug: %s", DOMAIN, obj)
        for attr in dir(obj):
            if attr.startswith('_') and not show_all:
                continue
            if hasattr(obj, attr):
                _LOGGER.critical("%s - ProgrammingDebug: %s = %s", DOMAIN, attr, getattr(obj, attr))
    except Exception as e:
        _LOGGER.critical("%s - ProgrammingDebug: failed: %s (%s.%s)", DOMAIN, str(e), e.__class__.__module__, type(e).__name__)
11,754
def font_variant(name, tokens): """Expand the ``font-variant`` shorthand property. https://www.w3.org/TR/css-fonts-3/#font-variant-prop """ return expand_font_variant(tokens)
11,755
def with_part_names(*part_names): """Add part names for garage.parts.assemble. Call this when you want to assemble these parts but do not want them to be passed to main. """ return lambda main: ensure_app(main).with_part_names(*part_names)
11,756
def movieServiceProvider(movie_duration_List, flight_duration):
    """
    Assuming users will watch exactly two movies,
    and assuming no two movies have the same length.
    O(n^2) time complexity.
    """
    possible_pairs = []
    max_sum = 0
    max_sum_max_index = None
    for i in range(len(movie_duration_List)):
        for j in range(len(movie_duration_List)):
            if (i != j and movie_duration_List[i] + movie_duration_List[j] <= flight_duration):
                min_new = min(movie_duration_List[i], movie_duration_List[j])
                max_new = max(movie_duration_List[i], movie_duration_List[j])
                if (min_new, max_new) not in possible_pairs:
                    sum_new = min_new + max_new
                    possible_pairs.append((min_new, max_new))
                    if sum_new >= max_sum:
                        if sum_new == max_sum:
                            if max_sum_max_index is not None and possible_pairs[max_sum_max_index][1] < max_new:
                                max_sum_max_index = len(possible_pairs) - 1
                        else:
                            max_sum = sum_new
                            max_sum_max_index = len(possible_pairs) - 1

    print("movie lengths :", movie_duration_List, "flight duration : ", flight_duration)
    print("Total count of possible pair of movies : ", len(possible_pairs))
    print("all possible pair of movies : ", possible_pairs)
    print("Best possible movie length : ", max_sum)
    print("Best possible movie duration : ", possible_pairs[max_sum_max_index])
    print("-" * 200)
11,757
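# Usage sketch for movieServiceProvider above; the durations are made up.
# The function prints its findings rather than returning them; for a
# 250-minute flight it should report (120, 125) as the best pair (total 245).
movieServiceProvider([90, 85, 75, 60, 120, 150, 125], flight_duration=250)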
def library_view(request): """View for image library.""" if request.user.is_authenticated: the_user = request.user albums = Album.objects.filter(user=the_user) context = {'the_user': the_user, 'albums': albums} return render(request, 'imager_profile/library.html', context)
11,758
def combine_html_json_pbp(json_df, html_df, game_id, date): """ Join both data sources. First try merging on event id (which is the DataFrame index) if both DataFrames have the same number of rows. If they don't have the same number of rows, merge on: Period', Event, Seconds_Elapsed, p1_ID. :param json_df: json pbp DataFrame :param html_df: html pbp DataFrame :param game_id: id of game :param date: date of game :return: finished pbp """ # Don't need those columns to merge in json_df = json_df.drop(['p1_name', 'p2_name', 'p2_ID', 'p3_name', 'p3_ID'], axis=1) try: html_df.Period = html_df.Period.astype(int) # If they aren't equal it's usually due to the HTML containing a challenge event if html_df.shape[0] == json_df.shape[0]: json_df = json_df[['period', 'event', 'seconds_elapsed', 'xC', 'yC']] game_df = pd.merge(html_df, json_df, left_index=True, right_index=True, how='left') else: # We always merge if they aren't equal but we check if it's due to a challenge so we can print out a better # warning message for the user. # NOTE: May be slightly incorrect. It's possible for there to be a challenge and another issue for one game. if'CHL' in list(html_df.Event): shared.print_warning("The number of columns in the Html and Json pbp are different because the" " Json pbp, for some reason, does not include challenges. Will instead merge on " "Period, Event, Time, and p1_id.") else: shared.print_warning("The number of columns in the Html and json pbp are different because " "someone fucked up. Will instead merge on Period, Event, Time, and p1_id.") # Actual Merging game_df = pd.merge(html_df, json_df, left_on=['Period', 'Event', 'Seconds_Elapsed', 'p1_ID'], right_on=['period', 'event', 'seconds_elapsed', 'p1_ID'], how='left') # This is always done - because merge doesn't work well with shootouts game_df = game_df.drop_duplicates(subset=['Period', 'Event', 'Description', 'Seconds_Elapsed']) except Exception as e: shared.print_warning('Problem combining Html Json pbp for game {}'.format(game_id, e)) return game_df['Game_Id'] = game_id[-5:] game_df['Date'] = date return pd.DataFrame(game_df, columns=pbp_columns)
11,759
def CreateAlerts(config): """"Creates Stackdriver alerts for logs-based metrics.""" # Stackdriver alerts can't yet be created in Deployment Manager, so create # them here. alert_email = config.project.get('stackdriver_alert_email') if alert_email is None: logging.warning('No Stackdriver alert email specified, skipping creation ' 'of Stackdriver alerts.') return project_id = config.project['project_id'] # Create an email notification channel for alerts. logging.info('Creating Stackdriver notification channel.') channel = utils.CreateNotificationChannel(alert_email, project_id) logging.info('Creating Stackdriver alerts.') utils.CreateAlertPolicy( 'global', 'iam-policy-change-count', 'IAM Policy Change Alert', ('This policy ensures the designated user/group is notified when IAM ' 'policies are altered.'), channel, project_id) utils.CreateAlertPolicy( 'gcs_bucket', 'bucket-permission-change-count', 'Bucket Permission Change Alert', ('This policy ensures the designated user/group is notified when ' 'bucket/object permissions are altered.'), channel, project_id) for data_bucket in config.project.get('data_buckets', []): # Every bucket with 'expected_users' has an expected-access alert. if 'expected_users' in data_bucket: bucket_name = project_id + data_bucket['name_suffix'] metric_name = 'unexpected-access-' + bucket_name utils.CreateAlertPolicy( 'gcs_bucket', metric_name, 'Unexpected Access to {} Alert'.format(bucket_name), ('This policy ensures the designated user/group is notified when ' 'bucket {} is accessed by an unexpected user.'.format(bucket_name)), channel, project_id)
11,760
def read_json_file(path): """ Given a line-by-line JSON file, this function converts it to a Python dict and returns all such lines as a list. :param path: the path to the JSON file :returns items: a list of dictionaries read from a JSON file """ items = list() with open(path, 'r') as raw_data: for line in raw_data: line = json.loads(line) items.append(line) return items
11,761
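# Usage sketch for read_json_file above: it expects one JSON object per line
# (JSON Lines), so a small temporary file is written here for illustration.
import json
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".jsonl", delete=False) as tmp:
    tmp.write(json.dumps({"id": 1, "text": "first"}) + "\n")
    tmp.write(json.dumps({"id": 2, "text": "second"}) + "\n")
    tmp_path = tmp.name

records = read_json_file(tmp_path)
print(len(records), records[0]["text"])   # -> 2 first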
def get_boss_wage2(employee): """ Monadic version. """ return bind3(bind3(unit3(employee), Employee.get_boss), Employee.get_wage)
11,762
def format_history(src, dest, format="basic"): """ Formats history based on module `releases <https://github.com/bitprophet/releases>`_. @param src source history (file) @param dest destination (file) Parameter *format* was added. :epkg:`Sphinx` extension *release* no longer used but the formatting is still available. """ with open(src, "r", encoding="utf-8") as f: lines = f.readlines() new_lines = [] if format == "release": tag = None for i in range(0, len(lines)): line = lines[i].rstrip("\r\t\n ") if line.startswith("===") and i > 0: rel = lines[i - 1].rstrip("\r\t\n ") if "." in rel: del new_lines[-1] res = "* :release:`{0}`".format(rel) res = res.replace("(", "<").replace(")", ">") if new_lines[-1].startswith("==="): new_lines.append("") new_lines.append(res) tag = None else: new_lines.append(line) elif len(line) > 0: if line.startswith("**"): ll = line.lower().strip("*") if ll in ('bug', 'bugfix', 'bugfixes'): tag = "bug" elif ll in ('features', 'feature'): tag = "feature" elif ll in ('support', 'support'): tag = "support" else: raise ValueError( "Line {0}, unable to infer tag from '{1}'".format(i, line)) else: nline = line.lstrip("* ") if nline.startswith("`"): if tag is None: tag = 'issue' res = "* :{0}:{1}".format(tag, nline) if new_lines[-1].startswith("==="): new_lines.append("") new_lines.append(res) else: new_lines.append(line) if line.startswith(".. _"): new_lines.append("") elif format == "basic": reg = re.compile("(.*?)`([0-9]+)`:(.*?)[(]([-0-9]{10})[)]") for line in lines: match = reg.search(line) if match: gr = match.groups() new_line = "{0}:issue:`{1}`:{2}({3})".format(*gr) new_lines.append(new_line) else: new_lines.append(line.strip("\n\r")) else: raise ValueError("Unexpected value for format '{0}'".format(format)) with open(dest, "w", encoding="utf-8") as f: f.write("\n".join(new_lines))
11,763
def reverse_migration(apps, schema_editor): """There's no need to do anything special to reverse these migrations.""" return
11,764
def keypoint_angle(kp1, kp2):
    """Compute the angle between two keypoints.

    Angles of 180 degrees or more are folded back by 180 degrees before the
    absolute difference is taken.
    """
    k = [
        (kp1.angle - 180) if kp1.angle >= 180 else kp1.angle,
        (kp2.angle - 180) if kp2.angle >= 180 else kp2.angle
    ]

    if k[0] == k[1]:
        return 0
    else:
        return abs(k[0] - k[1])
11,765
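# Usage sketch for keypoint_angle above. Only the .angle attribute is used,
# so a SimpleNamespace stands in for cv2.KeyPoint here; in real code the
# arguments would come from an OpenCV feature detector.
from types import SimpleNamespace

kp1 = SimpleNamespace(angle=350.0)
kp2 = SimpleNamespace(angle=30.0)
# 350 is folded back to 170 while 30 stays as-is, so the gap is 140.
print(keypoint_angle(kp1, kp2))   # -> 140.0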
def get_args_static_distribute_cells(): """ Distribute ranges of cells across workers. :return: list of lists """ pop_names_list = [] gid_lists = [] for pop_name in context.pop_names: count = 0 gids = context.spike_trains[pop_name].keys() while count < len(gids): pop_names_list.append(pop_name) gid_lists.append(gids[count:count+context.gid_block_size]) count += context.gid_block_size return [pop_names_list, gid_lists]
11,766
def test_ioc_set_ignored(cbcsdk_mock): """Tests setting the ignore status of an IOC.""" cbcsdk_mock.mock_request("PUT", "/threathunter/watchlistmgr/v3/orgs/test/reports/a1b2/iocs/foo/ignore", IOC_GET_IGNORED) api = cbcsdk_mock.api ioc = IOC_V2.create_equality(api, "foo", "process_name", "Alpha") with pytest.raises(InvalidObjectError): ioc.ignore() ioc._report_id = "a1b2" ioc.ignore() ioc._info['id'] = None with pytest.raises(InvalidObjectError): ioc.ignore()
11,767
def le(input, other, *args, **kwargs): """ In ``treetensor``, you can get less-than-or-equal situation of the two tree tensors with :func:`le`. Examples:: >>> import torch >>> import treetensor.torch as ttorch >>> ttorch.le( ... torch.tensor([[1, 2], [3, 4]]), ... torch.tensor([[1, 1], [4, 4]]), ... ) tensor([[ True, False], [ True, True]]) >>> ttorch.le( ... ttorch.tensor({ ... 'a': [[1, 2], [3, 4]], ... 'b': [1.0, 1.5, 2.0], ... }), ... ttorch.tensor({ ... 'a': [[1, 1], [4, 4]], ... 'b': [1.3, 1.2, 2.0], ... }), ... ) <Tensor 0x7ff363bc6198> ├── a --> tensor([[ True, False], │ [ True, True]]) └── b --> tensor([ True, False, True]) """ return torch.le(input, other, *args, **kwargs)
11,768
def linear_to_srgb(data):
    """Convert linear color data to sRGB.

    Accessed from https://entropymine.com/imageworsener/srgbformula

    Parameters
    ----------
    data: :class:`numpy.ndarray`, required
        Array of any shape containing linear data to be converted to sRGB.

    Returns
    -------
    converted: :class:`numpy.ndarray`
        Array with the same shape as `data` containing values in sRGB space.
    """
    return numpy.where(data <= 0.0031308, data * 12.92, 1.055 * numpy.power(data, 1 / 2.4) - 0.055)
11,769
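# Usage sketch for linear_to_srgb above, exercising both branches of the
# piecewise sRGB transfer function.
import numpy

linear = numpy.array([0.0, 0.002, 0.0031308, 0.5, 1.0])
# Values at or below 0.0031308 are scaled by 12.92; larger values follow the
# 1/2.4 power curve, so 1.0 maps to 1.0 and 0.5 to roughly 0.735.
print(numpy.round(linear_to_srgb(linear), 4))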
def yices_reset_yval_vector(v): """Resets a yval (node descriptor) vector, for storing non atomic values in models.""" libyices.yices_reset_yval_vector(pointer(v))
11,770
async def test_build_and_deploy_prometheus_tester(ops_test, prometheus_tester_charm): """Test that Prometheus tester charm can be deployed successfully.""" app_name = "prometheus-tester" await ops_test.model.deploy( prometheus_tester_charm, resources=tester_resources, application_name=app_name ) await ops_test.model.wait_for_idle(apps=[app_name], status="active") await ops_test.model.block_until(lambda: len(ops_test.model.applications[app_name].units) > 0) assert ops_test.model.applications[app_name].units[0].workload_status == "active" await ops_test.model.applications[app_name].remove() await ops_test.model.block_until(lambda: app_name not in ops_test.model.applications) await ops_test.model.reset()
11,771
def circuit_to_dagdependency(circuit): """Build a ``DAGDependency`` object from a ``QuantumCircuit``. Args: circuit (QuantumCircuit): the input circuits. Return: DAGDependency: the DAG representing the input circuit as a dag dependency. """ dagdependency = DAGDependency() dagdependency.name = circuit.name for register in circuit.qregs: dagdependency.add_qreg(register) for register in circuit.cregs: dagdependency.add_creg(register) for operation, qargs, cargs in circuit.data: dagdependency.add_op_node(operation, qargs, cargs) dagdependency._add_successors() return dagdependency
11,772
def node_rgb_color(node_name, linear=True): """ Returns color of the given node :param node_name: str :param linear: bool, Whether or not the RGB should be in linear space (matches viewport color) :return: """ raise NotImplementedError()
11,773
def tweets_factory(fixtures_factory): """Factory for tweets from YAML file""" def _tweets_factory(yaml_file): all_fixtures = fixtures_factory(yaml_file) return [t for t in all_fixtures if isinstance(t, TweetBase)] return _tweets_factory
11,774
def monte_carlo(ds,duration,n,pval,timevar): """ pval: two-tailed pval """ x=0 mc = np.empty([ds.shape[1],ds.shape[2],n]) while x<n: dummy = np.random.randint(0, len(ds[timevar])-duration, size=1) # have to adjust size so total number of points is always the same mc[:,:,x] = ds[int(dummy):int(dummy+duration),::].mean(timevar) x=x+1 # derive percentile perc_upper = np.nanpercentile(mc,100-pval,axis=2) perc_lower = np.nanpercentile(mc,pval,axis=2) return perc_lower,perc_upper
11,775
def num_crl(wf_n):
    """Build the complex wave function and the time vector from the given
    real-valued input array.

    The first row of the input is the time vector; the remaining rows hold
    real and imaginary parts in alternating order.

    Args:
        wf_n(numpy array): Wave function over time, with the time vector as
            its first row.

    Returns:
        numpy array, complex: The complex wave function over time.
        numpy array: The time vector.
    """
    # extract the time vector and remove it from the array
    time_vc = wf_n[0]
    wf_n = np.delete(wf_n, [0], axis=0)

    # the lengths of the vectors
    t_wf = len(wf_n[0])
    p_wf = len(wf_n[:, 0])

    # turning the array into a complex one
    comp_vc = np.zeros([p_wf, t_wf], dtype=np.complex_)

    for n in range(p_wf):
        comp_vc[:, n] = wf_n[n * 2] + wf_n[1 + n * 2] * 1j

    return comp_vc, time_vc
11,776
def test_dbt_parse_mocked_all_args(): """Test mocked dbt parse call with all arguments.""" op = DbtParseOperator( task_id="dbt_task", project_dir="/path/to/project/", profiles_dir="/path/to/profiles/", profile="dbt-profile", target="dbt-target", vars={"target": "override"}, log_cache_events=True, ) assert op.command == "parse" config = op.get_dbt_config() assert isinstance(config, ParseTaskConfig) is True assert config.project_dir == "/path/to/project/" assert config.profiles_dir == "/path/to/profiles/" assert config.profile == "dbt-profile" assert config.target == "dbt-target" assert config.vars == '{"target": "override"}' assert config.log_cache_events is True
11,777
def resample_time_series(series, period="MS"):
    """
    Resample and interpolate a time series so we have one row per time
    period (useful for FFT)

    Parameters
    ----------
    series: Series
        pandas Series with a date index (or an index convertible to datetime)
    period: string
        Period for resampling

    Returns
    -------
    Series: pandas Series with datetime index, one row per time period
    """
    # just in case the index isn't already datetime type
    series.index = pd.to_datetime(series.index)
    # resample to get one row per time period
    rseries = series.resample(period).mean()
    new_series = rseries.interpolate()
    return new_series
11,778
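# Usage sketch for resample_time_series above: an irregular series with a
# missing month is resampled to month-start frequency and interpolated.
import pandas as pd

raw = pd.Series(
    [1.0, 3.0, 7.0],
    index=pd.to_datetime(["2020-01-15", "2020-03-10", "2020-04-20"]),
)
monthly = resample_time_series(raw, period="MS")
# 2020-02-01 has no observation, so its value (2.0) is interpolated between
# the January and March means.
print(monthly)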
def _timeliness_todo(columns, value, df, dateFormat=None, timeFormat=None): """ Returns what (columns, as in spark columns) to compute to get the results requested by the parameters. :param columns: :type columns: list :param value :type value: str :param df: :type df: DataFrame :param dateFormat: :type dateFormat: str :param timeFormat: :type timeFormat: str :return: Pyspark columns representing what to compute. """ assert (dateFormat is None or timeFormat is None) and ( not dateFormat is None or not timeFormat is None), "Pass either a dateFormat or a timeFormat, " \ "not both. " todo = [] types = dict(df.dtypes) if dateFormat: value_date = to_date(lit(value), dateFormat) for c in columns: if types[c] == "timestamp" or types[c] == "date": todo.append(sum(when(datediff(value_date, c) > 0, 1).otherwise(0)).alias(c)) elif types[c] == "string": todo.append(sum(when(datediff(value_date, to_date(c, dateFormat)) > 0, 1).otherwise(0)).alias(c)) else: print( "Type of a column on which the timeliness metric is run must be either timestamp, " "date or string, if the metric is being run on dateFormat.") exit() elif timeFormat: value_long = to_timestamp(lit(value), timeFormat).cast("long") # check if value contains a date and not only hours, minutes, seconds has_date = _contains_date(timeFormat) if has_date: for c in columns: if types[c] == "timestamp": todo.append(sum(when(value_long - col(c).cast("long") > 0, 1).otherwise(0)).alias(c)) elif types[c] == "string": todo.append( sum(when(value_long - to_timestamp(col(c), timeFormat).cast("long") > 0, 1).otherwise(0)).alias( c)) else: print( "Type of a column on which the timeliness metric is run must be either timestamp or string, if " "the metric is being run on a timeFormat") exit() else: for c in columns: if types[c] == "timestamp": """ If there is no years, months, days we must ignore the years, months, days in the timestamp. """ value_long = to_timestamp(lit(value), timeFormat) # remove years, months, days value_long = value_long.cast("long") - value_long.cast("date").cast("timestamp").cast("long") # check for difference, but only considering hours, minutes, seconds todo.append(sum( when( value_long - (col(c).cast("long") - col(c).cast("date").cast("timestamp").cast("long")) > 0, 1).otherwise(0)).alias(c)) elif types[c] == "string": """ If there are no years, months, days and the column is in the same format, meaning that it also has no years, months, days, this means that they will be both initialized to the same year, month, day; so years, months, days will be basically ignored. """ todo.append( sum(when((value_long - to_timestamp(c, timeFormat).cast("long")) > 0, 1).otherwise(0)).alias(c)) else: print( "Type of a column on which the timeliness metric is run must be either timestamp or string, if " "the metric is being run on a timeFormat") exit() return todo
11,779
def test_release_non_existant_alloc(): """Release allocation that doesn't exist""" with pytest.raises(LauncherError): slurm.release_allocation(00000000)
11,780
def build_arm( simulator, n_elem:int=11, override_params:Optional[dict]=None, attach_head:bool=None, # TODO: To be implemented attach_weight:Optional[bool]=None, # TODO: To be implemented ): """ Import default parameters (overridable) """ param = _OCTOPUS_PROPERTIES.copy() # Always copy parameter for safety if isinstance(override_params, dict): param.update(override_params) """ Import default parameters (non-overridable) """ arm_scale_param = _DEFAULT_SCALE_LENGTH.copy() """ Set up an arm """ L0 = arm_scale_param['base_length'] r0 = arm_scale_param['base_radius'] arm_pos = np.array([0.0, 0.0, 0.0]) arm_dir = np.array([1.0, 0.0, 0.0]) normal = np.array([0.0, 0.0, 1.0]) rod = CosseratRod.straight_rod( n_elements=n_elem, start=arm_pos, direction=arm_dir, normal=normal, **arm_scale_param, **param, ) simulator.append(rod) """Add gravity forces""" _g = -9.81 gravitational_acc = np.array([0.0, 0.0, _g]) simulator.add_forcing_to(rod).using( GravityForces, acc_gravity=gravitational_acc ) """Add friction forces (always the last thing before finalize)""" contact_k = 1e2 # TODO: These need to be global parameter to tune contact_nu = 1e1 period = 2.0 origin_plane = np.array([0.0, 0.0, -r0]) slip_velocity_tol = 1e-8 froude = 0.1 mu = L0 / (period * period * np.abs(_g) * froude) if param['friction_symmetry']: kinetic_mu_array = np.array( [mu, mu, mu] ) * param['friction_multiplier'] # [forward, backward, sideways] else: kinetic_mu_array = np.array( [mu, 1.5 * mu, 2.0 * mu] ) * param['friction_multiplier'] # [forward, backward, sideways] static_mu_array = 2 * kinetic_mu_array simulator.add_forcing_to(rod).using( AnisotropicFrictionalPlane, k=contact_k, nu=contact_nu, plane_origin=origin_plane, plane_normal=normal, slip_velocity_tol=slip_velocity_tol, static_mu_array=static_mu_array, kinetic_mu_array=kinetic_mu_array, ) return rod
11,781
def check_user(user, pw, DB): """ Check if user exists and if password is valid. Return the user's data as a dict or a string with an error message. """ userdata = DB.get(user) if not userdata: log.error("Unknown user: %s", user) return "Unknown user: %s" % user elif userdata.get(C.Password) != pw: log.error("Invalid password!") return "Invalid password!" return userdata
11,782
def get_single_response_value(dom_response_list: list, agg_function): """ Get value of a single scenario's response. :param dom_response_list: Single response provided as a list of one term. :param agg_function: Function to aggregate multiple responses. :return: Value of such observation. """ response_list = extract_list_from_dom(dom_object=dom_response_list[0], tag_name='Observation', attribute_name='Value') if len(response_list) == 0: response_value = np.NaN else: try: response_value = agg_function([float(item) for item in response_list]) except TypeError: response_value = np.NaN return response_value
11,783
def check_gpu_plugin(): """ Check for the gpuCache plugin load """ if not cmds.pluginInfo('gpuCache', query=True, loaded=True): cmds.loadPlugin('gpuCache')
11,784
def sharpe_ratio(returns, periods=252): """ Create the Sharpe ratio for the strategy, based on a benchmark of zero (i.e. no risk-free rate information). Args: returns (list, Series) - A pandas Series representing period percentage returns. periods (int.) Daily (252), Hourly (252*6.5), Minutely(252*6.5*60) etc. Returns: float. The result """ return np.sqrt(periods) * (np.mean(returns)) / np.std(returns)
11,785
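# Usage sketch for sharpe_ratio above on synthetic daily returns; the
# annualisation factor defaults to 252 trading days.
import numpy as np

rng = np.random.default_rng(0)
daily_returns = rng.normal(loc=0.0005, scale=0.01, size=252)
print(round(sharpe_ratio(daily_returns), 3))
# For hourly data the same call would pass periods=252 * 6.5 instead.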
def is_validated(user): """Is this user record validated?""" # An account is "validated" if it has the `validated` field set to True, or # no `validated` field at all (for accounts created before the "account # validation option" was enabled). return user.get("validated", True)
11,786
def current_chart_provider_monthly(): """ API for monthly provider chart """ mytime = dubwebdb.CTimes("%Y-%m", request.args.get('time_start'), request.args.get('time_end')) myids = dubwebdb.Ids(prv_id=sanitize_list(request.args.get('prvid')), team_id=sanitize_list(request.args.get('teamid')), project_id=request.args.get('prjid'), div_id=None) csv_only = request.args.get('dl_csv') if csv_only: myrows = dubwebdb.get_data_budget_provider(mytime, myids) return convert_to_download_csv(myrows) else: return dubwebdb.get_data_provider(mytime, myids, add_budget=True)
11,787
def create_round_meander(radius, theta=0, offset=Point()): """ Returns a single period of a meandering path based on radius and angle theta """ deg_to_rad = 2 * pi / 360 r = radius t = theta * deg_to_rad # The calculation to obtain the 'k' coefficient can be found here: # http://itc.ktu.lt/itc354/Riskus354.pdf # "APPROXIMATION OF A CUBIC BEZIER CURVE BY CIRCULAR ARCS AND VICE VERSA" # by Aleksas Riskus k = 0.5522847498 # the control points need to be shortened relative to the angle by this factor j = 2*t/pi path = "m %s,%s " % (-2*r*cos(t)-offset.x, -offset.y) path += "c %s,%s %s,%s %s,%s " % (-k*r*j*sin(t),-k*r*j*cos(t), -(r-r*cos(t)),-r*sin(t)+r*k*j, -(r-r*cos(t)),-r*sin(t)) path += "c %s,%s %s,%s %s,%s " % (0,-k*r, r-k*r,-r, r,-r) path += "c %s,%s %s,%s %s,%s " % (k*r,0, r,r-k*r, r,r) path += "c %s,%s %s,%s %s,%s " % (0,k*r*j, -(r-r*cos(t)-k*r*j*sin(t)),r*sin(t)-r*k*j*cos(t), -r+r*cos(t),r*sin(t)) path += "c %s,%s %s,%s %s,%s " % (-k*r*j*sin(t),k*r*j*cos(t), -(r-r*cos(t)),r*sin(t)-r*k*j, -(r-r*cos(t)),r*sin(t)) path += "c %s,%s %s,%s %s,%s " % (0,k*r, r-k*r,r, r,r) path += "c %s,%s %s,%s %s,%s " % (k*r,0, r,-r+k*r, r,-r) path += "c %s,%s %s,%s %s,%s " % (0,-k*r*j, -(r-r*cos(t)-k*r*j*sin(t)),-r*sin(t)+r*k*j*cos(t), -r+r*cos(t),-r*sin(t)) return path
11,788
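# Usage sketch for create_round_meander above. The offset default relies on
# a Point class with x/y attributes defined elsewhere in that module, so a
# small stand-in object is passed explicitly here; cos, sin and pi are
# assumed to come from math at module level.
from types import SimpleNamespace

path_d = create_round_meander(radius=1.5, theta=30,
                              offset=SimpleNamespace(x=0.0, y=0.0))
# The result is an SVG path string ("m ... c ... c ...") that can be used
# directly as the d attribute of a <path> element.
print(path_d[:60])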
def get_env_info() -> Dict[str, Any]: """Get the environment information.""" return { "k2-version": k2.version.__version__, "k2-build-type": k2.version.__build_type__, "k2-with-cuda": k2.with_cuda, "k2-git-sha1": k2.version.__git_sha1__, "k2-git-date": k2.version.__git_date__, "lhotse-version": lhotse.__version__, "torch-cuda-available": torch.cuda.is_available(), "torch-cuda-version": torch.version.cuda, "python-version": sys.version[:3], "icefall-git-branch": get_git_branch_name(), "icefall-git-sha1": get_git_sha1(), "icefall-git-date": get_git_date(), "icefall-path": str(Path(__file__).resolve().parent.parent), "k2-path": str(Path(k2.__file__).resolve()), "lhotse-path": str(Path(lhotse.__file__).resolve()), "hostname": socket.gethostname(), "IP address": socket.gethostbyname(socket.gethostname()), }
11,789
def load_config(path): """Loads the config dict from a file at path; returns dict.""" with open(path, "rb") as f: config = pickle.load(f) return config
11,790
def _check_year(clinicaldf: pd.DataFrame, year_col: int, filename: str, allowed_string_values: list = []) -> str: """Check year columns Args: clinicaldf: Clinical dataframe year_col: YEAR column filename: Name of file allowed_string_values: list of other allowed string values Returns: Error message """ error = '' if process_functions.checkColExist(clinicaldf, year_col): # Deal with pre-redacted values and other allowed strings # first because can't int(text) because there are # instances that have <YYYY year_series = clinicaldf[year_col][ ~clinicaldf[year_col].isin(allowed_string_values) ] year_now = datetime.datetime.utcnow().year try: years = year_series.apply( lambda x: datetime.datetime.strptime( str(int(x)), '%Y').year > year_now ) # Make sure that none of the years are greater than the current # year. It can be the same, but can't future years. assert not years.any() except Exception: error = (f"{filename}: Please double check your {year_col} " "column, it must be an integer in YYYY format " f"<= {year_now}") # Tack on allowed string values if allowed_string_values: error += " or '{}'.\n".format( "', '".join(allowed_string_values) ) else: error += ".\n" else: error = f"{filename}: Must have {year_col} column.\n" return error
11,791
def matchups( sport, datasets, date, file_type, compression, batch_size, groups, output, config, tz, parallel, complete, force ): """Retrieves Fox Sports Matchups Data""" start = time.time() if force: os.environ["AUTO_MAKEDIRS"] = "1" config = config or {} groups = groups or [] datasets = datasets or [] config = merge_dicts( read_config(f"fox-sports-{sport}.yaml", layer="data_lake"), config, ) logger.info("Configuring Fox Sports Matchups for '{sport}'", sport=sport, config=config) relations = relationships.Relations() datasets_map = relations.datasets_map for group in groups: for dataset in relations.groups[group]: create_relation(datasets_map, dataset, *relations.dependencies.get(dataset, [])) datasets_map[dataset]["export"] = True for dataset in datasets: create_relation(datasets_map, dataset, *relations.dependencies.get(dataset, [])) datasets_map[dataset]["export"] = True if complete: for dataset in datasets_map: datasets_map[dataset]["fetch"] = True datasets_map[dataset]["export"] = True orchestrate = Orchestrator( date=date, parallel=parallel, feeds_config=config, file_type=file_type, compression=compression, batch_size=batch_size, permissions=datasets_map, tz=tz, output=output, ) orchestrate.start() if compression in get_supported_extensions(): file_ext = FILE_EXT[file_type] + compression else: file_ext = FILE_EXT[file_type] for uri, record_count in orchestrate.written_files.items(): full_path = uri if uri.endswith(file_ext) else f"{uri}{file_ext}" logger.info("'{file}' written with {records} records", records=record_count, file=full_path) logger.info("Process Complete in {total_runtime} seconds!", total_runtime=round(time.time() - start, 4))
11,792
def test_increment_version(): """Run test cases to ensure that increment_version works correctly. This is critical since running release_build.py with the --auto switch will automatically increment the release versions without prompting the user to verify the new versions.""" assert increment_version('1.0.3') == '1.0.4' assert increment_version('1.0.9') == '1.0.10' assert increment_version('1.1.9') == '1.1.10' assert increment_version('1.3.0') == '1.3.1' assert increment_version('1.3.1') == '1.3.2' assert increment_version('1.9.9') == '1.9.10' assert increment_version('3.6.22') == '3.6.23' assert increment_version('3.22.6') == '3.22.7' assert increment_version('1.0.3.1') == '1.0.4' assert increment_version('1.0.9.1') == '1.0.10' assert increment_version('1.1.9.1') == '1.1.10' assert increment_version('1.3.0.1') == '1.3.1' assert increment_version('1.3.1.1') == '1.3.2' assert increment_version('1.9.9.1') == '1.9.10' assert increment_version('3.6.22.1') == '3.6.23' assert increment_version('3.22.6.1') == '3.22.7' assert increment_version('1.0.3', True) == '1.0.4' assert increment_version('1.0.9', True) == '1.1.0' assert increment_version('1.1.9', True) == '1.2.0' assert increment_version('1.3.0', True) == '1.3.1' assert increment_version('1.3.1', True) == '1.3.2' assert increment_version('1.9.9', True) == '2.0.0' assert increment_version('3.6.22', True) == '3.7.0' assert increment_version('3.22.6', True) == '3.22.7'
11,793
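# increment_version itself is not shown in this listing; the sketch below is
# one minimal implementation consistent with the test cases above (the name
# of the second parameter is a guess): bump the patch number, carry into the
# minor/major components when the flag is set and the patch exceeds 9, and
# drop any fourth version component.
def increment_version(version, carry=False):
    major, minor, patch = (int(part) for part in version.split('.')[:3])
    patch += 1
    if carry and patch > 9:
        patch = 0
        minor += 1
        if minor > 9:
            minor = 0
            major += 1
    return '{}.{}.{}'.format(major, minor, patch)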
def flaskbb_load_migrations(): """Hook for registering additional migrations."""
11,794
def lower_strings(string_list): """ Helper function to return lowercase version of a list of strings. """ return [str(x).lower() for x in string_list]
11,795
def AAprime():
    """Return A'A, i.e. dot(transpose(ATable), ATable), for the module-level
    ATable.

    >> AAprime()
    """
    aprimeA = dot(transpose(ATable), ATable)
    # Aaprime = dot(ATable1, ATable)
    return aprimeA
11,796
def normalize_group_path(group, suffix=None): """ :param group: :param suffix: :return: """ group = os.path.join('/', group) if suffix is not None: if not group.endswith(suffix): group = os.path.join(group, suffix.rstrip('/')) return group
11,797
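# Usage sketch for normalize_group_path above (it builds on os.path.join, so
# the examples assume POSIX-style separators).
print(normalize_group_path("datasets/raw"))                  # -> /datasets/raw
print(normalize_group_path("/datasets", suffix="raw/"))      # -> /datasets/raw
print(normalize_group_path("/datasets/raw", suffix="raw"))   # -> /datasets/raw (suffix already present)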
def _load_dataset(data_filename_or_set, comm, verbosity): """Loads a DataSet from the data_filename_or_set argument of functions in this module.""" printer = _baseobjs.VerbosityPrinter.create_printer(verbosity, comm) if isinstance(data_filename_or_set, str): if comm is None or comm.Get_rank() == 0: if _os.path.splitext(data_filename_or_set)[1] == ".pkl": with open(data_filename_or_set, 'rb') as pklfile: ds = _pickle.load(pklfile) else: ds = _io.read_dataset(data_filename_or_set, True, "aggregate", printer) if comm is not None: comm.bcast(ds, root=0) else: ds = comm.bcast(None, root=0) else: ds = data_filename_or_set # assume a Dataset object return ds
11,798
def test_blog_pagination(web_server: str, browser: DriverAPI, dbsession: Session, publish_posts):
    """When posts exceed batch size, pagination is activated. Test that it's sane."""

    # Direct Splinter browser to the blog showing 5 items per page on page 1
    b = browser
    b.visit(web_server + "/blog/?batch_num=0&batch_size=5")

    # Iterate 5 times navigating with the 'Next' button
    for _ in range(5):

        # After the last 'Next' the button should be disabled
        if _ == 4:
            end_of_blog = b.find_by_xpath('//body/main/div[2]/div/div/div//ul/li[3]')

            # On the final pass the 'Next' button should be disabled
            assert end_of_blog.has_class("disabled")

        b.find_by_xpath('//body/main/div[2]/div/div/div//ul/li[3]/a').first.click()
11,799