| code | docstring | _id |
|---|---|---|
def config(self, **kw): <NEW_LINE> <INDENT> self._config_fixture.config(**kw)
|
Override some configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the fixtures cleanup process.
|
625941bc7b25080760e39340
|
def _run(self): <NEW_LINE> <INDENT> raise NotImplementedError()
|
Run the Docker image.
Must be run inside an executor.
|
625941bc851cf427c661a3f7
|
@match_args_return <NEW_LINE> def p_from_Abs_Pressure(Absolute_Pressure): <NEW_LINE> <INDENT> return (Absolute_Pressure - P0) * 1. / db2Pascal
|
Calculates sea pressure from Absolute Pressure. Note that Absolute
Pressure is in Pa NOT dbar.
Parameters
----------
Absolute_Pressure : array_like
Absolute Pressure [Pa]
Returns
-------
p : array_like
sea pressure [dbar]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
TODO
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See Eqn. (2.2.1).
Modifications:
2011-03-29. Trevor McDougall & Paul Barker
|
625941bccdde0d52a9e52f15
|
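A worked example for the conversion above, filling in the "Examples TODO". This is a sketch that assumes the usual TEOS-10 constants P0 = 101325 Pa (one standard atmosphere) and db2Pascal = 1e4 Pa per dbar:

```python
# Sketch of the conversion, assuming the usual TEOS-10 constants.
P0 = 101325.0    # Pa, one standard atmosphere (assumed value)
db2Pascal = 1e4  # Pa per dbar (assumed value)

def p_from_abs_pressure(absolute_pressure):
    # Sea pressure (dbar) is absolute pressure minus one atmosphere.
    return (absolute_pressure - P0) / db2Pascal

print(p_from_abs_pressure(101325.0 + 1e4))  # 1.0 dbar, roughly 1 m depth
```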
def addNewEntry(entry,dateDict,date=datetime.date.today()): <NEW_LINE> <INDENT> bothSides = entry.split('.') <NEW_LINE> if(len(bothSides)==1): <NEW_LINE> <INDENT> print("Can't split on full stop.") <NEW_LINE> return <NEW_LINE> <DEDENT> if date not in dateDict.keys(): <NEW_LINE> <INDENT> dateDict[date] = [] <NEW_LINE> <DEDENT> tags = _getTagsFromEntry(bothSides[1]) <NEW_LINE> dateDict[date].append((bothSides[0],tags))
|
Add a new entry to the journal for a particular date. If no date
is given, today's date is used.
|
625941bce64d504609d74725
|
def get_all_file_paths(directory): <NEW_LINE> <INDENT> file_paths = [] <NEW_LINE> click.echo(directory) <NEW_LINE> for root, _, files in os.walk(directory): <NEW_LINE> <INDENT> for filename in files: <NEW_LINE> <INDENT> filepath = os.path.join(root, filename) <NEW_LINE> file_paths.append(filepath) <NEW_LINE> <DEDENT> <DEDENT> return file_paths
|
Recursively walk the directory and get paths
to the files inside
|
625941bc50812a4eaa59c20a
|
def make_navitia_wrapper(): <NEW_LINE> <INDENT> url = current_app.config['NAVITIA_URL'] <NEW_LINE> token = current_app.config.get('NAVITIA_TOKEN') <NEW_LINE> instance = current_app.config['NAVITIA_INSTANCE'] <NEW_LINE> return navitia_wrapper.Navitia(url=url, token=token).instance(instance)
|
Return a navitia wrapper used to call the navitia API
|
625941bc460517430c394072
|
def start_and_end(self): <NEW_LINE> <INDENT> print("\nВведите номер вопроса, с которого хотите начать (по умолчанию 1)") <NEW_LINE> self.start = input() <NEW_LINE> try: <NEW_LINE> <INDENT> if self.start == '': <NEW_LINE> <INDENT> self.start = 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.start = int(self.start) - 1 <NEW_LINE> <DEDENT> <DEDENT> except ValueError: <NEW_LINE> <INDENT> print(f"{Fore.RED}Неверный ввод, вы начнете с первого вопроса{Fore.CYAN}") <NEW_LINE> self.start = 0 <NEW_LINE> <DEDENT> print(f"Введите номер вопроса, которым хотите закончить (по умолчанию {len(self.questions)})") <NEW_LINE> self.ends = input() <NEW_LINE> try: <NEW_LINE> <INDENT> if self.ends == '': <NEW_LINE> <INDENT> self.ends = len(self.questions) <NEW_LINE> <DEDENT> elif int(self.ends) > len(self.questions) or int(self.ends) < self.start: <NEW_LINE> <INDENT> print(f"{Fore.RED}Неверный ввод, вы закончите вопросом номер {len(self.questions)} {Fore.CYAN}") <NEW_LINE> self.ends = len(self.questions) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.ends = int(self.ends) <NEW_LINE> <DEDENT> <DEDENT> except ValueError: <NEW_LINE> <INDENT> print(f"{Fore.RED}Неверный ввод, вы закончите вопросом номер {len(self.questions)} {Fore.CYAN}") <NEW_LINE> self.ends = len(self.questions)
|
Asks user for the question to start from and the question to end with
|
625941bc55399d3f05588598
|
def MoveObject(self, obj, new_container): <NEW_LINE> <INDENT> _adclient.MoveObject_adclient(self.obj, obj, new_container)
|
Move the given object to a new container.
|
625941bc07d97122c417876a
|
def rollout_sup(self): <NEW_LINE> <INDENT> self.grid.reset_mdp() <NEW_LINE> self.sup_mistakes = 0 <NEW_LINE> traj = [] <NEW_LINE> for t in range(self.moves): <NEW_LINE> <INDENT> if self.record: <NEW_LINE> <INDENT> raise Exception("Should not be collecting data on test rollout") <NEW_LINE> <DEDENT> x_t = self.mdp.state <NEW_LINE> self.compare_sup_policies(x_t) <NEW_LINE> tmp_pi = self.mdp.pi <NEW_LINE> self.mdp.pi = self.super_pi <NEW_LINE> a_t = self.grid.step(self.mdp) <NEW_LINE> self.mdp.pi = tmp_pi <NEW_LINE> x_t_1 = self.mdp.state
|
Take actions according to the supervisor (whether it is noisy or not)
Run analytics to get the loss of the learner on the supervisor's distribution
|
625941bc3c8af77a43ae3683
|
def play(self, location): <NEW_LINE> <INDENT> self.play_at(self.next_turn, location) <NEW_LINE> self.save()
|
Convenience wrapper to take the next turn and save the results
|
625941bcbe8e80087fb20b2d
|
@register.inclusion_tag('booking/emails/provider_service.html') <NEW_LINE> def render_service(booking_service, provider=None): <NEW_LINE> <INDENT> bs = None <NEW_LINE> c = {} <NEW_LINE> if booking_service.base_service.category == 'T': <NEW_LINE> <INDENT> if hasattr(booking_service, 'booking_package'): <NEW_LINE> <INDENT> bs = BookingProvidedTransfer.objects.get(id=booking_service.id) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> bs = BookingProvidedTransfer.objects.get(id=booking_service.id) <NEW_LINE> <DEDENT> <DEDENT> elif booking_service.base_service.category == 'A': <NEW_LINE> <INDENT> if hasattr(booking_service, 'booking_package'): <NEW_LINE> <INDENT> bs = BookingProvidedAllotment.objects.get(id=booking_service.id) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> bs = BookingProvidedAllotment.objects.get(id=booking_service.id) <NEW_LINE> <DEDENT> <DEDENT> elif booking_service.base_service.category == 'E': <NEW_LINE> <INDENT> if hasattr(booking_service, 'booking_package'): <NEW_LINE> <INDENT> bs = BookingProvidedExtra.objects.get(id=booking_service.id) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> bs = BookingProvidedExtra.objects.get(id=booking_service.id) <NEW_LINE> <DEDENT> <DEDENT> elif booking_service.base_category == 'BP': <NEW_LINE> <INDENT> bs = BookingExtraPackage.objects.get(id=booking_service.id) <NEW_LINE> <DEDENT> c.update({'bs': bs}) <NEW_LINE> if provider: <NEW_LINE> <INDENT> c.update({'provider': provider}) <NEW_LINE> <DEDENT> rooming = bs.rooming_list.all() <NEW_LINE> if not rooming and bs.booking_package: <NEW_LINE> <INDENT> rooming = bs.booking_package.rooming_list.all() <NEW_LINE> <DEDENT> c.update({'rooming_list': rooming}) <NEW_LINE> return c
|
Renders some html into provider emails depending on
booking_service type
|
625941bce8904600ed9f1e0f
|
def __toNight(self): <NEW_LINE> <INDENT> self.record("NIGHT " + str(self.day)) <NEW_LINE> self.timerOn = False <NEW_LINE> for player in self.players: <NEW_LINE> <INDENT> player.timerOn = False <NEW_LINE> <DEDENT> self.time = "Night" <NEW_LINE> for p in self.players: <NEW_LINE> <INDENT> p.vote = None <NEW_LINE> <DEDENT> self.mainComm.cast("Night falls and everyone sleeps") <NEW_LINE> self.mafiaComm.cast("As the sky darkens, so too do your intentions. " "Pick someone to kill") <NEW_LINE> self.mafia_options() <NEW_LINE> for p in self.players: <NEW_LINE> <INDENT> if p.role == "COP": <NEW_LINE> <INDENT> self.send_options("Use /target letter (i.e. /target C) " "to pick someone to investigate",p.id) <NEW_LINE> <DEDENT> elif p.role == "DOCTOR": <NEW_LINE> <INDENT> self.send_options("Use /target letter (i.e. /target D) " "to pick someone to save",p.id) <NEW_LINE> <DEDENT> elif p.role == "STRIPPER": <NEW_LINE> <INDENT> self.send_options("Use /target letter (i.e. /target A) " "to pick someone to distract",p.id) <NEW_LINE> <DEDENT> elif p.role == "MILKY": <NEW_LINE> <INDENT> self.send_options("Use /target letter (i.e. /target B) " "to pick someone to milk",p.id) <NEW_LINE> <DEDENT> <DEDENT> if self.pref.book["auto_timer"] in ["NIGHT"]: <NEW_LINE> <INDENT> self.timerOn = True <NEW_LINE> for player in self.players: <NEW_LINE> <INDENT> player.timerOn = True <NEW_LINE> <DEDENT> self.timer_value = SET_TIMER_VALUE <NEW_LINE> <DEDENT> return True
|
Change state to Night, send out relevant info.
|
625941bc07f4c71912b1136d
|
def __call__(self, argv, capture=False, cwd=os.getcwd(), shell=False): <NEW_LINE> <INDENT> return self.mock_list.pop(0)(argv, capture=capture, cwd=cwd, shell=shell)
|
Mock of common.BuildEnvironment.run_subprocess().
Args:
argv: Arguments to compare with the expected arguments of the
RunCommandMock instance at the head of mock_list.
capture: Whether to return a tuple (self.stdout, self.stderr)
cwd: Optional path relative to the project directory to run process in
for commands that do not allow specifying this, such as ant.
shell: Whether to run the command in a shell.
Returns:
(self.stdout, self.stderr) if capture is True, None otherwise.
|
625941bc15fb5d323cde09f1
|
def OnAttributesToolbarToggle(self, event): <NEW_LINE> <INDENT> attributes_toolbar = self.main_window._mgr.GetPane("attributes_toolbar") <NEW_LINE> self._toggle_pane(attributes_toolbar) <NEW_LINE> event.Skip()
|
Attributes toolbar toggle event handler
|
625941bcbf627c535bc130b5
|
def oula(a): <NEW_LINE> <INDENT> i = 2 <NEW_LINE> ret = 1 <NEW_LINE> while i <= (a**0.5): <NEW_LINE> <INDENT> if a%i == 0: <NEW_LINE> <INDENT> ret *= (i-1) <NEW_LINE> a = a // i <NEW_LINE> while a%i == 0: <NEW_LINE> <INDENT> ret *= (i) <NEW_LINE> a = a // i <NEW_LINE> <DEDENT> <DEDENT> i += 1 <NEW_LINE> <DEDENT> if a>1: <NEW_LINE> <INDENT> ret *= (a-1) <NEW_LINE> <DEDENT> return ret
|
Compute Euler's totient function of a.
:param a: positive integer
:return: the totient of a
|
625941bc96565a6dacc8f5b2
|
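A minimal sanity check for the totient routine above, assuming the flattened definition is unrolled into ordinary Python; it compares against a brute-force count of coprime integers:

```python
from math import gcd

def phi_bruteforce(n):
    # Definition of Euler's totient: integers in [1, n] coprime to n.
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)

# oula(n) should agree with the brute-force count.
for n in (1, 2, 9, 10, 36, 97):
    assert oula(n) == phi_bruteforce(n), n
```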
def createGuestUser(): <NEW_LINE> <INDENT> if CFG_ACCESS_CONTROL_LEVEL_GUESTS == 0: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return run_sql("""insert into "user" (email, note) values ('', '1')""") <NEW_LINE> <DEDENT> except OperationalError: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return run_sql("""insert into "user" (email, note) values ('', '0')""") <NEW_LINE> <DEDENT> except OperationalError: <NEW_LINE> <INDENT> return None
|
Create a guest user, inserting null values into all fields of the user table.
createGuestUser() -> GuestUserID
|
625941bc60cbc95b062c642f
|
@contextmanager <NEW_LINE> def xa(db_url, metadata=None): <NEW_LINE> <INDENT> if db_url.startswith('sqlite:'): <NEW_LINE> <INDENT> engine = sqlalchemy.create_engine(db_url, isolation_level='SERIALIZABLE') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> engine = sqlalchemy.create_engine(db_url) <NEW_LINE> <DEDENT> if metadata is not None: <NEW_LINE> <INDENT> metadata.create_all(engine) <NEW_LINE> <DEDENT> session_factory = sessionmaker(bind=engine) <NEW_LINE> session = scoped_session(session_factory) <NEW_LINE> try: <NEW_LINE> <INDENT> yield session <NEW_LINE> session.commit() <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> session.rollback() <NEW_LINE> raise <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> session.close()
|
Provide a transactional scope around a series of operations.
|
625941bc92d797404e30406f
|
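A usage sketch for the transactional scope above, assuming SQLAlchemy and an in-memory SQLite URL; the `users` table here is hypothetical:

```python
from sqlalchemy import Column, Integer, MetaData, String, Table

metadata = MetaData()
# Hypothetical table, for illustration only.
users = Table('users', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String))

# Everything inside the block commits together or rolls back together.
with xa('sqlite://', metadata) as session:
    session.execute(users.insert().values(name='alice'))
```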
def rawwire_speed(self, rawwire_speed: int = RawWireSpeed.SPEED_400KHZ) -> bool: <NEW_LINE> <INDENT> self.write(0x60 | rawwire_speed) <NEW_LINE> return self.read(1) == 0x01
|
Raw Wire Speed Configuration
:param rawwire_speed: The Clock Rate
:type rawwire_speed: int.
:returns: returns Success or Failure
:rtype: bool
|
625941bc99cbb53fe6792acd
|
def test_custom_config(): <NEW_LINE> <INDENT> retry_handler = RetryHandler( max_retries=10, retry_backoff_factor=0.2, retry_backoff_max=200, retry_time_limit=100, retry_on_status_codes=[502, 503] ) <NEW_LINE> assert retry_handler.max_retries == 10 <NEW_LINE> assert retry_handler.timeout == 100 <NEW_LINE> assert retry_handler.backoff_max == 200 <NEW_LINE> assert retry_handler.backoff_factor == 0.2 <NEW_LINE> assert retry_handler._retry_on_status_codes == {429, 502, 503, 504}
|
Test that the default configuration is overridden if a custom configuration is provided
|
625941bcbe383301e01b5371
|
def set_output_log_filename(self, filename): <NEW_LINE> <INDENT> self.output_log_filename = filename
|
Set the logging output file
:param filename: output log filename
|
625941bc4428ac0f6e5ba6d7
|
def __init__(self): <NEW_LINE> <INDENT> self.systran_types = { 'start': 'int', 'end': 'int', 'type': 'str' } <NEW_LINE> self.attribute_map = { 'start': 'start', 'end': 'end', 'type': 'type' } <NEW_LINE> self.start = None <NEW_LINE> self.end = None <NEW_LINE> self.type = None
|
Systran model
:param dict systran_types: The key is attribute name and the value is attribute type.
:param dict attribute_map: The key is attribute name and the value is json key in definition.
|
625941bc7d43ff24873a2b83
|
def printStatus(self,mod=""): <NEW_LINE> <INDENT> dims = "" <NEW_LINE> corner_labels = {"back_right":"br","back_left":"bl","front_right":"fr", "front_left":"fl"} <NEW_LINE> for x in self.four_corners: <NEW_LINE> <INDENT> dims += "{}({},{}), ".format(corner_labels[x],self.four_corners[x][0], self.four_corners[x][1]) <NEW_LINE> <DEDENT> print("{}{}\tLEN: {}\tLANES: ({},{})".format(mod, self.label,round(self.length,2), self.top_up_lane.label, self.bottom_down_lane.label)) <NEW_LINE> print("{}{}\t{}\n".format(mod,self.label,dims))
|
Used to print the properties of the road during debugging
|
625941bc6aa9bd52df036c88
|
def act(self, action): <NEW_LINE> <INDENT> action = int(action) <NEW_LINE> assert isinstance(action, int) <NEW_LINE> assert action < self.actions_num, "%r (%s) invalid"%(action, type(action)) <NEW_LINE> for k in self.world_layer.buttons: <NEW_LINE> <INDENT> self.world_layer.buttons[k] = 0 <NEW_LINE> <DEDENT> for key in self.world_layer.player.controls[action]: <NEW_LINE> <INDENT> if key in self.world_layer.buttons: <NEW_LINE> <INDENT> self.world_layer.buttons[key] = 1 <NEW_LINE> <DEDENT> <DEDENT> self.step() <NEW_LINE> observation = self.world_layer.get_state() <NEW_LINE> reward = self.world_layer.player.get_reward() <NEW_LINE> terminal = self.world_layer.player.game_over <NEW_LINE> info = {} <NEW_LINE> return observation, reward, terminal, info
|
Take one action for one step
|
625941bc711fe17d82542257
|
def product(*iterables, **tqdm_kwargs): <NEW_LINE> <INDENT> kwargs = tqdm_kwargs.copy() <NEW_LINE> tqdm_class = kwargs.pop("tqdm_class", tqdm_auto) <NEW_LINE> try: <NEW_LINE> <INDENT> lens = list(map(len, iterables)) <NEW_LINE> <DEDENT> except TypeError: <NEW_LINE> <INDENT> total = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> total = 1 <NEW_LINE> for i in lens: <NEW_LINE> <INDENT> total *= i <NEW_LINE> <DEDENT> kwargs.setdefault("total", total) <NEW_LINE> <DEDENT> with tqdm_class(**kwargs) as t: <NEW_LINE> <INDENT> for i in itertools.product(*iterables): <NEW_LINE> <INDENT> yield i <NEW_LINE> t.update()
|
Equivalent of `itertools.product`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm].
|
625941bcfb3f5b602dac3576
|
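A usage sketch for the wrapped `product` above: it iterates exactly like `itertools.product` while advancing a single progress bar whose total is the size of the Cartesian product (`desc` is an ordinary tqdm keyword):

```python
# 2 * 3 = 6 pairs, one progress bar with total=6.
for letter, number in product('ab', [1, 2, 3], desc='pairs'):
    print(letter, number)
```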
def ffmpeg(originalfile, newfile): <NEW_LINE> <INDENT> command = 'ffmpeg -y -i "' + originalfile + '" "' + newfile + '"' <NEW_LINE> remove = 'rm "' + originalfile + '"' <NEW_LINE> os.system(command) <NEW_LINE> os.system(remove)
|
This function runs the Unix tool ffmpeg on our music files.
Takes two arguments, the first being the original filename and the second
being the filename to which you would like to save the result; the
original file is removed afterwards.
|
625941bc009cb60464c63299
|
def userStyleSheetUrl(self): <NEW_LINE> <INDENT> pass
|
QWebSettings.userStyleSheetUrl() -> QUrl
|
625941bc07d97122c417876b
|
def _parser_oxstats(line, reverse=False): <NEW_LINE> <INDENT> data = line.rstrip().split() <NEW_LINE> chromosome = data[0] <NEW_LINE> position = data[2] <NEW_LINE> ref = data[3] <NEW_LINE> alt = data[4] <NEW_LINE> alt_counts = [] <NEW_LINE> for i in np.arange(5, len(data) - 2, 3): <NEW_LINE> <INDENT> alt_count = 1 * float(data[i + 1]) <NEW_LINE> alt_count += 2 * float(data[i + 2]) <NEW_LINE> alt_counts.append(alt_count) <NEW_LINE> <DEDENT> if reverse: <NEW_LINE> <INDENT> alt_counts = [2 - count for count in alt_counts] <NEW_LINE> <DEDENT> return chromosome, position, ref, alt, alt_counts
|
Parser for an OXSTATS file; for details visit:
http://www.stats.ox.ac.uk/~marchini/software/gwas/file_format.html
In brief:
First 5 columns:
SNP ID, RS ID of the SNP, base-pair position of the SNP,
the allele coded A and the allele coded B
Following columns for each sample:
sample_1_AA, sample_1_AB, sample_1_BB, sample_2_AA ...
3 columns per sample: AA, AB, BB
Args:
line (str): line of oxstats file
reverse (bool, optional): reverse reference and alt alleles
Returns:
list: chromosome, position, ref, alt, alt_counts
|
625941bc16aa5153ce36235e
|
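A worked example for the parser above on a single one-sample line; the values are made up for illustration. The expected alt dosage is 1*P(AB) + 2*P(BB) = 1*0.2 + 2*0.7 = 1.6:

```python
# Columns: SNP_ID RS_ID position allele_A allele_B, then AA AB BB per sample.
line = "1 rs123 1000 A G 0.1 0.2 0.7\n"
chrom, pos, ref, alt, alt_counts = _parser_oxstats(line)
print(chrom, pos, ref, alt, alt_counts)  # 1 1000 A G [1.6]
```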
def setLongitude(self, *args): <NEW_LINE> <INDENT> return _AriaPy.ArLLACoords_setLongitude(self, *args)
|
setLongitude(self, double l)
|
625941bcaad79263cf390922
|
def to_vcf(in_tsv, data): <NEW_LINE> <INDENT> call_convert = {"Amp": "DUP", "Del": "DEL"} <NEW_LINE> out_file = "%s.vcf" % utils.splitext_plus(in_tsv)[0] <NEW_LINE> if not utils.file_uptodate(out_file, in_tsv): <NEW_LINE> <INDENT> with file_transaction(data, out_file) as tx_out_file: <NEW_LINE> <INDENT> with open(in_tsv) as in_handle: <NEW_LINE> <INDENT> with open(tx_out_file, "w") as out_handle: <NEW_LINE> <INDENT> out_handle.write(VCF_HEADER + "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\n" % (dd.get_sample_name(data))) <NEW_LINE> header = in_handle.readline().split("\t") <NEW_LINE> for cur in (dict(zip(header, l.split("\t"))) for l in in_handle): <NEW_LINE> <INDENT> if cur["Amp_Del"] in call_convert: <NEW_LINE> <INDENT> svtype = call_convert[cur["Amp_Del"]] <NEW_LINE> info = "SVTYPE=%s;END=%s;SVLEN=%s;FOLD_CHANGE_LOG=%s;PROBES=%s;GENE=%s" % ( svtype, cur["End"], int(cur["End"]) - int(cur["Start"]), cur["Log2ratio"], cur["Ab_Seg"], cur["Gene"]) <NEW_LINE> out_handle.write("\t".join([cur["Chr"], cur["Start"], ".", "N", "<%s>" % (svtype), ".", ".", info, "GT", "1/1"]) + "\n") <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return vcfutils.sort_by_ref(out_file, data)
|
Convert seq2c output file into VCF output.
|
625941bc10dbd63aa1bd2a8c
|
def check_password(self, password): <NEW_LINE> <INDENT> return check_password_hash(self.password, password)
|
Check the given password by hashing it and comparing the result
with the stored password hash.
params:
password(string): user password
return:
password(boolean): True or False
|
625941bcb830903b967e97f7
|
def remove(self, item): <NEW_LINE> <INDENT> empties = [] <NEW_LINE> for coords, items in self._items.items(): <NEW_LINE> <INDENT> if item in items: <NEW_LINE> <INDENT> items.remove(item) <NEW_LINE> if not items: <NEW_LINE> <INDENT> empties.append(coords) <NEW_LINE> <DEDENT> break <NEW_LINE> <DEDENT> <DEDENT> for empty in empties: <NEW_LINE> <INDENT> del self._items[empty]
|
Remove the item provided from the tank.
|
625941bc377c676e91272090
|
def test(self): <NEW_LINE> <INDENT> pass
|
Testing things would go here
|
625941bcdd821e528d63b090
|
def build(self, client, pull=False, usecache=True): <NEW_LINE> <INDENT> stage = staging.StagedFile( self.sourceimage, self.sourcepath, self.destpath, cache_from=self.cache_from ) <NEW_LINE> stage.stage(self.baseimage, self.buildname)
|
Note:
`pull` and `usecache` are for compatibility only. They're irrelevant because
they were applied when BUILDING self.sourceimage
|
625941bcc4546d3d9de72917
|
def getDataMatrix(file_set, shot=True, leave_out_class=None): <NEW_LINE> <INDENT> matrix = [] <NEW_LINE> classes = [] <NEW_LINE> for file in file_set: <NEW_LINE> <INDENT> feature_lines = getFeatureLinesFromFile(file, shot, leave_out_class=leave_out_class) <NEW_LINE> for line in feature_lines: <NEW_LINE> <INDENT> if shot: <NEW_LINE> <INDENT> classes.append(SHOT_NAMES.index(line.pop())) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> classes.append(int(line.pop()=="True")) <NEW_LINE> <DEDENT> matrix.append(np.array(line)) <NEW_LINE> <DEDENT> <DEDENT> return np.array(matrix, dtype=np.float64), np.array(classes)
|
Returns a NumPy array with the feature lines converted from all the beatscripts
mentioned in file_set.
|
625941bcde87d2750b85fc75
|
def despread(self, fft): <NEW_LINE> <INDENT> corr_fft = fft * self.template_fft.conj <NEW_LINE> corr_full = corr_fft.ifft <NEW_LINE> corr = corr_full[:self.corr_len] <NEW_LINE> return corr
|
Correlate / despread using FFT.
|
625941bc293b9510aa2c317e
|
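The despread above leans on the identity corr = IFFT(FFT(x) * conj(FFT(template))); a self-contained NumPy sketch of that identity, independent of the class's `template_fft` and `corr_len` attributes:

```python
import numpy as np

x = np.array([0., 0., 1., 1., -1., 0., 0., 0.])
template = np.array([1., 1., -1., 0., 0., 0., 0., 0.])

# Circular cross-correlation via the frequency domain.
corr = np.fft.ifft(np.fft.fft(x) * np.fft.fft(template).conj()).real
print(corr.argmax())  # 2: the template aligns best at offset 2
```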
def __init__(self, kubelet_endpoint=None, local_vars_configuration=None): <NEW_LINE> <INDENT> if local_vars_configuration is None: <NEW_LINE> <INDENT> local_vars_configuration = Configuration() <NEW_LINE> <DEDENT> self.local_vars_configuration = local_vars_configuration <NEW_LINE> self._kubelet_endpoint = None <NEW_LINE> self.discriminator = None <NEW_LINE> if kubelet_endpoint is not None: <NEW_LINE> <INDENT> self.kubelet_endpoint = kubelet_endpoint
|
V1NodeDaemonEndpoints - a model defined in OpenAPI
|
625941bc6aa9bd52df036c89
|
@click.command() <NEW_LINE> @click.argument('input_file') <NEW_LINE> @click.option('--model', '-m', type=click.Path(), help='An existing dnamodel .h5 file') <NEW_LINE> @click.option('--config', '-c', type=click.Path(), help='A json configuration file') <NEW_LINE> def main(input_file, model, config): <NEW_LINE> <INDENT> greet = 'Hello' <NEW_LINE> raw_data = pd.read_excel(input_file, header=0) <NEW_LINE> col_names = ['sequence'] <NEW_LINE> for i in range(1, len(raw_data.columns)): <NEW_LINE> <INDENT> col_names.append('output'+str(i)) <NEW_LINE> <DEDENT> raw_data.columns = col_names <NEW_LINE> df = raw_data[np.isfinite(raw_data['output1'])] <NEW_LINE> dnaCNN = dm.dnaModel(df, filename=model, config = config) <NEW_LINE> dnaCNN.train() <NEW_LINE> dnaCNN.design() <NEW_LINE> dnaCNN.save() <NEW_LINE> dnaCNN.test()
|
Automated, experimentally-driven design and optimization of DNA sequences.
|
625941bc1f5feb6acb0c4a3a
|
@oauth.route('/authenticate/', methods=['GET', 'POST']) <NEW_LINE> def authenticate(): <NEW_LINE> <INDENT> from flask import request <NEW_LINE> response = make_response() <NEW_LINE> context = make_context() <NEW_LINE> result = authomatic.login(WerkzeugAdapter(request, response), 'google') <NEW_LINE> if result: <NEW_LINE> <INDENT> context['result'] = result <NEW_LINE> if not result.error: <NEW_LINE> <INDENT> save_credentials(result.user.credentials) <NEW_LINE> get_document(app_config.COPY_GOOGLE_DOC_KEY, app_config.COPY_PATH) <NEW_LINE> <DEDENT> return render_template('oauth/authenticate.html', **context) <NEW_LINE> <DEDENT> return response
|
Run OAuth workflow.
|
625941bc9f2886367277a776
|
def multiprocessing_run(process_count, match_num, iteration_limit, mcts_process_num): <NEW_LINE> <INDENT> with Manager() as manager: <NEW_LINE> <INDENT> shared_list = manager.list() <NEW_LINE> processes = [Process(target=run, args=(iteration_limit, match_num, mcts_process_num, shared_list, i)) for i in range(process_count)] <NEW_LINE> for p in processes: <NEW_LINE> <INDENT> p.start() <NEW_LINE> <DEDENT> for p in processes: <NEW_LINE> <INDENT> p.join() <NEW_LINE> <DEDENT> return [i for i in shared_list]
|
Run matches between MCTS and three simple agents using multiprocessing.
:param process_count: The number of processes
:param match_num: The number of matches
:param iteration_limit: The maximal iteration of MCTS
:param mcts_process_num: The number of processes used in MCTS
:return: the list of results collected from the shared list
|
625941bc0c0af96317bb80ce
|
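A stripped-down sketch of the Manager pattern used above: workers append into a shared proxy list, the parent joins them and copies the results out (the worker body is illustrative only):

```python
from multiprocessing import Manager, Process

def worker(shared_list, i):
    shared_list.append(i * i)  # stand-in for one match result

if __name__ == '__main__':
    with Manager() as manager:
        shared = manager.list()
        procs = [Process(target=worker, args=(shared, i)) for i in range(4)]
        for p in procs:
            p.start()
        for p in procs:
            p.join()
        print(sorted(shared))  # [0, 1, 4, 9]
```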
def start(self, *args): <NEW_LINE> <INDENT> if self._is_verbose: <NEW_LINE> <INDENT> return self <NEW_LINE> <DEDENT> self.writeln('start', *args) <NEW_LINE> self._indent += 1 <NEW_LINE> return self
|
Start a nested log.
|
625941bca79ad161976cc02b
|
def __init__(self, emitter, func, args): <NEW_LINE> <INDENT> super(_BackGroundWorkerCB, self).__init__() <NEW_LINE> self.emitter = emitter <NEW_LINE> self.func = func <NEW_LINE> self.args = args + (self._get_callback(),) <NEW_LINE> self.proceed, self._result = True, None
|
emitter - pyqtSignal that emits the signal,
func(*args, callback_fun) - target procedure;
the callback function should be declared as
int callback_fun(QString BaseName, QString SubName,
double proc1, double proc2)
and should return 1 for a cancellation request and 0 otherwise
|
625941bca17c0f6771cbdf39
|
def swissPairings(): <NEW_LINE> <INDENT> pairings = [] <NEW_LINE> standings = playerStandings() <NEW_LINE> player_num = countPlayers() <NEW_LINE> for i in range(0, player_num, 2): <NEW_LINE> <INDENT> id1 = standings[i][0] <NEW_LINE> name1 = standings[i][1] <NEW_LINE> id2 = standings[i + 1][0] <NEW_LINE> name2 = standings[i + 1][1] <NEW_LINE> standing = (id1, name1, id2, name2) <NEW_LINE> pairings.append(standing) <NEW_LINE> <DEDENT> return pairings
|
Returns a list of pairs of players for the next round of a match.
Assuming that there are an even number of players registered, each player
appears exactly once in the pairings. Each player is paired with another
player with an equal or nearly-equal win record, that is, a player adjacent
to him or her in the standings.
Returns:
A list of tuples, each of which contains (id1, name1, id2, name2)
id1: the first player's unique id
name1: the first player's name
id2: the second player's unique id
name2: the second player's name
|
625941bc50485f2cf553cc7f
|
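The pairing step above reduces to zipping adjacent rows of the sorted standings; a self-contained sketch with hypothetical standings in the usual (id, name, wins, matches) shape:

```python
# Hypothetical standings, already sorted by wins.
standings = [(1, 'Ann', 2, 2), (2, 'Bob', 2, 2),
             (3, 'Cai', 1, 2), (4, 'Dee', 0, 2)]

pairings = [(standings[i][0], standings[i][1],
             standings[i + 1][0], standings[i + 1][1])
            for i in range(0, len(standings), 2)]
print(pairings)  # [(1, 'Ann', 2, 'Bob'), (3, 'Cai', 4, 'Dee')]
```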
def update_deployment(self, namespace=None, name=generate_random_name(), **kwargs): <NEW_LINE> <INDENT> namespace = self.namespace if namespace is None else namespace <NEW_LINE> d_kwargs = { 'app_type': kwargs.get('app_type', 'web'), 'version': kwargs.get('version', 'v99'), 'replicas': kwargs.get('replicas', 4), 'pod_termination_grace_period_seconds': 2, 'image': 'quay.io/fake/image', 'entrypoint': 'sh', 'command': 'start', } <NEW_LINE> deployment = self.scheduler.deployment.update(namespace, name, **d_kwargs) <NEW_LINE> data = deployment.json() <NEW_LINE> self.assertEqual(deployment.status_code, 200, data) <NEW_LINE> return name
|
Helper function to update and verify a deployment on the namespace
|
625941bc97e22403b379ce7f
|
def get_image_path(image_version): <NEW_LINE> <INDENT> upload_dir = BuiltIn().get_variable_value("${upload_dir_path}") <NEW_LINE> grk.run_key_u("Open Connection And Log In") <NEW_LINE> status, image_list = grk.run_key("Execute Command On BMC ls -d " + upload_dir + "*/") <NEW_LINE> image_list = image_list.split("\n") <NEW_LINE> retry = 0 <NEW_LINE> while (retry < 10): <NEW_LINE> <INDENT> for i in range(0, len(image_list)): <NEW_LINE> <INDENT> version = get_image_version(image_list[i] + "MANIFEST") <NEW_LINE> if (version == image_version): <NEW_LINE> <INDENT> return image_list[i] <NEW_LINE> <DEDENT> <DEDENT> time.sleep(10) <NEW_LINE> retry += 1
|
Query the upload image dir for the presence of an image matching
the version that was read from the MANIFEST before uploading
the image. Depending on the purpose, verify that the activation
object exists and is either READY or INVALID.
Description of argument(s):
image_version The version of the image that should match one
of the images in the upload dir.
|
625941bc2ae34c7f2600d018
|
def _skip_frame(self): <NEW_LINE> <INDENT> for line in self._f: <NEW_LINE> <INDENT> if line == 'ITEM: ATOMS\n': <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> for i in range(self.num_atoms): <NEW_LINE> <INDENT> next(self._f)
|
Skip the next time frame
|
625941bc1d351010ab855a03
|
def __init__(self, channel): <NEW_LINE> <INDENT> self.Put = channel.unary_unary( '/protobuf.Management/Put', request_serializer=cockatrice_dot_protobuf_dot_management__pb2.PutRequest.SerializeToString, response_deserializer=cockatrice_dot_protobuf_dot_management__pb2.PutResponse.FromString, ) <NEW_LINE> self.Get = channel.unary_unary( '/protobuf.Management/Get', request_serializer=cockatrice_dot_protobuf_dot_management__pb2.GetRequest.SerializeToString, response_deserializer=cockatrice_dot_protobuf_dot_management__pb2.GetResponse.FromString, ) <NEW_LINE> self.Delete = channel.unary_unary( '/protobuf.Management/Delete', request_serializer=cockatrice_dot_protobuf_dot_management__pb2.DeleteRequest.SerializeToString, response_deserializer=cockatrice_dot_protobuf_dot_management__pb2.DeleteResponse.FromString, ) <NEW_LINE> self.Clear = channel.unary_unary( '/protobuf.Management/Clear', request_serializer=cockatrice_dot_protobuf_dot_management__pb2.ClearRequest.SerializeToString, response_deserializer=cockatrice_dot_protobuf_dot_management__pb2.ClearResponse.FromString, )
|
Constructor.
Args:
channel: A grpc.Channel.
|
625941bc5f7d997b87174980
|
def distances(a, b): <NEW_LINE> <INDENT> if (len(a) != len(b)): <NEW_LINE> <INDENT> print("\nInput array error...\n{}".format(distances.__doc__)) <NEW_LINE> return None <NEW_LINE> <DEDENT> d0 = np.subtract.outer(a[:, 0], b[:, 0]) <NEW_LINE> d1 = np.subtract.outer(a[:, 1], b[:, 1]) <NEW_LINE> return np.hypot(d0, d1)
|
A fast implementation for distance calculations
Requires:
--------
`a`, `b` - arrays
2D arrays of equal size!! ... can be the same array
Notes:
-----
Similar to my e_dist and scipy cdist
|
625941bc8e7ae83300e4aeb2
|
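A quick check of the broadcast-based routine above against a hand-computed value; rows of `a` are measured against rows of `b`:

```python
import numpy as np

a = np.array([[0., 0.], [3., 4.]])
b = np.array([[0., 0.], [0., 0.]])

d = distances(a, b)
print(d.shape)  # (2, 2): all pairwise row distances
print(d[1, 0])  # 5.0 == hypot(3, 4)
```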
def _latex_term(self, m): <NEW_LINE> <INDENT> if not m: <NEW_LINE> <INDENT> return "e_{\\emptyset}" <NEW_LINE> <DEDENT> from sage.misc.latex import latex <NEW_LINE> from sage.sets.set import Set <NEW_LINE> return "e_{{{}}}".format(latex(Set(sorted(m))))
|
Return a string representation of the basis element indexed by ``m``.
EXAMPLES::
sage: M = matroids.Wheel(3)
sage: OT = M.orlik_terao_algebra(QQ)
sage: OT._latex_term(frozenset([0, 1]))
'e_{\\left\\{0, 1\\right\\}}'
sage: OT._latex_term(frozenset())
'e_{\\emptyset}'
|
625941bceab8aa0e5d26da44
|
def algorithm(self, parameters=None): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> logging.info("Cleaning up the old request docs") <NEW_LINE> report = self.wmstatsCouchDB.deleteOldDocs(self.DataKeepDays) <NEW_LINE> logging.info("%s docs deleted" % report) <NEW_LINE> <DEDENT> except Exception as ex: <NEW_LINE> <INDENT> logging.error("Cleaning up the old docs failed") <NEW_LINE> msg = traceback.format_exc() <NEW_LINE> logging.error(msg) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> logging.info("Cleaning up the archived request docs") <NEW_LINE> report = self.cleanAlreadyArchivedWorkflows() <NEW_LINE> logging.info("%s archived workflows deleted" % report) <NEW_LINE> <DEDENT> except Exception as ex: <NEW_LINE> <INDENT> logging.error("Cleaning up archived failed") <NEW_LINE> msg = traceback.format_exc() <NEW_LINE> logging.error(msg) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> logging.info("Creating workload summary") <NEW_LINE> finishedwfsWithLogCollectAndCleanUp = DataCache.getFinishedWorkflows() <NEW_LINE> self.archiveSummaryAndPublishToDashBoard(finishedwfsWithLogCollectAndCleanUp) <NEW_LINE> logging.info("%s workload summary is created" % len(finishedwfsWithLogCollectAndCleanUp)) <NEW_LINE> logging.info("Cleaning up wmsbs and disk") <NEW_LINE> self.deleteWorkflowFromWMBSAndDisk() <NEW_LINE> logging.info("Done: cleaning up wmsbs and disk") <NEW_LINE> logging.info("Cleaning up couch db") <NEW_LINE> self.cleanCouchDBAndChangeToArchiveStatus() <NEW_LINE> logging.info("Done: cleaning up couch db") <NEW_LINE> <DEDENT> except Exception as ex: <NEW_LINE> <INDENT> msg = traceback.format_exc() <NEW_LINE> logging.error(msg) <NEW_LINE> logging.error("Error occurred, will try again next cycle")
|
Get information from wmbs, workqueue and local couch.
- Deletes old wmstats docs
- Archives workflows
|
625941bc796e427e537b04a9
|
def users_listed_by(self, user, name=None): <NEW_LINE> <INDENT> from django.contrib.auth import get_user_model <NEW_LINE> ids_listed = self.get_user_ids(user, name=name) <NEW_LINE> return get_user_model().objects.filter(pk__in=ids_listed)
|
Return the users that appear in one of `user`'s blocklists
|
625941bccb5e8a47e48b7994
|
def linear_regression(x_list: list, y_list: list) -> (float, float): <NEW_LINE> <INDENT> x_total_sum = sum(x_list) <NEW_LINE> y_total_sum = sum(y_list) <NEW_LINE> x_total_times_y_total = sum(map(lambda x, y: x * y, x_list, y_list)) <NEW_LINE> x_total_squared = sum(map(lambda x: x ** 2, x_list)) <NEW_LINE> x_len = len(x_list) <NEW_LINE> _numerator = ((x_len * x_total_times_y_total) - (x_total_sum * y_total_sum)) <NEW_LINE> _denominator = ((x_len * x_total_squared) - x_total_sum ** 2) <NEW_LINE> slope = _numerator / _denominator <NEW_LINE> y_intercept = (y_total_sum - slope * x_total_sum) / x_len <NEW_LINE> return y_intercept, slope
|
Simple least-squares fit; cannot handle floating point errors
:param x_list:
:param y_list:
:return:
|
625941bc507cdc57c6306bba
|
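A sanity check of the least-squares formulas above on points that lie exactly on y = 2x + 1; note the function returns (intercept, slope) in that order:

```python
x = [0.0, 1.0, 2.0, 3.0]
y = [1.0, 3.0, 5.0, 7.0]

intercept, slope = linear_regression(x, y)
print(intercept, slope)  # 1.0 2.0 (up to rounding)
```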
def test_searchproj(self, driver, site): <NEW_LINE> <INDENT> phtext = "Search for projects..." <NEW_LINE> for ntries in range(10): <NEW_LINE> <INDENT> els = driver.find_elements_by_tag_name("input") <NEW_LINE> for x in els: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if x.get_attribute("placeholder") == phtext: <NEW_LINE> <INDENT> print(f'found project placeholder after {ntries+1} tries') <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> except: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> break <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> time.sleep(10) <NEW_LINE> driver.save_screenshot('sep.png') <NEW_LINE> assert 0, f'Placeholder "{phtext}" not found' <NEW_LINE> <DEDENT> project = site.get('project') <NEW_LINE> x.send_keys(project) <NEW_LINE> driver.save_screenshot('sfp.png')
|
type in the name of the test project
|
625941bc67a9b606de4a7da2
|
def generateDataTwoCard(): <NEW_LINE> <INDENT> random.seed(SEED) <NEW_LINE> for i in range(TRAINING_SIZE): <NEW_LINE> <INDENT> inputs = getRandomDeal(2) <NEW_LINE> result = torch.zeros(1, 4, 13) <NEW_LINE> result = result.new_full((1,4, 13), getResultTwoCardNT(inputs)) <NEW_LINE> torch.save(torch.cat((inputs, result), 0), os.path.join('trn2', str(i)+'.pt')) <NEW_LINE> <DEDENT> for i in range(VALIDATION_SIZE): <NEW_LINE> <INDENT> inputs = getRandomDeal(2) <NEW_LINE> result = torch.zeros(1,4, 13) <NEW_LINE> result = result.new_full((1,4, 13), getResultTwoCardNT(inputs)) <NEW_LINE> torch.save(torch.cat((inputs, result), 0), os.path.join('val2', str(i)+'.pt'))
|
Creates files with randomly generated data that
have both the inputs and outputs.
|
625941bcbe7bc26dc91cd4eb
|
def process_log_data(spark, df, output_data): <NEW_LINE> <INDENT> users_table = df.selectExpr("userId as user_id", "firstName as first_name", "lastName as last_name", "gender", "level").where("page = 'NextSong' and userId IS NOT NULL") <NEW_LINE> users_table.write.mode("overwrite").parquet(os.path.join(output_data, "users")) <NEW_LINE> get_timestamp = udf(lambda x: str(int(int(x) / 1000))) <NEW_LINE> df = df.withColumn("timestamp", get_timestamp(df["ts"])) <NEW_LINE> get_datetime = udf(lambda x: str(datetime.fromtimestamp(int(x) / 1000.0))) <NEW_LINE> df = df.withColumn("datetime", get_datetime(df["ts"])) <NEW_LINE> time_table = df.selectExpr("datetime as start_time", "hour(datetime) as hour", "day(datetime) as day", "weekofyear(datetime) as week", "month(datetime) as month", "year(datetime) as year", "dayofweek(datetime) as weekday").dropDuplicates(["start_time"]) <NEW_LINE> time_table.write.mode("overwrite").partitionBy("year", "month").parquet(os.path.join(output_data, "time"))
|
Processing log data to get users and time tables and store data in AWS S3 Bucket.
Args:
spark: Spark Session object.
df: PySpark Dataframe of log data.
output_data (str): AWS S3 Bucket path to store users and time tables.
Returns:
None
|
625941bc3c8af77a43ae3684
|
def log2debug(code, message): <NEW_LINE> <INDENT> _logit(code, message, error=False, verbose=False, level='debug')
|
Log debug message to file only, but don't die.
Args:
code: Message code
message: Message text
Returns:
None
|
625941bc8da39b475bd64e57
|
def __int__(self) -> int: <NEW_LINE> <INDENT> return int(self.to_nunits())
|
Cast to smallest denomination
|
625941bc4f88993c3716bf52
|
def eigensolve(self, epsilon=0.85): <NEW_LINE> <INDENT> raise NotImplementedError("Problem 2 Incomplete")
|
Compute the PageRank vector using the eigenvalue method.
Normalize the resulting eigenvector so its entries sum to 1.
Parameters:
epsilon (float): the damping factor, between 0 and 1.
Return:
dict(str -> float): A dictionary mapping labels to PageRank values.
|
625941bc71ff763f4b54956d
|
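One way to fill in the stub above, per its docstring: build the damped Google matrix G = epsilon*A + (1 - epsilon)/n, take the eigenvector for eigenvalue 1, and normalize it to sum to 1. A minimal NumPy sketch under those assumptions:

```python
import numpy as np

# Column-stochastic adjacency of a 3-node graph (each column sums to 1).
A = np.array([[0., 0.5, 0.5],
              [0.5, 0., 0.5],
              [0.5, 0.5, 0.]])
epsilon, n = 0.85, 3
G = epsilon * A + (1 - epsilon) / n  # damped Google matrix

vals, vecs = np.linalg.eig(G)
v = np.real(vecs[:, np.argmax(np.real(vals))])
print(v / v.sum())  # symmetric graph -> [1/3, 1/3, 1/3]
```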
def run(): <NEW_LINE> <INDENT> test_runner = unittest.TextTestRunner() <NEW_LINE> return test_runner.run(testsuite())
|
Run all tests.
:return: a :class:`unittest.TestResult` object
|
625941bcc4546d3d9de72918
|
def categories(self): <NEW_LINE> <INDENT> return self.values('category').annotate(**ANNOTATE_DICT)
|
Returns all the available categories
|
625941bc8e05c05ec3eea258
|
def l10n_base_reference_and_tests(files): <NEW_LINE> <INDENT> def get_reference_and_tests(path): <NEW_LINE> <INDENT> match = files.match(path) <NEW_LINE> if match is None: <NEW_LINE> <INDENT> return None, None <NEW_LINE> <DEDENT> ref, _, _, extra_tests = match <NEW_LINE> return ref, extra_tests <NEW_LINE> <DEDENT> return get_reference_and_tests
|
Get reference files to check for conflicts in gecko-strings and friends.
|
625941bc26068e7796caebc0
|
def test_user_duplicate(self): <NEW_LINE> <INDENT> res = self.client().post('/userlists/', data={'name': 'john', 'email': 'john@gmail.com'}) <NEW_LINE> rv = self.client().post('/userlists/', data={'name': 'john', 'email': 'john@gmail.com'}) <NEW_LINE> self.assertEqual(res.status_code, 201) <NEW_LINE> response = json.loads(rv.get_data()) <NEW_LINE> self.assertEqual(response['message'], 'user already exist')
|
Test duplicate user
|
625941bcbde94217f3682cdf
|
def get_default_field(vname, fieldIndex): <NEW_LINE> <INDENT> global DEFAULTS <NEW_LINE> return (DEFAULTS[vname] >> (8*fieldIndex)) & 0xff
|
Get specified byte in the parameter default value (read at program
start from Verilog parameters)
<vname> Verilog parameter name string (as listed in 'parameters')
<fieldIndex> byte field index (0 - lowest byte, 1 - bits[15:8], etc)
Return specified byte
|
625941bcd18da76e235323b9
|
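The shift-and-mask above, spelled out on a concrete packed value; the parameter name and value are hypothetical:

```python
DEFAULTS = {'EXAMPLE_PARAM': 0x00C0FFEE}  # hypothetical Verilog default

def get_default_field(vname, field_index):
    # Shift right by 8 bits per field, then mask off one byte.
    return (DEFAULTS[vname] >> (8 * field_index)) & 0xff

print(hex(get_default_field('EXAMPLE_PARAM', 0)))  # 0xee, bits [7:0]
print(hex(get_default_field('EXAMPLE_PARAM', 1)))  # 0xff, bits [15:8]
```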
def GetQuerySet(self): <NEW_LINE> <INDENT> qs = FakeQuerySet( model = self.model, using = self.using, where = self.where, sort = self.sort, order = self.order, fields = self.fields ) <NEW_LINE> return qs
|
Return the QuerySet corresponding to the table
|
625941bc2eb69b55b151c792
|
def test_execute_callback_was_blocked_true(self): <NEW_LINE> <INDENT> self.tractor.was_blocked = True <NEW_LINE> self.tractor.old_queue = [] <NEW_LINE> self.tractor.execute_callback() <NEW_LINE> self.assertFalse(self.tractor.was_blocked) <NEW_LINE> self.assertEqual(self.tractor._action_queue,self.tractor.old_queue)
|
Test that was_blocked is reset and the action queue equals the
old queue when was_blocked was true
|
625941bcdc8b845886cb541a
|
def keyphrases_from_file(filepath): <NEW_LINE> <INDENT> extractor = pke.unsupervised.TopicRank() <NEW_LINE> try: <NEW_LINE> <INDENT> extractor.load_document(input=filepath, language="en", normalization=None) <NEW_LINE> extractor.candidate_selection() <NEW_LINE> extractor.candidate_weighting() <NEW_LINE> return { keyphrase: score for keyphrase, score in extractor.get_n_best(n=10, stemming="true") } <NEW_LINE> <DEDENT> except ValueError as error: <NEW_LINE> <INDENT> print(f'error: {error}') <NEW_LINE> return {}
|
uses pke to extract the top words/phrases
from a given file.
TODO: read in an entire directory and extract the top phrases
from the entire corpus
|
625941bc01c39578d7e74d22
|
def format_family_events(self, event_ref_list, place_lat_long): <NEW_LINE> <INDENT> with Html("table", class_="infolist eventlist") as table: <NEW_LINE> <INDENT> thead = Html("thead") <NEW_LINE> table += thead <NEW_LINE> thead += self.event_header_row() <NEW_LINE> tbody = Html("tbody") <NEW_LINE> table += tbody <NEW_LINE> for evt_ref in event_ref_list: <NEW_LINE> <INDENT> event = self.r_db.get_event_from_handle(evt_ref.ref) <NEW_LINE> tbody += self.display_event_row(event, evt_ref, place_lat_long, uplink=True, hyperlink=True, omit=EventRoleType.FAMILY) <NEW_LINE> <DEDENT> <DEDENT> return table
|
Displays the event row for events such as marriage and divorce
@param: event_ref_list -- List of events reference
@param: place_lat_long -- For use in Family Map Pages. This will be
None if called from Family pages, which do
not create a Family Map
|
625941bc24f1403a92600a50
|
def put(self, request, *args, **kwargs): <NEW_LINE> <INDENT> return self.update(request, *args, **kwargs)
|
Handle PUT requests by delegating to update(); can be used to create an object if it does not exist
|
625941bc21a7993f00bc7bd2
|
def __init__(self, categories=None, description=None, details=None, exact_matches=None, id=None, name=None, symbol=None, synonyms=None, uri=None): <NEW_LINE> <INDENT> self._categories = None <NEW_LINE> self._description = None <NEW_LINE> self._details = None <NEW_LINE> self._exact_matches = None <NEW_LINE> self._id = None <NEW_LINE> self._name = None <NEW_LINE> self._symbol = None <NEW_LINE> self._synonyms = None <NEW_LINE> self._uri = None <NEW_LINE> self.discriminator = None <NEW_LINE> if categories is not None: <NEW_LINE> <INDENT> self.categories = categories <NEW_LINE> <DEDENT> if description is not None: <NEW_LINE> <INDENT> self.description = description <NEW_LINE> <DEDENT> if details is not None: <NEW_LINE> <INDENT> self.details = details <NEW_LINE> <DEDENT> if exact_matches is not None: <NEW_LINE> <INDENT> self.exact_matches = exact_matches <NEW_LINE> <DEDENT> if id is not None: <NEW_LINE> <INDENT> self.id = id <NEW_LINE> <DEDENT> if name is not None: <NEW_LINE> <INDENT> self.name = name <NEW_LINE> <DEDENT> if symbol is not None: <NEW_LINE> <INDENT> self.symbol = symbol <NEW_LINE> <DEDENT> if synonyms is not None: <NEW_LINE> <INDENT> self.synonyms = synonyms <NEW_LINE> <DEDENT> if uri is not None: <NEW_LINE> <INDENT> self.uri = uri
|
BeaconConceptWithDetails - a model defined in Swagger
|
625941bc23e79379d52ee44d
|
def RRDB_Model(size, channels, cfg_net, gc=32, wd=0., name='RRDB_model'): <NEW_LINE> <INDENT> nf, nb = cfg_net['nf'], cfg_net['nb'] <NEW_LINE> lrelu_f = functools.partial(LeakyReLU, alpha=0.2) <NEW_LINE> rrdb_f = functools.partial(ResInResDenseBlock, nf=nf, gc=gc, wd=wd) <NEW_LINE> conv_f = functools.partial(Conv2D, kernel_size=3, padding='same', bias_initializer='zeros', kernel_initializer=_kernel_init(), kernel_regularizer=_regularizer(wd)) <NEW_LINE> rrdb_truck_f = tf.keras.Sequential( [rrdb_f(name="RRDB_{}".format(i)) for i in range(nb)], name='RRDB_trunk') <NEW_LINE> x = inputs = Input([size, size, channels], name='input_image') <NEW_LINE> fea = conv_f(filters=nf, name='conv_first')(x) <NEW_LINE> fea_rrdb = rrdb_truck_f(fea) <NEW_LINE> trunck = conv_f(filters=nf, name='conv_trunk')(fea_rrdb) <NEW_LINE> fea = fea + trunck <NEW_LINE> size_fea_h = tf.shape(fea)[1] if size is None else size <NEW_LINE> size_fea_w = tf.shape(fea)[2] if size is None else size <NEW_LINE> fea_resize = tf.image.resize(fea, [size_fea_h * 2, size_fea_w * 2], method='nearest', name='upsample_nn_1') <NEW_LINE> fea = conv_f(filters=nf, activation=lrelu_f(), name='upconv_1')(fea_resize) <NEW_LINE> fea_resize = tf.image.resize(fea, [size_fea_h * 4, size_fea_w * 4], method='nearest', name='upsample_nn_2') <NEW_LINE> fea = conv_f(filters=nf, activation=lrelu_f(), name='upconv_2')(fea_resize) <NEW_LINE> fea = conv_f(filters=nf, activation=lrelu_f(), name='conv_hr')(fea) <NEW_LINE> out = conv_f(filters=channels, name='conv_last')(fea) <NEW_LINE> return Model(inputs, out, name=name)
|
Residual-in-Residual Dense Block based Model
|
625941bc66656f66f7cbc091
|
def gower_distances(data, observation): <NEW_LINE> <INDENT> dtypes = data.dtypes <NEW_LINE> data = _normalize_mixed_data_columns(data) <NEW_LINE> observation = _normalize_mixed_data_columns(observation) <NEW_LINE> ranges = _calc_range_mixed_data_columns(data, observation, dtypes) <NEW_LINE> return np.array([_gower_dist(row, observation, ranges, dtypes) for row in data])
|
Return an array of distances between all observations and a chosen one
Based on:
https://sourceforge.net/projects/gower-distance-4python
https://beta.vu.nl/nl/Images/stageverslag-hoven_tcm235-777817.pdf
:type data: DataFrame
:type observation: pandas Series
|
625941bc1f037a2d8b9460e5
|
def test_config_metadata(self): <NEW_LINE> <INDENT> result = self.driver.get_config_metadata() <NEW_LINE> self.assert_(isinstance(result, dict)) <NEW_LINE> self.assert_(isinstance(result[ConfigMetadataKey.DRIVER], dict)) <NEW_LINE> self.assert_(isinstance(result[ConfigMetadataKey.COMMANDS], dict)) <NEW_LINE> self.assert_(isinstance(result[ConfigMetadataKey.PARAMETERS], dict)) <NEW_LINE> self.assertEquals(len(result[ConfigMetadataKey.DRIVER]), 1) <NEW_LINE> self.assertEquals(result[ConfigMetadataKey.DRIVER], {DriverDictKey.VENDOR_SW_COMPATIBLE:True}) <NEW_LINE> self.assertEquals(len(result[ConfigMetadataKey.COMMANDS]), 2) <NEW_LINE> self.assert_("cmd1" in result[ConfigMetadataKey.COMMANDS].keys()) <NEW_LINE> self.assert_("cmd2" in result[ConfigMetadataKey.COMMANDS].keys()) <NEW_LINE> self.assertEquals(len(result[ConfigMetadataKey.PARAMETERS]), 4) <NEW_LINE> self.assert_("foo" in result[ConfigMetadataKey.PARAMETERS].keys()) <NEW_LINE> self.assert_("bar" in result[ConfigMetadataKey.PARAMETERS].keys()) <NEW_LINE> self.assert_("baz" in result[ConfigMetadataKey.PARAMETERS].keys()) <NEW_LINE> self.assert_("bat" in result[ConfigMetadataKey.PARAMETERS].keys())
|
Test the metadata structure fetch
|
625941bc71ff763f4b54956e
|
def years_to_weeks(years): <NEW_LINE> <INDENT> return convert(years, 'years', 'weeks')
|
Convert years to weeks.
|
625941bc627d3e7fe0d68d35
|
def add_deployment_structure(cls): <NEW_LINE> <INDENT> script_dir = os.path.dirname(__file__) <NEW_LINE> source = script_dir+"/jboss-deployment-structure.xml" <NEW_LINE> ear_path = input('please enter the path to the src/main/application directory of the ear project : ') <NEW_LINE> subprocess.call(["mkdir", "-p", ear_path+"/META-INF"]) <NEW_LINE> subprocess.call(["cp", source, ear_path+"/META-INF"])
|
add jboss-deployment-structure to ear
|
625941bc6e29344779a624fb
|
def segments(self): <NEW_LINE> <INDENT> paired = zip(self.points, self.points[1:] + [self.points[0]]) <NEW_LINE> return [LineSegment3(a, b) for a, b in paired]
|
Returns all line segments composing this polygon's edges.
|
625941bc91af0d3eaac9b8fc
|
def test_message_delete_out(self): <NEW_LINE> <INDENT> response = self.client.get( reverse('messaging_message_delete', args=[self.message.id])) <NEW_LINE> self.assertRedirects(response, reverse('user_login'))
|
Test that deleting a message at /message/delete/<message_id> while logged out redirects to the login page
|
625941bc16aa5153ce36235f
|
def test_submit(self): <NEW_LINE> <INDENT> pass
|
Test case for submit
Request document export
|
625941bcb57a9660fec33768
|
def getMatches(winner=False): <NEW_LINE> <INDENT> dbconn = connect() <NEW_LINE> cursor = dbconn.cursor() <NEW_LINE> if winner: <NEW_LINE> <INDENT> cursor.execute("SELECT player1_id, player2_id, winner_id FROM match;") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> cursor.execute("SELECT player1_id, player2_id FROM match;") <NEW_LINE> <DEDENT> matches = cursor.fetchall() <NEW_LINE> dbconn.close() <NEW_LINE> return matches
|
Return a list of matches played, optionally detailing who won.
|
625941bcf548e778e58cd463
|
@rule(logs=['duo:authentication'], outputs=['aws-s3:sample-bucket', 'pagerduty:sample-integration', 'slack:sample-channel']) <NEW_LINE> def duo_fraud(rec): <NEW_LINE> <INDENT> return rec['result'] == 'FRAUD'
|
author: airbnb_csirt
description: Alert on any Duo authentication logs marked as fraud.
reference: https://duo.com/docs/adminapi#authentication-logs
playbook: N/A
|
625941bc29b78933be1e5599
|
def start(self, application, port, host='0.0.0.0', backlog=128): <NEW_LINE> <INDENT> self._host = host <NEW_LINE> self._port = port <NEW_LINE> try: <NEW_LINE> <INDENT> info = socket.getaddrinfo(self._host, self._port, socket.AF_UNSPEC, socket.SOCK_STREAM)[0] <NEW_LINE> family = info[0] <NEW_LINE> bind_addr = info[-1] <NEW_LINE> self._socket = eventlet.listen(bind_addr, family=family, backlog=backlog) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> LOG.error("unable to bind the port : %s, maybe it's in use", self._port) <NEW_LINE> sys.exit(1) <NEW_LINE> <DEDENT> self._server = self.pool.spawn(self._run, application, self._socket)
|
Run a WSGI server with the given application.
|
625941bc8c0ade5d55d3e8a6
|
def create_task(self, message): <NEW_LINE> <INDENT> task = SpikeTask(message) <NEW_LINE> if not task.valid: <NEW_LINE> <INDENT> log.error("Failed to process task, task invalid") <NEW_LINE> raise ProcessingError("Failed to process task, task invalid") <NEW_LINE> <DEDENT> executor = self.get_executor(task) <NEW_LINE> if not executor: <NEW_LINE> <INDENT> log.error("Failed to create executor for task %r", message) <NEW_LINE> raise ProcessingError("Failed to process task, failed to create executor for task %r" % message) <NEW_LINE> <DEDENT> return task
|
Prepare an executor for the provided message and return the task
|
625941bce8904600ed9f1e10
|
def __init__(self, conf_file, fsuae_options, configuration): <NEW_LINE> <INDENT> super(ArchiveBase, self).__init__(conf_file, fsuae_options, configuration) <NEW_LINE> self.arch_filepath = None
|
Params:
conf_file: a relative path to the provided configuration file
fsuae_options: a CmdOption object created from command line
parameters
configuration: a config dictionary created from the config file
|
625941bca79ad161976cc02c
|
def load_pipeline_proto(filename): <NEW_LINE> <INDENT> def _revise_name(filename, offset): <NEW_LINE> <INDENT> filename, postfix = filename.split('.record.') <NEW_LINE> filename = '{}.record.{}'.format(filename, (int(postfix) + offset) % 10) <NEW_LINE> return filename <NEW_LINE> <DEDENT> pipeline_proto = pipeline_pb2.Pipeline() <NEW_LINE> with open(filename, 'r') as fp: <NEW_LINE> <INDENT> text_format.Merge(fp.read(), pipeline_proto) <NEW_LINE> <DEDENT> if FLAGS.number_of_steps > 0: <NEW_LINE> <INDENT> pipeline_proto.train_config.number_of_steps = FLAGS.number_of_steps <NEW_LINE> pipeline_proto.eval_config.number_of_steps = FLAGS.number_of_steps <NEW_LINE> <DEDENT> pipeline_proto.example_reader.batch_size = 1 <NEW_LINE> pipeline_proto.example_reader.num_epochs = 1 <NEW_LINE> if FLAGS.split > -1: <NEW_LINE> <INDENT> for _ in xrange(4): <NEW_LINE> <INDENT> del pipeline_proto.example_reader.input_path[-1] <NEW_LINE> <DEDENT> n_files = len(pipeline_proto.example_reader.input_path) <NEW_LINE> for i in xrange(n_files): <NEW_LINE> <INDENT> pipeline_proto.example_reader.input_path[i] = _revise_name( pipeline_proto.example_reader.input_path[i], FLAGS.split + 6) <NEW_LINE> <DEDENT> <DEDENT> return pipeline_proto
|
Load pipeline proto file.
Args:
filename: path to the pipeline proto file.
Returns:
pipeline_proto: an instance of pipeline_pb2.Pipeline
|
625941bc3eb6a72ae02ec3bc
|
def get_icon_url(self): <NEW_LINE> <INDENT> return '/extensions/tags/gcb/resources/youtube.png'
|
Return the URL for the icon to be displayed in the rich text editor.
Images should be placed in a folder called 'resources' inside the main
package for the tag definitions.
|
625941bc3617ad0b5ed67ddf
|
def build_preprocessor(): <NEW_LINE> <INDENT> processor = PreProcessor() <NEW_LINE> return processor
|
Build the default preprocessor.
|
625941bc6e29344779a624fc
|
def add_request_schema(self, method, api_obj, description=None): <NEW_LINE> <INDENT> method = method.upper() <NEW_LINE> self.request_schemas[method] = schemas.RequestSchema(api_obj, description) <NEW_LINE> self._request_schemas[method] = api_obj
|
Add a request schema.
Args:
method (str): The HTTP method, usually GET or POST.
api_obj (:class:`porter.schemas.ApiObject`): The request data schema.
description (str): Description of the schema. Optional.
|
625941bcd8ef3951e3243424
|
def test_create_voucher_non_existan_redeem_product(self): <NEW_LINE> <INDENT> c = Customer(**self.new_customer) <NEW_LINE> c.save() <NEW_LINE> voucher_data = {"redeemed_with": 10} <NEW_LINE> url = self.get_url(self.VOUCH_LIST_ENDP, args=[c.pk+1]) <NEW_LINE> response = self.client.post(url, voucher_data) <NEW_LINE> self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
|
Test that tries to create a normal voucher redeeming a non-existent product.
This tests the POST endpoint /loyal/customer/${id}/vouchers.
|
625941bc099cdd3c635f0b43
|
def wallet_status(self): <NEW_LINE> <INDENT> return self._api_query('getwalletstatus')
|
Array of Wallet Statuses
Resultset contains:
currencyid Integer value representing a currency
name Name for this currency, for example: Bitcoin
code Currency code, for example: BTC
blockcount Blockcount of currency hot wallet as of lastupdate time
difficulty Difficulty of currency hot wallet as of lastupdate time
version Version of currency hotwallet as of lastupdate time
peercount Connected peers of currency hot wallet as of lastupdate time
hashrate Network hashrate of currency hot wallet as of lastupdate time
gitrepo Git Repo URL for this currency
withdrawalfee Fee charged for withdrawals of this currency
lastupdate Datetime (EST) the hot wallet information was last updated
|
625941bc15baa723493c3e5a
|
def hover(*args): <NEW_LINE> <INDENT> return SCREEN.hover(*args)
|
**hover** Move the mouse pointer to the given target (args[0])
if the target is
- not given, it will be lastMatch or center (if no lastMatch) of this Region
- an image-filename, a Pattern or an Image, it will first be searched and the valid Match's center/targetOffset will be the target
- a Match: target will be center/targetOffset of the Match
- a Region: target will be center of the Region
- a Location: will be the target
- (int, int): explicit target coordinates
:param args: see above
:return: int: 1 if done without errors, 0 otherwise
|
625941bc63f4b57ef0001007
|
def main(parents, offspring, snpmap): <NEW_LINE> <INDENT> snp_num = atcg_to_numeric(parents, snpmap) <NEW_LINE> par = get_parents(offspring) <NEW_LINE> founders = create_mpmap_mat(parents, snpmap, snp_num, 0, par=par) <NEW_LINE> final = create_mpmap_mat(offspring, snpmap, snp_num, 1) <NEW_LINE> m_map = create_map(snpmap) <NEW_LINE> founder_fname = os.path.basename(parents).replace('.ped', '_mpMap.txt') <NEW_LINE> handle = open(founder_fname, 'w') <NEW_LINE> for row in founders: <NEW_LINE> <INDENT> handle.write('\t'.join(row) + '\n') <NEW_LINE> <DEDENT> handle.close() <NEW_LINE> final_fname = os.path.basename(offspring).replace('.ped', '_mpMap.txt') <NEW_LINE> handle = open(final_fname, 'w') <NEW_LINE> for row in final: <NEW_LINE> <INDENT> handle.write('\t'.join(row) + '\n') <NEW_LINE> <DEDENT> handle.close() <NEW_LINE> map_fname = os.path.basename(snpmap).replace('.map', '_mpMap.map') <NEW_LINE> handle = open(map_fname, 'w') <NEW_LINE> for row in m_map: <NEW_LINE> <INDENT> handle.write('\t'.join(row) + '\n') <NEW_LINE> <DEDENT> handle.close() <NEW_LINE> return
|
Main function.
|
625941bcfff4ab517eb2f321
|
def test_created_with_attribute_accepts_only_string_or_None(self): <NEW_LINE> <INDENT> self.assertRaises(TypeError, setattr, self.test_version, 'created_with', 234)
|
Test that a TypeError is raised if the created_with attribute
is set to a value other than a string or None
|
625941bc498bea3a759b9997
|
def get_most_recent_messages_per_user(telegram=False, **kwargs): <NEW_LINE> <INDENT> isp = inspect.stack() <NEW_LINE> th = TwitterHelpers.twitter_fpaths <NEW_LINE> df_uref = pd.read_parquet(th('user_ref')) <NEW_LINE> testing = kwargs.get('testing', False) <NEW_LINE> verbose = kwargs.get('verbose', False) <NEW_LINE> max_results = kwargs.get('max_results', 5) <NEW_LINE> offset = kwargs.get('offset', 75) <NEW_LINE> call_list = [] <NEW_LINE> for index, row in df_uref.iterrows(): <NEW_LINE> <INDENT> start_time = getDate.tz_aware_dt_now(offset=offset, rfcc=True, utc=True) <NEW_LINE> end_time = getDate.tz_aware_dt_now(rfcc=True, utc=True) <NEW_LINE> if verbose: <NEW_LINE> <INDENT> help_print_arg(f"Start time: {start_time}. End time: {end_time}") <NEW_LINE> <DEDENT> params = ({'username': row['username'], 'params': {'max_results': max_results, 'start_time': start_time, 'end_time': end_time, 'exclude': 'retweets,replies'}}) <NEW_LINE> call = TwitterAPI(method='user_tweets', **params) <NEW_LINE> time.sleep(.5) <NEW_LINE> df_view = call.df <NEW_LINE> if verbose: <NEW_LINE> <INDENT> crh = call.get.raw.getheaders() <NEW_LINE> msg = f"{row['username']} Twitter calls remaining {crh['x-rate-limit-remaining']}" <NEW_LINE> help_print_arg(msg) <NEW_LINE> call_list.append(call) <NEW_LINE> <DEDENT> if isinstance(df_view, pd.DataFrame): <NEW_LINE> <INDENT> if not df_view.empty: <NEW_LINE> <INDENT> gte_df = GetTimestampsForEachRelTweet(row['id'], testing=False, verbose=verbose).df <NEW_LINE> if verbose: <NEW_LINE> <INDENT> msg = f"there are {str(gte_df.shape[0])} timestamps needed" <NEW_LINE> help_print_arg(msg, isp) <NEW_LINE> <DEDENT> if telegram: <NEW_LINE> <INDENT> val = check_for_unsent_telegram_messages(user_id=row['id'], **kwargs) <NEW_LINE> if testing: <NEW_LINE> <INDENT> call_list.append(val) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> if testing: <NEW_LINE> <INDENT> return call_list
|
Get each user's most recent tweets within a recent time window (the offset
kwarg, default 75); optionally check for unsent Telegram messages.
|
625941bc91af0d3eaac9b8fd
|
def test_center_all(self): <NEW_LINE> <INDENT> chr1 = self.ex_cnr.in_range('chr1') <NEW_LINE> self.assertAlmostEqual(0, numpy.median(chr1.coverage), places=1) <NEW_LINE> chr1.center_all() <NEW_LINE> orig_chr1_cvg = numpy.median(chr1.coverage) <NEW_LINE> self.assertAlmostEqual(0, orig_chr1_cvg) <NEW_LINE> chr1plus2 = chr1.copy() <NEW_LINE> chr1plus2['coverage'] += 2.0 <NEW_LINE> chr1plus2.center_all() <NEW_LINE> self.assertAlmostEqual(numpy.median(chr1plus2.coverage), orig_chr1_cvg)
|
Test median-recentering.
|
625941bcff9c53063f47c0dc
|
def _u2B(self, tau): <NEW_LINE> <INDENT> solutionU = numpy.zeros(self.numberPoints, dtype=numpy.float64) <NEW_LINE> solutionV = numpy.zeros(self.numberPoints, dtype=numpy.float64) <NEW_LINE> tau2 = tau - self.tau0 <NEW_LINE> for point in range(self.numberPoints): <NEW_LINE> <INDENT> a1Prev = 0.0 <NEW_LINE> b1Prev = 0.0 <NEW_LINE> fact1Prev = 1.0 <NEW_LINE> a2Prev = 0.0 <NEW_LINE> b2Prev = 0.0 <NEW_LINE> fact2Prev = 1.0 <NEW_LINE> for term in range(self.numberTerms): <NEW_LINE> <INDENT> a1N, b1N, fact1N = self._timeCoeff(term, tau, a1Prev, b1Prev, fact1Prev) <NEW_LINE> a2N, b2N, fact2N = self._timeCoeff(term, tau2, a2Prev, b2Prev, fact2Prev) <NEW_LINE> daDt = tau2**term * math.exp(-tau2)/fact2N <NEW_LINE> solutionU[point] += self.pointCoeff[point, term] * (b2N - b1N + a2N) <NEW_LINE> solutionV[point] += self.pointCoeff[point, term] * (a2N/self.tau0 - a1N/self.tau0 + daDt) <NEW_LINE> a1Prev = a1N <NEW_LINE> b1Prev = b1N <NEW_LINE> fact1Prev = fact1N <NEW_LINE> a2Prev = a2N <NEW_LINE> b2Prev = b2N <NEW_LINE> fact2Prev = fact2N <NEW_LINE> <DEDENT> <DEDENT> solutionU *= 2.0 * self.velocity * self.recurrenceTime/math.pi <NEW_LINE> solutionV *= 2.0 * self.velocity * self.recurrenceTime/math.pi <NEW_LINE> return [solutionU, solutionV]
|
Computes viscoelastic solution for times greater than the recurrence time.
|
625941bc7cff6e4e8111786d
|
def main_layout_header(): <NEW_LINE> <INDENT> return html.Div( [ make_header(), dbc.Container( dbc.Row(dbc.Col(id=server.config["CONTENT_CONTAINER_ID"])), fluid=True, style={'margin-top': '12px'}, ), dcc.Location(id=server.config["LOCATION_COMPONENT_ID"], refresh=False), ] )
|
Dash layout with a top header.
|
625941bc0a50d4780f666d77
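make_header() is referenced above but not shown; a minimal sketch, assuming dash-bootstrap-components, of what such a helper could look like (the brand text and links are placeholders, not the project's actual header):

    import dash_bootstrap_components as dbc

    def make_header():
        # Simple top navbar; contents are illustrative only.
        return dbc.NavbarSimple(
            brand="App",
            children=[dbc.NavItem(dbc.NavLink("Home", href="/"))],
            fluid=True,
        )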
|
def main(): <NEW_LINE> <INDENT> sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) <NEW_LINE> sock.bind(('', 8989,)) <NEW_LINE> sock.listen(1) <NEW_LINE> while True: <NEW_LINE> <INDENT> connection, client_address = sock.accept() <NEW_LINE> print('new connection') <NEW_LINE> process = multiprocessing.Process(target=handle, args=(connection, client_address)) <NEW_LINE> process.start()
|
`Hello, World!` HTTP server; each incoming connection is handled in its own
process (see the sketch of handle below).
|
625941bc3346ee7daa2b2c51
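The handle worker is not part of the snippet; a minimal sketch, assumed for illustration only, that would produce the `Hello, World!` response the docstring describes:

    def handle(connection, client_address):
        # Drain the request (contents ignored) and send a fixed HTTP reply.
        try:
            connection.recv(4096)
            body = b'Hello, World!'
            connection.sendall(
                b'HTTP/1.1 200 OK\r\n'
                b'Content-Type: text/plain\r\n'
                b'Content-Length: ' + str(len(body)).encode() + b'\r\n'
                b'\r\n' + body
            )
        finally:
            connection.close()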
|
def _deep_merge_defaults(data, defaults): <NEW_LINE> <INDENT> changes = 0 <NEW_LINE> for key, default_value in defaults.items(): <NEW_LINE> <INDENT> if key in data: <NEW_LINE> <INDENT> if isinstance(data[key], collections.abc.Mapping): <NEW_LINE> <INDENT> child_data, child_changes = _deep_merge_defaults(data[key], default_value) <NEW_LINE> data[key] = child_data <NEW_LINE> changes += child_changes <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> data[key] = default_value <NEW_LINE> changes += 1 <NEW_LINE> <DEDENT> <DEDENT> return data, changes
|
Recursively merge data and defaults, preferring data.
Only handles nested dicts and scalar values.
Modifies `data` in place.
|
625941bc711fe17d82542258
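A short usage example (the input dicts are made up for illustration):

    data = {'db': {'host': 'example.org'}, 'debug': True}
    defaults = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
    merged, changes = _deep_merge_defaults(data, defaults)
    # merged == {'db': {'host': 'example.org', 'port': 5432}, 'debug': True}
    # changes == 1, since only 'port' was filled in from the defaults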
|
def read_rad_scheme_kernels(rad_scheme): <NEW_LINE> <INDENT> datadir = '../kernel_data' <NEW_LINE> kernel_fn = rad_scheme + '_t_kernel_monthly.nc' <NEW_LINE> t_lw_kernel = -read_kernel(os.path.join(datadir, kernel_fn), 'lw_kernel') <NEW_LINE> t_sw_kernel = read_kernel(os.path.join(datadir, kernel_fn), 'sw_kernel') <NEW_LINE> if 'frierson' not in rad_scheme.lower(): <NEW_LINE> <INDENT> kernel_fn = rad_scheme + '_wv_kernel_monthly.nc' <NEW_LINE> wv_lw_kernel = -read_kernel(os.path.join(datadir, kernel_fn), 'lw_kernel') <NEW_LINE> wv_sw_kernel = read_kernel(os.path.join(datadir, kernel_fn), 'sw_kernel') <NEW_LINE> <DEDENT> ts_file_name = os.path.join(datadir, rad_scheme + '_sfc_ts_kernel_monthly.nc') <NEW_LINE> ds = xr.open_dataset(ts_file_name, decode_times=False) <NEW_LINE> ts_lw_kernel = -ds.lw_kernel <NEW_LINE> try: <NEW_LINE> <INDENT> ts_lw_kernel = ts_lw_kernel.rename({'index':'month'}) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> print('Time index is month already.') <NEW_LINE> <DEDENT> coslats = np.cos(np.deg2rad(ts_lw_kernel.lat)) <NEW_LINE> ts_lw_kernel_gm = np.average(ts_lw_kernel.mean(('month', 'lon')), weights=coslats, axis=0) <NEW_LINE> print('ts_kernel: Global mean = '+str(ts_lw_kernel_gm) + ' W/K/m^2.') <NEW_LINE> lw_kernels = {'ts':ts_lw_kernel, 't':t_lw_kernel} <NEW_LINE> sw_kernels = {'t':t_sw_kernel} <NEW_LINE> total_kernels = {'t': lw_kernels['t'] + sw_kernels['t'], 'ts': lw_kernels['ts']} <NEW_LINE> if 'frierson' not in rad_scheme.lower(): <NEW_LINE> <INDENT> lw_kernels['wv'] = wv_lw_kernel <NEW_LINE> sw_kernels['wv'] = wv_sw_kernel <NEW_LINE> total_kernels['wv'] = lw_kernels['wv'] + sw_kernels['wv'] <NEW_LINE> <DEDENT> return lw_kernels, sw_kernels, total_kernels
|
Read the monthly LW/SW radiative kernel files for a radiation scheme;
rad_scheme can be 'frierson', 'byrne' or 'rrtm'.
|
625941bc0383005118ecf4cc
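A usage sketch (assumes the kernel files exist under ../kernel_data, as the function expects):

    # 't' and 'ts' are always present; 'wv' only for non-Frierson schemes.
    lw_kernels, sw_kernels, total_kernels = read_rad_scheme_kernels('rrtm')
    print(sorted(total_kernels))  # ['t', 'ts', 'wv']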
|
def global_correlation(func_reorient, func_mask): <NEW_LINE> <INDENT> import scipy <NEW_LINE> import numpy as np <NEW_LINE> from dvars import load <NEW_LINE> zero_variance_func = load(func_reorient, func_mask) <NEW_LINE> list_of_ts = zero_variance_func.transpose() <NEW_LINE> demeaned_normed = [] <NEW_LINE> for ts in list_of_ts: <NEW_LINE> <INDENT> demeaned_normed.append(scipy.stats.mstats.zscore(ts)) <NEW_LINE> <DEDENT> demeaned_normed = np.asarray(demeaned_normed) <NEW_LINE> volume_list = demeaned_normed.transpose() <NEW_LINE> avg_ts = [] <NEW_LINE> for voxel in volume_list: <NEW_LINE> <INDENT> avg_ts.append(voxel.mean()) <NEW_LINE> <DEDENT> avg_ts = np.asarray(avg_ts) <NEW_LINE> gcor = (avg_ts.transpose().dot(avg_ts)) / len(avg_ts) <NEW_LINE> return gcor
|
Calculate the global correlation (GCOR) of the functional timeseries.
- From "Correcting Brain-Wide Correlation Differences in Resting-State
fMRI", Ziad S. Saad et al. More info here:
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3749702
:type func_reorient: str
:param func_reorient: Filepath to the deobliqued, reoriented functional
timeseries NIFTI file.
:type func_mask: str
:param func_mask: Filepath to the functional brain mask NIFTI file.
:rtype: float
:return: The global correlation (GCOR) value.
|
625941bc097d151d1a222d43
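The core of GCOR is the mean square of the average unit-variance time series; a self-contained numpy/scipy check on synthetic data (independent of the dvars loader used above):

    import numpy as np
    from scipy import stats

    rng = np.random.default_rng(0)
    ts = rng.standard_normal((100, 500))   # 100 timepoints x 500 'voxels'
    avg_ts = stats.zscore(ts, axis=0).mean(axis=1)
    gcor = avg_ts @ avg_ts / len(avg_ts)
    # Near 0 for independent noise; identical voxels would give ~1.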
|
def init_plot_style(): <NEW_LINE> <INDENT> plt.style.use("seaborn-notebook") <NEW_LINE> sns.set_palette("deep")
|
Initialize the plot style for RankEval visualization utilities.
Applies the style globally; returns nothing.
|
625941bc63f4b57ef0001008
|
def set_seed(args, test_config): <NEW_LINE> <INDENT> seed = 1 <NEW_LINE> if args.seed is not None: <NEW_LINE> <INDENT> seed = args.seed <NEW_LINE> <DEDENT> elif 'seed' in test_config: <NEW_LINE> <INDENT> seed = test_config['seed'] <NEW_LINE> <DEDENT> log_info("Running test with seed {seed}".format(seed=seed)) <NEW_LINE> random.seed(seed)
|
Set the random seed: the command-line value takes precedence, then the test
config's 'seed' entry, then a default of 1.
|
625941bc046cf37aa974cc31
|