content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import functools
def _map_windows(
    df, time, method="between", periodvar="Shift Date", byvars=["PERMNO", "Date"]
):
    """
    Return a copy of ``df`` with an extra column ``__map_window__`` holding the
    index of the window each observation falls into. For example, if the
    windows are [[1],[2,3]] and the periods are 1/1/2000, 1/2/2000, 1/3/2000
    for PERMNO 10516 with byvar 'a', the resulting rows would be:
        (10516, 'a', '1/1/2000', 0),
        (10516, 'a', '1/2/2000', 1),
        (10516, 'a', '1/3/2000', 1),
    """
    # NOTE: the mutable default for ``byvars`` is safe here because it is
    # only ever read, never mutated.
    out = df.copy()  # don't overwrite original dataframe
    mapper = functools.partial(window_mapping, time, method=method)
    transformed_col = periodvar + "_transform"
    if byvars:
        out = groupby_merge(out, byvars, "transform", (mapper), subset=periodvar)
    else:
        out[transformed_col] = mapper(out[periodvar])
    return out.rename(columns={transformed_col: "__map_window__"})
def unpack_context(msg):
    """Extract RPC context values from *msg* and build an RpcContext.

    Keys prefixed with '_context_' are popped from the message and stored
    under their unprefixed names; the well-known transport keys
    ('_msg_id', '_reply_q', '_timeout') are popped separately.
    Note that *msg* is mutated in place.
    """
    prefix = '_context_'
    context_dict = {}
    # Iterate over a snapshot of the keys because we pop from msg as we go.
    for key in list(msg.keys()):
        key = str(key)
        if key.startswith(prefix):
            context_dict[key[len(prefix):]] = msg.pop(key)
    context_dict['msg_id'] = msg.pop('_msg_id', None)
    context_dict['reply_q'] = msg.pop('_reply_q', None)
    context_dict['client_timeout'] = msg.pop('_timeout', None)
    return RpcContext.from_dict(context_dict)
def concat_cols(*args):
    """
    Stack the given column vectors side by side into a single matrix.

    Each argument may be a sympy Matrix, a plain list (interpreted as a
    column vector), or a scalar (wrapped in a 1x1 Matrix). Multi-column
    matrices are split into their individual columns first.
    """
    columns = []
    for item in args:
        if isinstance(item, list):
            # convenience: interpret a list as a column Matrix
            item = sp.Matrix(item)
        if not item.is_Matrix:
            # convenience: allow stacking scalars
            item = sp.Matrix([item])
        if item.shape[1] == 1:
            columns.append(list(item))
        else:
            # split a multi-column matrix into its columns
            for j in range(item.shape[1]):
                columns.append(list(item[:, j]))
    # rows of `columns` are the collected columns, so transpose at the end
    return sp.Matrix(columns).T
import sys
import random
def accuracy_analogy(wv, questions, most_similar, evalVocab, topn=10, case_insensitive=True,usePhrase=True, sample=0):
    """
    Compute accuracy of the model. `questions` is a filename where lines are
    4-tuples of words, split into sections by ": SECTION NAME" lines.
    See questions-words.txt in https://storage.googleapis.com/google-code-archive-source/v2/code.google.com/word2vec/source-archive.zip for an example.
    The accuracy is reported (=printed to log and returned as a list) for each
    section separately, plus there's one aggregate summary at the end.
    Use `restrict_vocab` to ignore all questions containing a word not in the first `restrict_vocab`
    words (default 30,000). This may be meaningful if you've sorted the vocabulary by descending frequency.
    In case `case_insensitive` is True, the first `restrict_vocab` words are taken first, and then
    case normalization is performed.
    Use `case_insensitive` to convert all words in questions and vocab to their uppercase form before
    evaluating the accuracy (default True). Useful in case of case-mismatch between training tokens
    and question words. In case of multiple case variants of a single word, the vector for the first
    occurrence (also the most frequent if vocabulary is sorted) is taken.
    sample: use part of the evaluation data, for test only
    This method corresponds to the `compute-accuracy` script of the original C word2vec.
    """
    # Alias so the body reads like a KeyedVectors method (this is a free function).
    self=wv
    ok_vocab = self.vocab
    # Lower-case the vocab keys when matching case-insensitively.
    ok_vocab = dict((w.lower(), v) for w, v in ok_vocab.items()) if case_insensitive else ok_vocab
    sections, section = [], None
    for line_no, line in enumerate(utils.smart_open(questions)):
        # TODO: use level3 BLAS (=evaluate multiple questions at once), for speed
        line = utils.to_unicode(line).lower()
        if line.startswith(': '):
            # a new section starts => store the old section
            if section:
                sections.append(section)
                self.log_accuracy(section)
                print("", file=sys.stderr)
            section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []}
        else:
            # Optionally skip multi-word phrases and/or subsample questions.
            if usePhrase == False and "_" in line: continue
            if sample > 0 and random.random() > sample: continue # sample question, for test only
            if not section:
                raise ValueError("missing section header before line #%i in %s" % (line_no, questions))
            try:
                if case_insensitive:
                    a, b, c, expected = [word.lower() for word in line.split()]
                else:
                    a, b, c, expected = [word for word in line.split()]
            except:
                print("skipping invalid line in %s, %s" % (section['section'], line.strip()), file=sys.stderr)
                continue
            # Temporarily swap in the (possibly case-normalized) vocab so that
            # safe_phrase/most_similar operate on the normalized lookup table.
            original_vocab = self.vocab
            self.vocab = ok_vocab
            a2 = safe_phrase(wv,a,most_similar,evalVocab)
            b2 = safe_phrase(wv,b,most_similar,evalVocab)
            c2 = safe_phrase(wv,c,most_similar,evalVocab)
            expected2 = safe_phrase(wv,expected,most_similar,evalVocab)
            if len(a2)==0 or len(b2)==0 or len(c2)==0 or len(expected2)==0:
                print("skipping line in %s with OOV words: %s" % (section['section'], line.strip()), file=sys.stderr)
                print(a2,b2,c2,expected2,file=sys.stderr)
                continue
            #print("%s found words: %s\n" % (section['section'], line.strip()), file=sys.stderr)
            ignore = set(a2+b2+c2) # input words to be ignored
            predicted = None
            # find the most likely prediction, ignoring OOV words and input words
            sims = most_similar(self, positive=b2+c2, negative=a2, topn=topn, restrict_vocab=None)
            self.vocab = original_vocab
            # NOTE(review): unlike the original compute-accuracy (which scores
            # only the FIRST valid prediction), this loop scans all topn
            # candidates and counts the question correct if ANY valid candidate
            # matches `expected2` — confirm this scoring rule is intended.
            for word, dist in sims:
                predicted = word.lower() if case_insensitive else word
                if predicted in ok_vocab and predicted not in ignore:
                    if predicted in expected2:
                        break # found.
            if predicted in expected2:
                section['correct'].append((a, b, c, expected))
            else:
                section['incorrect'].append((a, b, c, expected))
            # Running per-section accuracy, updated in place on stderr.
            print("\r%s: %0.2f%% correct %d, incorrect %d" %\
                (section['section'],100.0*len(section['correct'])/(len(section['correct'])+len(section['incorrect'])), len(section['correct']), len(section['incorrect'])),\
                end='', file=sys.stderr)
    if section:
        # store the last section, too
        sections.append(section)
        self.log_accuracy(section)
    # total = {
    #     'section': 'total',
    #     'correct': sum((s['correct'] for s in sections), []),
    #     'incorrect': sum((s['incorrect'] for s in sections), []),
    # }
    # self.log_accuracy(total)
    # sections.append(total)
    return sections | 6e0af67d04cdb09ccd5c490018bfcba17dfbd908 | 3,629,903 |
def gen_connected_locations(shape, count, separation, margin=0):
    """ Generates `count` number of positions within `shape` that are touching.
    If a `margin` is given, positions will be inside this margin. Margin may be
    tuple-valued. """
    margin = validate_tuple(margin, len(shape))
    # Center of the usable region, after accounting for the margin.
    center = margin + np.round(np.subtract(shape, margin) / 2.0)
    # Integer offsets roughly symmetric around zero: -count//2 .. count//2 - 1.
    offsets = np.arange(count) - np.round(count / 2.0)
    # Place each point `offset * separation` away from the center.
    return np.array([np.add(center, np.multiply(k, separation)) for k in offsets])
import warnings
def transp():
    """
    Instantiate the Transp() class and return its widget for display.

    Runs only in Jupyter notebook or JupyterLab. Requires bqplot.
    """
    # Silence FutureWarnings that would otherwise clutter the notebook output.
    warnings.simplefilter(action='ignore', category=FutureWarning)
    return Transp().widget
def oidc_supported(transfer_hop: DirectTransferDefinition) -> bool:
    """
    Check OIDC AuthN/Z support for a transfer hop.

    OIDC is considered supported only when the destination RSE *and* every
    source RSE explicitly set the boolean 'oidc_support' RSE attribute.
    """
    # Destination must opt in first; otherwise there is nothing to check.
    if not transfer_hop.dst.rse.attributes.get('oidc_support', False):
        return False
    # Every source must opt in as well.
    return all(
        source.rse.attributes.get('oidc_support', False)
        for source in transfer_hop.sources
    )
import os
def find_jest_configuration_file(file_name, folders):
    """
    Find the first Jest configuration file.

    Jest's configuration can be defined in the package.json file of your project,
    or through a jest.config.js, or jest.config.ts; we only search the last two
    files, walking upwards from the file's directory until leaving the common
    prefix of *folders*.

    :param file_name: path of the file to find a Jest configuration for
    :param folders: list of project folders bounding the upward search
    :return: path of the configuration file, or None when none is found or
        the inputs are invalid
    """
    debug_message('find configuration for \'%s\' ...', file_name)
    debug_message('    found %d folders %s', len(folders) if folders else 0, folders)
    # Validate inputs: both must be of the expected type and non-empty.
    if not isinstance(file_name, str) or not file_name:
        return None
    if not isinstance(folders, list) or not folders:
        return None
    # Collect the ancestor directories of file_name, bounded by the common
    # prefix of the project folders, and search them deepest-first.
    ancestor_folders = []  # type: list
    common_prefix = os.path.commonprefix(folders)
    parent = os.path.dirname(file_name)
    while parent not in ancestor_folders and parent.startswith(common_prefix):
        ancestor_folders.append(parent)
        parent = os.path.dirname(parent)
    ancestor_folders.sort(reverse=True)
    debug_message('    found %d possible locations %s', len(ancestor_folders), ancestor_folders)
    candidate_configuration_file_names = ['jest.config.js', 'jest.config.ts']
    debug_message('    looking for %s ...', candidate_configuration_file_names)
    for folder in ancestor_folders:
        # BUGFIX: use a dedicated loop variable instead of shadowing the
        # `file_name` parameter as the original did.
        for candidate_name in candidate_configuration_file_names:
            configuration_file = os.path.join(folder, candidate_name)
            if os.path.isfile(configuration_file):
                debug_message('        found configuration \'%s\'', configuration_file)
                return configuration_file
    debug_message('    no configuration found')
    return None
import sympy
def replace_heaviside(formula):
    """Rewrite Heaviside(x) as Heaviside(x, 0), i.e. set Heaviside(0) = 0.

    Differentiating sympy Min and Max produces Heaviside terms:
    Heaviside(x) = 0 if x < 0 and 1 if x > 0, but Heaviside(0) needs to be
    defined by the user. We set Heaviside(0) to 0 because in general there
    is no sensitivity at that point; this is done by passing 0 as the
    second argument.
    """
    # Non-sympy objects (e.g. plain numbers) pass through untouched.
    if not isinstance(formula, sympy.Expr):
        return formula
    wild = sympy.Wild("w")
    return formula.replace(sympy.Heaviside(wild), sympy.Heaviside(wild, 0))
import click
import time
def check_enrolled_factors(ctx, users):
    """Check for users that have no MFA factors enrolled.

    Iterates over *users*, listing each user's enrolled factors via the Okta
    API (sleeping 1s per user to respect rate limits), logs/indexes findings,
    optionally prints and saves the offenders, and returns them as a list.
    """
    users_without_mfa = []
    msg = (
        f"Checking enrolled MFA factors for {len(users)} users. This may take a while to avoid exceeding API "
        f"rate limits"
    )
    LOGGER.info(msg)
    index_event(ctx.obj.es, module=__name__, event_type="INFO", event=msg)
    click.echo(f"[*] {msg}")
    # Don't put print statements under click.progressbar otherwise the progress bar will be interrupted
    with click.progressbar(users, label="[*] Checking for users without MFA enrolled") as users:
        for okta_user in users:
            user = OktaUser(okta_user)
            factors, error = user.list_enrolled_factors(ctx, mute=True)
            # Stop trying to check enrolled MFA factors if the current API token doesn't have that permission
            # NOTE(review): this early return yields None instead of a list —
            # callers that iterate the result should be aware; confirm intended.
            if error:
                return
            if not factors:
                users_without_mfa.append(user.obj)
                msg = f'User {user.obj["id"]} does not have any MFA factors enrolled'
                LOGGER.info(msg)
                index_event(ctx.obj.es, module=__name__, event_type="INFO", event=msg)
            # Sleep for 1s to avoid exceeding API rate limits
            time.sleep(1)
    if users_without_mfa:
        msg = f"Found {len(users_without_mfa)} users without any MFA factors enrolled"
        LOGGER.info(msg)
        index_event(ctx.obj.es, module=__name__, event_type="INFO", event=msg)
        click.secho(f"[*] {msg}", fg="green")
        if click.confirm("[*] Do you want to print information for users without MFA?", default=True):
            for user in users_without_mfa:
                okta_user = OktaUser(user)
                okta_user.print_info()
        if click.confirm("[*] Do you want to save users without any MFA factors enrolled to a file?", default=True):
            file_path = f"{ctx.obj.data_dir}/{ctx.obj.profile_id}_users_without_mfa"
            write_json_file(file_path, users_without_mfa)
    else:
        msg = "No users found without any MFA factors enrolled"
        LOGGER.info(msg)
        index_event(ctx.obj.es, module=__name__, event_type="INFO", event=msg)
        click.echo(f"[*] {msg}")
    return users_without_mfa | 6ab747684d4b39622149b2ead072b1fd1ac2fc6a | 3,629,909 |
def entry_cmp(sqlite_file1, sqlite_file2):
    """
    Compare two sqlite file entries in zookeeper to determine their ordering.

    The ordering is derived from the journal sequence ids embedded in the
    entry names.
    """
    first = _get_journal_seqid(sqlite_file1)
    second = _get_journal_seqid(sqlite_file2)
    return sequence_cmp(first, second)
import functools
def fill_cn(bcm, n_metal2, max_search=50, low_first=True, return_n=None,
            verbose=False):
    """
    NOTE: Most likely broken - still need to extend to polymetallic cases
    Algorithm to fill the lowest (or highest) coordination sites with 'metal2'
    Args:
    bcm (atomgraph.AtomGraph | atomgraph.BCModel): bcm obj
    n_metal2 (int): number of dopants
    KArgs:
    max_search (int): if there are a number of possible structures with
                      partially-filled sites, the function will search
                      max_search options and return lowest CE structure
                      (Default: 50)
    low_first (bool): if True, fills low CNs, else fills high CNs
                      (Default: True)
    return_n (bool): if > 0, function will return a list of possible
                     structures
                     (Default: None)
    verbose (bool): if True, function will print info to console
                    (Default: False)
    Returns:
    if return_n > 0:
        (list): list of chemical ordering np.ndarrays
    else:
        (np.ndarray), (float): chemical ordering np.ndarray with its calculated CE
    Raises:
    ValueError: not enough options to produce <return_n> sample size
    """
    # NOTE(review): `op` (operator), `it` (itertools) and `np` are assumed to
    # be imported at module level — they are not visible in this chunk.
    def ncr(n: int, r: int) -> int:
        """
        N choose r function (combinatorics)
        Args:
        n: from 'n' choices
        r: choose r without replacement
        Returns:
        total combinations
        """
        r = min(r, n - r)
        numer = functools.reduce(op.mul, range(n, n - r, -1), 1)
        denom = functools.reduce(op.mul, range(1, r + 1), 1)
        return numer // denom
    # handle monometallic cases efficiently
    if n_metal2 in [0, bcm.num_atoms]:
        # all '0's if no metal2, else all '1' since all metal2
        struct_min = [np.zeros, np.ones][bool(n_metal2)](bcm.num_atoms)
        struct_min = struct_min.astype(int)
        ce = bcm.getTotalCE(struct_min)
        checkall = True
    else:
        cn_list = bcm.cns
        cnset = sorted(set(cn_list))
        if not low_first:
            cnset = cnset[::-1]
        struct_min = np.zeros(bcm.num_atoms).astype(int)
        ce = None
        # Fill coordination-number shells one at a time until the dopant
        # budget is exhausted or a shell must be partially filled.
        for cn in cnset:
            spots = np.where(cn_list == cn)[0]
            if len(spots) == n_metal2:
                struct_min[spots] = 1
                checkall = True
                break
            elif len(spots) > n_metal2:
                low = 0
                low_struct = None
                # check to see how many combinations exist
                options = ncr(len(spots), n_metal2)
                # return sample of 'return_n' options
                if return_n:
                    if return_n > options:
                        raise ValueError('not enough options to '
                                         'produce desired sample size')
                    sample = []
                    while len(sample) < return_n:
                        base = struct_min.copy()
                        # NOTE(review): np.random.sample does not take a
                        # population — this was likely meant to be
                        # random.sample(list(spots), n_metal2); confirm.
                        pick = np.random.sample(list(spots), n_metal2)
                        base[pick] = 1
                        sample.append(base)
                    return sample
                # if n combs < max_search, check them all
                if options <= max_search:
                    if verbose:
                        print('Checking all options')
                    searchopts = it.combinations(spots, n_metal2)
                    checkall = True
                else:
                    if verbose:
                        print("Checking {0:.2%}".format(max_search / options))
                    searchopts = range(max_search)
                    checkall = False
                # stop looking after 'max_search' counts
                for c in searchopts:
                    base = struct_min.copy()
                    if checkall:
                        pick = list(c)
                    else:
                        pick = np.random.sample(list(spots), n_metal2)
                    base[pick] = 1
                    checkce = bcm.getTotalCE(base)
                    # keep the lowest-CE structure seen so far
                    if checkce < low:
                        low = checkce
                        low_struct = base.copy()
                struct_min = low_struct
                ce = low
                break
            else:
                struct_min[spots] = 1
                n_metal2 -= len(spots)
    if not ce:
        ce = bcm.getTotalCE(struct_min)
    if return_n:
        return [struct_min]
    return struct_min, ce | 8b4ac05e0abbadc34f154e4917959627600dcf0d | 3,629,911 |
import functools
def _clear_caches_after_call(func):
    """
    Decorator: run *func*, clear module caches, then return func's result.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        value = func(*args, **kwargs)
        # Caches are cleared only after a successful call, mirroring the
        # original behavior (an exception skips the cleanup).
        _clear_caches()
        return value
    return wrapper
import os
def is_dicom(filename):
    """Return True if the file in question is a DICOM file, else False.

    Per the DICOM specs, a DICOM file starts with 128 reserved bytes
    followed by "DICM".
    ref: DICOM spec, Part 10: Media Storage and File Format for Media
    Interchange, 7.1 DICOM FILE META INFORMATION

    :param filename: path to the file to inspect
    :return: bool
    """
    if not os.path.isfile(filename):
        return False
    # Use a context manager so the handle is closed even if read() raises
    # (the original leaked the handle on error).
    with open(filename, "rb") as f:
        preamble = f.read(132)
    # A short read (file smaller than 132 bytes) cannot end with the magic.
    return preamble.endswith(b"DICM")
import random
import _bisect
def choices(population, weights=None, cum_weights=None, k=1):
"""Return a k sized list of population elements chosen with replacement.
If the relative weights or cumulative weights are not specified,
the selections are made with equal probability.
"""
n = len(population)
if cum_weights is None:
if weights is None:
_int = int
n += 0.0 # convert to float for a small speed improvement
return [population[_int(random.random() * n)] for i in _repeat(None, k)]
cum_weights = list(_accumulate(weights))
elif weights is not None:
raise TypeError('Cannot specify both weights and cumulative weights')
if len(cum_weights) != n:
raise ValueError('The number of weights does not match the population')
bisect = _bisect
total = cum_weights[-1] + 0.0 # convert to float
hi = n - 1
return [population[bisect(cum_weights, random.random() * total, 0, hi)]
for i in _repeat(None, k)] | 1161b6c43fb54b32e54bf3c45d81abfa1c7261d8 | 3,629,914 |
def summarize(text: str) -> str:
    """Summarizes the text (local mode).

    :param text: The text to summarize.
    :type text: str
    :return: The summarized text.
    :rtype: str
    """
    # Lazily initialize the module-level summarizer pipeline on first use.
    if _summarizer is None:
        load_summarizer()
    assert _summarizer is not None
    tokenizer = get_summarizer_tokenizer()
    prompt = summarizer_prompt.format(text=text)
    # Pass the prompt length so parameter formatting can budget output tokens.
    information = {"prompt_length": len(tokenizer.encode(prompt))}
    parameters = format_parameters_to_local(summarizer_parameters, information)
    generated = _summarizer(prompt, **parameters)[0]["generated_text"]
    # Trim the generation at the configured stop sequence(s).
    return cut_on_stop(generated, summarizer_parameters["stop"])
def linreg(array, dim=None, coord=None):
    """
    Compute a linear regression using a least-square method
    Parameters
    ----------
    array : xarray.DataArray
        The array on which the linear regression is computed
    dim : str, optional
        The dimension along which the data will be fitted. If not precised,
        the first dimension will be used
    coord : xarray.Coordinate, optional
        The coordinates used to based the linear regression on.
    Returns
    -------
    res : xr.Dataset
        A Dataset containing the slope and the offset of the linear regression
    """
    # Degree-1 polynomial fit: degree 0 is the offset, degree 1 the slope.
    linfit = polyfit(array, dim=dim, coord=coord, deg=1)
    offset = linfit.sel(degree=0, drop=True)
    slope = linfit.sel(degree=1, drop=True)
    return xr.Dataset({'offset': offset, 'slope': slope}) | 27ff54eccf92b3e1924316d022aeaac5abdb0bb3 | 3,629,916 |
import pkg_resources
def _gte(version):
    """ Return ``True`` if ``pymongo.version`` is greater than or equal to
    `version`.
    :param str version: Version string
    """
    current = pkg_resources.parse_version(pymongo.version)
    required = pkg_resources.parse_version(version)
    return current >= required
def read_data(path, format="turtle"):
    """
    Read an RDFLib graph from the given path.

    Arguments:
        path (str): path to a graph file
    Keyword Arguments:
        format (str): RDFLib format string (default="turtle")
    Returns:
        rdflib.Graph: a parsed rdflib.Graph
    """
    graph = rdflib.Graph()
    graph.parse(path, format=format)
    return graph
def exact_change_recursive(amount, coins):
    """ Return the number of different ways a change of 'amount' can be
    given using denominations given in the list of 'coins'
    >>> exact_change_recursive(10,[50,20,10,5,2,1])
    11
    >>> exact_change_recursive(100,[100,50,20,10,5,2,1])
    4563
    """
    assert amount >= 0
    if amount == 0:
        # Exactly one way to make zero: use no coins at all.
        return 1
    if not coins:
        # Positive amount but no denominations left: no solution possible.
        return 0
    denomination, remaining_coins = coins[0], coins[1:]
    # Use 0, 1, 2, ... coins of the current denomination (as long as their
    # total does not exceed `amount`) and count the ways to make up the
    # rest with the remaining denominations.
    solutions = 0
    for used_total in range(0, amount + 1, denomination):
        solutions += exact_change_recursive(amount - used_total, remaining_coins)
    return solutions
from numpy import diff, where, array
def detectGap(date, gapThres):
    """
    Detects gap in a date vector based on the user defined threshold.
    Parameters
    ----------
    date: list
        Dates in UTCDateTime format to detect gaps within.
    gapThres: float
        Threshold in seconds over which to detect a gap.
    Outputs
    ---------
    gapIndex: list
        Indicies of gaps
    """
    datediff = array(diff(date))
    gapIndex = where(datediff > gapThres)
    # Print gap information to screen (or somewhere)
    # BUGFIX: converted Python 2 print statements to Python 3 print calls
    # (the original was a SyntaxError under Python 3).
    print('%d gaps found of greater than %s seconds' % (len(gapIndex[0]), gapThres))
    for i in gapIndex[0]:
        gapLength = date[i + 1] - date[i]
        print('Gap: %0.4f seconds at %s' % (gapLength, date[i].strftime('%Y-%m-%d_%H:%M:%S')))
    return gapIndex[0]
def enum(*sequential, **named):
    """
    Enum implementation that supports automatic generation and also supports
    converting the values of the enum back to names.
    >>> nums = enum('ZERO', 'ONE', THREE='three')
    >>> nums.ZERO
    # 0
    >>> nums.reverse_mapping['three']
    # 'THREE'
    """
    # Positional names get auto-numbered values; keyword names keep theirs.
    members = {name: index for index, name in enumerate(sequential)}
    members.update(named)
    # Value -> name lookup table, built before it is added as an attribute.
    members['reverse_mapping'] = {value: name for name, value in members.items()}
    return type(str('Enum'), (), members)
import warnings
import warnings
from dolo.algos.steady_state import find_steady_state
from dolo.numeric.extern.lmmcp import lmmcp
from dolo.numeric.optimize.newton import newton
def deterministic_solve(
    model,
    exogenous=None,
    s0=None,
    m0=None,
    T=100,
    ignore_constraints=False,
    maxit=100,
    initial_guess=None,
    verbose=True,
    solver="ncpsolve",
    keep_steady_state=False,
    s1=None,  # deprecated
    shocks=None,  # deprecated
    tol=1e-6,
):
    """
    Computes a perfect foresight simulation using a stacked-time algorithm.
    Typical simulation exercises are:
    - start from an out-of-equilibrium exogenous and/or endogenous state: specify `s0` and or `m0`. Missing values are taken from the calibration (`model.calibration`).
    - specify an exogenous path for shocks `exogenous`. Initial exogenous state `m0` is then first value of exogenous values. Economy is supposed to have been at the equilibrium for $t<0$, which pins
    down initial endogenous state `s0`. `x0` is a jump variable.
    If $s0$ is not specified it is then set
    equal to the steady-state consistent with the first value
    The initial state is specified either by providing a series of exogenous
    shocks and assuming the model is initially in equilibrium with the first
    value of the shock, or by specifying an initial value for the states.
    Parameters
    ----------
    model : Model
        Model to be solved
    exogenous : array-like, dict, or pandas.DataFrame
        A specification for the path of exogenous variables (aka shocks). Can be any of the
        following (note by "declaration order" below we mean the order
        of `model.symbols["exogenous"]`):
        - A 1d numpy array-like specifying a time series for a single
          exogenous variable, or all exogenous variables stacked into a single array.
        - A 2d numpy array where each column specifies the time series
          for one of the shocks in declaration order. This must be an
          `N` by number of shocks 2d array.
        - A dict where keys are strings found in
          `model.symbols["exogenous"]` and values are a time series of
          values for that shock. For exogenous variables that do not appear in
          this dict, the shock is set to the calibrated value. Note
          that this interface is the most flexible as it allows the user
          to pass values for only a subset of the model shocks and it
          allows the passed time series to be of different lengths.
        - A DataFrame where columns map shock names into time series.
          The same assumptions and behavior that are used in the dict
          case apply here
        If nothing is given here, `exogenous` is set equal to the
        calibrated values found in `model.calibration["exogenous"]` for
        all periods.
        If the length of any time-series in shocks is less than `T`
        (see below) it is assumed that that particular shock will
        remain at the final given value for the duration of the
        simulation.
    s0 : None or ndarray or dict
        If vector with the value of initial states
        If an exogenous timeseries is given for exogenous shocks, `s0` will be computed as the steady-state value that is consistent with its first value.
    T : int
        horizon for the perfect foresight simulation
    maxit : int
        maximum number of iteration for the nonlinear solver
    verbose : boolean
        if True, the solver displays iterations
    tol : float
        stopping criterium for the nonlinear solver
    ignore_constraints : bool
        if True, complementarity constraints are ignored.
    keep_steady_state : bool
        if True, initial steady-states and steady-controls are appended to the simulation with date -1.
    Returns
    -------
    pandas dataframe
        a dataframe with T+1 observations of the model variables along the
        simulation (states, controls, auxiliaries). The simulation should return to a steady-state
        consistent with the last specified value of the exogenous shocks.
    """
    # Map deprecated argument names onto their replacements.
    if shocks is not None:
        warnings.warn("`shocks` argument is deprecated. Use `exogenous` instead.")
        exogenous = shocks
    if s1 is not None:
        warnings.warn("`s1` argument is deprecated. Use `s0` instead.")
        s0 = s1
    # definitions
    n_s = len(model.calibration["states"])
    n_x = len(model.calibration["controls"])
    p = model.calibration["parameters"]
    if exogenous is not None:
        epsilons = _shocks_to_epsilons(model, exogenous, T)
        m0 = epsilons[0, :]
        # get initial steady-state
        start_state = find_steady_state(model, m=m0)
        s0 = start_state["states"]
        x0 = start_state["controls"]
        m1 = epsilons[1, :]
        s1 = model.functions["transition"](m0, s0, x0, m1, p)
    else:
        # No exogenous path: fall back on calibrated initial conditions.
        if s0 is None:
            s0 = model.calibration["states"]
        if m0 is None:
            m0 = model.calibration["exogenous"]
        # if m0 is None:
        #     m0 = np.zeros(len(model.symbols['exogenous']))
        # we should probably do something here with the nature of the exogenous process if specified
        # i.e. compute nonlinear irf
        epsilons = _shocks_to_epsilons(model, exogenous, T)
        x0 = model.calibration["controls"]
        m1 = epsilons[1, :]
        s1 = model.functions["transition"](m0, s0, x0, m1, p)
        s1 = np.array(s1)
    x1_g = model.calibration["controls"]  # we can do better here
    sT_g = model.calibration["states"]  # we can do better here
    xT_g = model.calibration["controls"]  # we can do better here
    if initial_guess is None:
        # Linear interpolation between the initial and terminal steady guesses.
        start = np.concatenate([s1, x1_g])
        final = np.concatenate([sT_g, xT_g])
        initial_guess = np.row_stack(
            [start * (1 - l) + final * l for l in linspace(0.0, 1.0, T + 1)]
        )
    else:
        if isinstance(initial_guess, pd.DataFrame):
            initial_guess = np.array(
                initial_guess[model.symbols["states"] + model.symbols["controls"]]
            )
        initial_guess = initial_guess[1:, :]
        initial_guess = initial_guess[:, : n_s + n_x]
    sh = initial_guess.shape
    if model.x_bounds and not ignore_constraints:
        # Evaluate control bounds along the guessed state path; the stacked
        # solver only supports time-invariant bounds.
        initial_states = initial_guess[:, :n_s]
        [lb, ub] = [u(epsilons[:, :], initial_states, p) for u in model.x_bounds]
        lower_bound = initial_guess * 0 - np.inf
        lower_bound[:, n_s:] = lb
        upper_bound = initial_guess * 0 + np.inf
        upper_bound[:, n_s:] = ub
        test1 = max(lb.max(axis=0) - lb.min(axis=0))
        test2 = max(ub.max(axis=0) - ub.min(axis=0))
        if test1 > 0.00000001 or test2 > 0.00000001:
            msg = "Not implemented: perfect foresight solution requires that "
            msg += "controls have constant bounds."
            raise Exception(msg)
    else:
        ignore_constraints = True
        lower_bound = None
        upper_bound = None
    # NOTE(review): the return value of this det_residual call is discarded;
    # it appears to serve only as a sanity/warm-up evaluation — confirm.
    det_residual(model, initial_guess, s1, xT_g, epsilons)
    if not ignore_constraints:
        def ff(vec):
            return det_residual(
                model, vec.reshape(sh), s1, xT_g, epsilons, jactype="sparse"
            )
        v0 = initial_guess.ravel()
        if solver == "ncpsolve":
            sol, nit = ncpsolve(
                ff,
                lower_bound.ravel(),
                upper_bound.ravel(),
                initial_guess.ravel(),
                verbose=verbose,
                maxit=maxit,
                tol=tol,
                jactype="sparse",
            )
        else:
            sol = lmmcp(
                lambda u: ff(u)[0],
                lambda u: ff(u)[1].todense(),
                lower_bound.ravel(),
                upper_bound.ravel(),
                initial_guess.ravel(),
                verbose=verbose,
            )
            nit = -1
        sol = sol.reshape(sh)
    else:
        def ff(vec):
            ll = det_residual(model, vec.reshape(sh), s1, xT_g, epsilons, diff=True)
            return ll
        v0 = initial_guess.ravel()
        # from scipy.optimize import root
        # sol = root(ff, v0, jac=True)
        # sol = sol.x.reshape(sh)
        sol, nit = newton(ff, v0, jactype="sparse")
        sol = sol.reshape(sh)
        # sol = sol[:-1, :]
    if (exogenous is not None) and keep_steady_state:
        # Prepend the period -1 steady state/controls to the simulation.
        sx = np.concatenate([s0, x0])
        sol = np.concatenate([sx[None, :], sol], axis=0)
        epsilons = np.concatenate([epsilons[:1:], epsilons], axis=0)
        index = range(-1, T + 1)
    else:
        index = range(0, T + 1)
    # epsilons = np.concatenate([epsilons[:1,:], epsilons], axis=0)
    if "auxiliary" in model.functions:
        colnames = (
            model.symbols["states"]
            + model.symbols["controls"]
            + model.symbols["auxiliaries"]
        )
        # compute auxiliaries
        y = model.functions["auxiliary"](epsilons, sol[:, :n_s], sol[:, n_s:], p)
        sol = np.column_stack([sol, y])
    else:
        colnames = model.symbols["states"] + model.symbols["controls"]
    sol = np.column_stack([sol, epsilons])
    colnames = colnames + model.symbols["exogenous"]
    ts = pd.DataFrame(sol, columns=colnames, index=index)
    return ts | d12b50a53e4c03cba4d1e1980c9b0eac41412311 | 3,629,922 |
def find_check_string_output(  # type: ignore
    ctx, class_name, method_name, as_python=True, fuzzy_match=False, pbcopy=True
):
    """
    Find output of `check_string()` in the test running
    class_name::method_name.
    E.g., for `TestResultBundle::test_from_config1` return the content of the file
    `./core/dataflow/test/TestResultBundle.test_from_config1/output/test.txt`
    :param as_python: if True return the snippet of Python code that replaces the
        `check_string()` with a `assert_equal`
    :param fuzzy_match: if True return Python code with `fuzzy_match=True`
    :param pbcopy: save the result into the system clipboard (only on macOS)
    """
    _report_task()
    _ = ctx
    hdbg.dassert_ne(class_name, "", "You need to specify a class name")
    hdbg.dassert_ne(method_name, "", "You need to specify a method name")
    # Look for the directory named `class_name.method_name`.
    cmd = f"find . -name '{class_name}.{method_name}' -type d"
    # > find . -name "TestResultBundle.test_from_config1" -type d
    # ./core/dataflow/test/TestResultBundle.test_from_config1
    _, txt = hsysinte.system_to_string(cmd, abort_on_error=False)
    file_names = txt.split("\n")
    # There must be exactly one matching directory.
    if not txt:
        hdbg.dfatal(f"Can't find the requested dir with '{cmd}'")
    if len(file_names) > 1:
        hdbg.dfatal(f"Found more than one dir with '{cmd}':\n{txt}")
    dir_name = file_names[0]
    # Find the only file underneath that dir.
    hdbg.dassert_dir_exists(dir_name)
    cmd = f"find {dir_name} -name 'test.txt' -type f"
    _, file_name = hsysinte.system_to_one_line(cmd)
    hdbg.dassert_file_exists(file_name)
    # Read the content of the file.
    _LOG.info("Found file '%s' for %s::%s", file_name, class_name, method_name)
    txt = hio.from_file(file_name)
    if as_python:
        # Package the code snippet.
        if not fuzzy_match:
            # Align the output at the same level as 'exp = r...'.
            num_spaces = 8
            txt = hprint.indent(txt, num_spaces=num_spaces)
        output = f"""
        act =
        exp = r\"\"\"
{txt}
        \"\"\".lstrip().rstrip()
        self.assert_equal(act, exp, fuzzy_match={fuzzy_match})
        """
    else:
        output = txt
    # Print or copy to clipboard.
    _to_pbcopy(output, pbcopy)
    return output | 33b8c0d8ebd7399ef63a870f88ddfdfca671b686 | 3,629,923 |
from re import T
def index():
    """ Dashboard

    Web2py controller: relies on framework-injected globals (db, session,
    response, s3mgr, s3base, ADMIN, roles, T, URL, redirect, ...).
    """
    if session.error:
        return dict()
    # Personal-mode users are sent straight to their own person record.
    mode = session.s3.hrm.mode
    if mode is not None:
        redirect(URL(f="person"))
    # Load Models
    s3mgr.load("hrm_skill")
    tablename = "hrm_human_resource"
    table = db.hrm_human_resource
    # Restrict non-admins to the organisations stored in their session.
    if ADMIN not in roles:
        orgs = session.s3.hrm.orgs or [None]
        org_filter = (table.organisation_id.belongs(orgs))
    else:
        # Admin can see all Orgs
        org_filter = (table.organisation_id > 0)
    s3mgr.configure(tablename,
                    insertable=False,
                    list_fields=["id",
                                 "person_id",
                                 "job_title",
                                 "type",
                                 "site_id"])
    s3.filter = org_filter
    # Parse the Request
    r = s3base.S3Request(s3mgr, prefix="hrm", name="human_resource")
    # Pre-process
    # Only set the method to search if it is not an ajax dataTable call
    # This fixes a problem with the dataTable where the the filter had a
    # distinct in the sql which cause a ticket to be raised
    if r.representation != "aadata":
        r.method = "search"
        r.custom_action = human_resource_search
    # Execute the request
    output = r()
    if r.representation == "aadata":
        return output
    # Post-process
    s3.actions = [dict(label=str(T("Details")),
                       _class="action-btn",
                       url=URL(f="person",
                               args=["human_resource"],
                               vars={"human_resource.id": "[id]"}))]
    if r.interactive:
        output.update(module_name=response.title)
        if session.s3.hrm.orgname:
            output.update(orgname=session.s3.hrm.orgname)
        response.view = "hrm/index.html"
        # Counts of active staff (type 1) and volunteers (type 2).
        query = (table.deleted != True) & \
                (table.status == 1) & org_filter
        ns = db(query & (table.type == 1)).count()
        nv = db(query & (table.type == 2)).count()
        output.update(ns=ns, nv=nv)
        module_name = deployment_settings.modules[module].name_nice
        output.update(title=module_name)
    return output | a19e62b3c3541b05bb066b54f91b46e9a1566c91 | 3,629,924 |
import requests
import os
import hashlib
def call_movebank_api(params):
    """
    Authenticate with the Movebank direct-read API and return the response body.

    Parameters
    ----------
    params : tuple of (key, value) pairs
        Query parameters.  Must be a tuple (not a dict) because a
        ('license-md5', ...) pair may be appended below.

    Returns
    -------
    str
        The decoded response body, or '' on authentication / license failure.

    Notes
    -----
    Credentials come from the MBUSER / MBPASS environment variables.  When
    Movebank answers with its license-terms page, the MD5 of that page must
    be echoed back (with the original cookies) to accept the license.
    """
    response = requests.get('https://www.movebank.org/movebank/service/direct-read',
                            params=params,
                            auth=(os.getenv("MBUSER"),
                                  os.getenv("MBPASS"))
                            )
    if response.status_code == 200:
        if '📰 License Terms:' in str(response.content):
            # License page: accept by echoing the MD5 of the page content.
            # (renamed from `hash`, which shadowed the builtin of that name)
            license_md5 = hashlib.md5(response.content).hexdigest()
            params = params + (('license-md5', license_md5),)
            response = requests.get('https://www.movebank.org/movebank/service/direct-read',
                                    params=params,
                                    cookies=response.cookies,
                                    auth=(os.getenv("MBUSER"), os.getenv("MBPASS"))
                                    )
            if response.status_code == 403:
                print("📛 Incorrect hash")
                return ''
        return response.content.decode('utf-8')
    print(str(response.content))
    return ''
def Vfun(X, deriv = 0, out = None, var = None):
    """
    Evaluate the potential energy surface.

    Expected coordinate order: r1, r2, R, a1, a2, tau
    (bond lengths in Angstroms, angles in degrees).
    """
    coords = n2.dfun.X2adf(X, deriv, var)
    r1, r2, R, a1, a2, tau = (coords[i] for i in range(6))
    # Equilibrium reference geometry.
    Re = 1.45539378      # Angstroms
    re = 0.96252476      # Angstroms
    ae = 101.08194717    # degrees
    deg2rad = np.pi / 180.0
    # Simons-Parr-Finlan stretches plus angle displacements in radians.
    q = [
        (r1 - re) / r1,
        (r2 - re) / r2,
        (R - Re) / R,
        (a1 - ae) * deg2rad,
        (a2 - ae) * deg2rad,
        tau * deg2rad,
    ]
    # Evaluate the surface and convert to the requested energy unit.
    v = calcsurf(q) * n2.constants.Eh
    return n2.dfun.adf2array([v], out)
def all_events(number=-1, etag=None):
    """Iterate over public events.
    .. deprecated:: 1.2.0
        Use :meth:`github3.github.GitHub.all_events` instead.
    :param int number: (optional), number of events to return. Default: -1
        returns all available events
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`Event <github3.events.Event>`
    """
    # Thin deprecated wrapper delegating to the module-level GitHub client.
    return gh.all_events(number, etag)
def mock_datetime(monkeypatch: MonkeyPatch) -> FakeDatetime:
    """Mocks dt.datetime for the duration of a test.
    Returns:
        FakeDatetime(2021, 3, 20)
    """
    fake_datetime = FakeDatetime(2021, 3, 20)
    # Freeze "now" so code under test sees a deterministic timestamp.
    fake_datetime.set_fake_now(dt.datetime(2021, 3, 20))
    # Swap the datetime class on the dt module; monkeypatch restores it after.
    monkeypatch.setattr(dt, "datetime", FakeDatetime)
    return fake_datetime
def Normalize(v):
    """
    Normalize each row vector of v to unit length.

    Rows with zero norm are left unchanged (their norm is treated as 1
    to avoid division by zero).

    Parameters
    ----------
    v : 2D numpy array, floats, shape (n, d)

    Returns
    -------
    2D numpy array, floats
        Row-normalized copy of v.
    """
    # Vectorized row norms instead of a Python loop over rows.
    norm = np.linalg.norm(v, axis=1)
    # Guard zero-length rows so the division below is well defined.
    norm[np.where(norm == 0)[0]] = 1
    return v / norm.reshape(norm.size, 1)
import requests
def get_instance_details(instance_id):
    """
    Fetch the detailed record for a single SLATE instance.

    :param instance_id: SLATE instance identifier
    :return: parsed JSON (dict) describing the instance
    """
    url = slate_api_endpoint + "/v1alpha3/instances/" + instance_id
    query = {"token": slate_api_token, "detailed": "true"}
    response = requests.get(url, params=query)
    return response.json()
def admin_lexers(request):
    """Render and process the per-extension syntax-highlighting config form."""
    formset = AdminLexersFormSet.for_config()
    if request.method == 'POST':
        # Re-bind with the submitted data; on success redirect (PRG pattern),
        # otherwise fall through and re-render with validation errors.
        formset = AdminLexersFormSet.for_config(request.POST)
        if formset.is_valid():
            formset.save()
            messages.success(request, u'Saved configuration')
            return redirect('admin_lexers')
    return render(
        request,
        'pasty/admin_lexers.html',
        {
            'formset': formset,
            'section': admin,
            'page_title': u'Configure highlighting',
        },
    )
def ignore_module_import_frame(file_name, name, line_number, line):
    """
    Hide the traceback frame where the test file was imported.

    Parameters
    ----------
    file_name : `str`
        The frame's respective file's name.
    name : `str`
        The frame's respective function's name.
    line_number : `int`
        The line's index where the exception occurred.
    line : `str`
        The frame's respective stripped line.

    Returns
    -------
    should_show_frame : `bool`
        Whether the frame should be shown.
    """
    is_import_frame = (
        file_name == VAMPYTEST_TEST_FILE_PATH
        and name == '_get_module'
        and line == '__import__(import_route)'
    )
    return not is_import_frame
import copy
import os
def validate_args(args):
    """
    Validate parameters (args) passed in input through the CLI.
    If necessary, perform transformations of parameter values to the simulation space
    (the LogNormal means are converted to their natural log).

    :param args: [dict] Parsed arguments.
    :return: [dict] Validated arguments.
    :raises ValueError: if any rate/mean/stddev is non-positive or simulator_id < 0.
    """
    # note: input data types are already checked by the parser object.
    # here we check input values
    if args['florida_landfall_rate'] <= 0:
        raise ValueError(f"Expect florida_landfall_rate>0, got {args['florida_landfall_rate']}")
    if args['florida_mean'] <= 0:
        raise ValueError(f"Expect florida_mean>0, got {args['florida_mean']}")
    if args['florida_stddev'] <= 0:
        raise ValueError(f"Expect florida_stddev>0, got {args['florida_stddev']}")
    if args['gulf_landfall_rate'] <= 0:
        raise ValueError(f"Expect gulf_landfall_rate>0, got {args['gulf_landfall_rate']}")
    # BUG FIX: these two used `< 0`, letting 0 through although the error
    # message promises ">0" and np.log(0) below would yield -inf.  Now
    # consistent with the florida_* checks.
    if args['gulf_mean'] <= 0:
        raise ValueError(f"Expect gulf_mean>0, got {args['gulf_mean']}")
    if args['gulf_stddev'] <= 0:
        raise ValueError(f"Expect gulf_stddev>0, got {args['gulf_stddev']}")
    if args['simulator_id'] < 0:
        raise ValueError(f"Expect simulator_id>=0, got {args['simulator_id']}")
    # deepcopy ensures mutable items are copied too
    validated_args = copy.deepcopy(args)
    # validate parameters
    # compute natural log of the LogNormal means
    validated_args.update({
        "florida_mean": np.log(args['florida_mean']),
        "gulf_mean": np.log(args['gulf_mean']),
    })
    # log validated parameter values
    logger.info("Validated parameters: ")
    numerical_args = [
        "florida_landfall_rate",
        "florida_mean",
        "florida_stddev",
        "gulf_landfall_rate",
        "gulf_mean",
        "gulf_stddev",
    ]
    for arg_k in numerical_args:
        logger.info(f"{arg_k:>30s} = {validated_args[arg_k]:>10.5f}")
    if os.getenv("TIMEIT"):
        if os.getenv("TIMEIT_LOGFILE"):
            logger.info(
                f"Found TIMEIT and TIMEIT_LOGFILE: timings will be logged in {os.getenv('TIMEIT_LOGFILE')}")
        else:
            logger.info("Found TIMEIT: logging timings to the console.")
    return validated_args
def knapsack(val, wt, W, n):
    """
    0/1 knapsack solved with bottom-up dynamic programming.

    dp[i][c] is the best total value achievable using items 0..i within
    capacity c; for each cell we take the better of skipping item i or
    taking it (when it fits).  The answer is dp[n-1][W].

    Example: W=5, wt=[5, 3, 4, 2], val=[60, 50, 70, 30] -> 80
    (items of weight 3 and 2: 50 + 30).
    """
    dp = [[0] * (W + 1) for _ in range(n)]
    # First row: item 0 fits whenever the capacity allows it.
    for cap in range(W + 1):
        dp[0][cap] = val[0] if wt[0] <= cap else 0
    for i in range(1, n):
        for cap in range(W + 1):
            if cap == 0:
                dp[i][cap] = 0
            elif wt[i] <= cap:
                # max of: skip item i, or take it on top of the remaining capacity.
                dp[i][cap] = max(dp[i - 1][cap], dp[i - 1][cap - wt[i]] + val[i])
            else:
                dp[i][cap] = dp[i - 1][cap]
    return dp[n - 1][W]
def CNN_model_basic(img_height, img_width, OPTIMIZER):
    """
    Build and compile the baseline CNN for binary image classification.

    Parameters
    -----------------
    img_height, img_width : input image dimensions (single-channel input)
    OPTIMIZER : keras optimizer used to compile the model. More info here:
                https://keras.io/api/optimizers/

    Returns
    ------------------
    model_basic : compiled Keras model ready to build, train, fit and
                  evaluate on data

    Notes
    ------------------
    The architecture is fixed on purpose so that different experiments can
    be compared under identical settings.
    More information about Keras models:
    https://www.tensorflow.org/api_docs/python/tf/keras/Model
    """
    model_basic = Sequential()
    model_basic.add(Conv2D(16, 1, padding='same', activation='relu',
                           input_shape=(img_height, img_width, 1)))
    model_basic.add(MaxPooling2D())
    model_basic.add(Conv2D(64, 5, padding='same', activation='relu'))
    model_basic.add(MaxPooling2D())
    model_basic.add(Flatten())
    model_basic.add(Dense(128, activation='relu'))
    # NOTE(review): two stacked Dropout layers (0.8 then 0.3) reproduce the
    # original architecture exactly — confirm the double dropout is intentional.
    model_basic.add(Dropout(0.8))
    model_basic.add(Dropout(0.3))
    model_basic.add(Dense(1, activation='sigmoid'))
    model_basic.compile(optimizer=OPTIMIZER,
                        loss='binary_crossentropy',
                        metrics=['accuracy', 'Precision', 'Recall'])
    return model_basic
def get_filter_df(df, filter_col, targets, greater_than=True):
    """
    Filter a dataframe on a single column.

    "transactions"/"category" are delegated to the indicator-based helper;
    "rating" is filtered by threshold (>= targets, or <= targets when
    greater_than is False); any other column is matched by exact equality.

    Returns the filtered dataframe.
    """
    if filter_col in ("transactions", "category"):
        return get_filter_indicator_df(df, filter_col, targets)
    if filter_col == "rating":
        if greater_than:
            return df.loc[df[filter_col] >= targets]
        return df.loc[df[filter_col] <= targets]
    return df.loc[df[filter_col] == targets]
def filter_graph(graph, n_epochs):
    """
    Drop graph entries too small to yield even one sample during optimization.

    :param graph: sparse matrix holding the high-dimensional similarities
    :param n_epochs: int Number of optimization epochs
    :return: filtered copy of the graph (the input is left untouched)
    """
    filtered = graph.copy()
    threshold = filtered.data.max() / float(n_epochs)
    filtered.data[filtered.data < threshold] = 0
    filtered.eliminate_zeros()
    return filtered
def create_file_with_maximum_util(folder_file):
    """
    From a folder with multiple .xls files (ASFINAG format), build one file
    holding the maximum Kfz/24h value per traffic counter across all files.

    :param folder_file: String, path to the folder with the .xls files
    :return: pandas.DataFrame with per-counter maxima in the "Kfz/24h" column

    Notes
    -----
    Rows are matched on the composite key (highway, location, counter id,
    direction, vehicle type), i.e. the unnamed columns 0/2/3/4/6.  The
    largest file is used as the reference skeleton.
    Complexity is O(rows_in_reference * files); fine for small folders.
    """
    # collect all .xls files as dataframes in a list
    # (skip yearly aggregate files starting with "Jahr")
    files = [
        f
        for f in listdir(folder_file)
        if isfile(join(folder_file, f)) and (f[-4:] == ".xls" and not f[:4] == "Jahr")
    ]
    processed_files = [
        edit_asfinag_file(folder_file + "/" + f[: len(f) - 4]) for f in files
    ]
    # finding largest file and making it to reference file
    ind_max_len = np.argmax([len(f) for f in processed_files])
    ref_file = processed_files[ind_max_len]
    old_ref_file = ref_file.copy()
    # unique keys: 'Unnamed: 0', 'Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4', 'Unnamed: 6'
    highways = ref_file["Unnamed: 0"].to_list()
    locations = ref_file["Unnamed: 2"].to_list()
    traffic_counter_ids = ref_file["Unnamed: 3"].to_list()
    direction = ref_file["Unnamed: 4"].to_list()
    car_type = ref_file["Unnamed: 6"].to_list()
    indices = ref_file.index.to_list()
    for ij in range(0, len(ref_file)):
        hw = highways[ij]
        lc = locations[ij]
        tc = traffic_counter_ids[ij]
        # renamed from `dir`, which shadowed the builtin of that name
        direction_value = direction[ij]
        ct = car_type[ij]
        ind = indices[ij]
        current_max_val = -1
        # Scan every file for this counter's row and keep the max Kfz/24h.
        for file in processed_files:
            curr_extract_f = file[
                (file["Unnamed: 0"] == hw)
                & (file["Unnamed: 2"] == lc)
                & (file["Unnamed: 3"] == tc)
                & (file["Unnamed: 4"] == direction_value)
                & (file["Unnamed: 6"] == ct)
            ]
            if len(curr_extract_f) > 0:
                if curr_extract_f["Kfz/24h"].to_list()[0] > current_max_val:
                    current_max_val = curr_extract_f["Kfz/24h"].to_list()[0]
        ref_file.at[ind, "Kfz/24h"] = current_max_val
    file_with_max_vals = ref_file.copy()
    return file_with_max_vals
def join_detectionlimit_to_value(df, **kwargs):
    """Combine sign and numeric value into one column. For example: "<" + 100 -> "<100"."""
    # Rows without a sign keep the bare numeric value.
    with_sign = df['Value_sign'] + df['Value_num'].astype(str)
    df['Value'] = np.where(df['Value_sign'].notnull(), with_sign, df['Value_num'])
    return df
def read_data(datapath, metadatapath, label_key='Schizophrenia'):
    """read_data
    :param datapath: path to data file (gene)
    :param metadatapath: path to meta data file of patients
    :param label_key: diagnosis string counted as the positive class
    :output x: data of shape n_patient * n_features
    :output y: label of shape n_patients, label[i] == 1 means that the patient has label_key.
    :output patient_id: list of patient id
    :output feature_name: list of feature name
    Read data from the 2 above files.
    A few notes:
    - Some patients exist in one file but not in another, they are removed
    - Some patients in data file has more than one row in the metadata file, only one is kept (assume that the diagnosis is the same for both rows)
    """
    # Reading data file (tab-separated; first row = patient ids,
    # first column = feature names)
    csv_file = pd.read_csv(datapath, u'\t', header=None)
    data = csv_file.values
    patient_id = data[0,1:]
    feature_name = data[1:,0]
    # Transpose so rows are patients and columns are features.
    x = data[1:,1:].astype(np.float32).T
    n_patient = x.shape[0]
    # Reading metadata file
    csv_file = pd.read_csv(metadatapath, header=None)
    metadata = csv_file.values
    header = metadata[0,:]
    metadata = metadata[1:,:]
    individualID = metadata[:, np.where(header == 'individualID')].flatten()
    diagnosis = metadata[:, np.where(header == 'diagnosis')].flatten()
    # y starts at -1 ("no metadata match"); such rows are dropped at the end.
    y = np.zeros((n_patient, ), dtype=np.int32) - 1
    # Match metadata with rows in data
    mismatch = []
    for i in range(n_patient):
        patient_id_i = patient_id[i]
        # Ids like "X201..." in the data file map to "201x-..." in the
        # metadata — presumably R-style column-name mangling; TODO confirm.
        if patient_id_i[0:4] == 'X201':
            patient_id_i = patient_id_i[1:5] + '-' + patient_id_i[6:]
        metadata_index = np.where(individualID == patient_id_i)[0]
        if (metadata_index.size == 0):
            mismatch += [patient_id_i]
            continue
        else:
            # Several metadata rows may exist; only the first is used.
            metadata_index = metadata_index[0]
        if (diagnosis[metadata_index] != label_key):
            y[i] = 0
        else:
            y[i] = 1
    # Drop patients that never got a label (still -1 = no metadata match).
    x = x[y != -1, :]
    patient_id = patient_id[y != -1]
    y = y[y != -1]
    return x, y, patient_id, feature_name
def show(tournament, match_id):
    """Retrieve a single match record for a tournament."""
    endpoint = "tournaments/%s/matches/%s" % (tournament, match_id)
    return api.fetch_and_parse("GET", endpoint)
import json
def get_Frequency(ids):
    """
    Return the frequency record stored in the DB with the given id,
    serialized to a JSON string.
    """
    db = Database()
    session = db.session
    rows = session.query(db.frequency).filter(db.frequency.id == ids).all()
    payload = json.dumps(rows, cls=AlchemyEncoder)
    session.close()
    return payload
import torch
def ycbcr_to_rgb_jpeg(image):
    """ Converts YCbCr image to RGB JPEG
    Input:
        image(tensor): batch x height x width x 3
    Output:
        result(tensor): batch x 3 x height x width
    """
    # Inverse JPEG colour matrix (transposed so we can right-multiply).
    matrix = np.array(
        [[1., 0., 1.402], [1, -0.344136, -0.714136], [1, 1.772, 0]],
        dtype=np.float32).T
    # BUG FIX: the original passed a Python list and a NumPy array straight
    # into tensor arithmetic / torch.tensordot; convert both to tensors on
    # the image's dtype/device.  (The dead `result.view(...)` no-op and the
    # commented-out conversion were removed.)
    shift = torch.tensor([0., -128., -128.], dtype=image.dtype, device=image.device)
    matrix_t = torch.from_numpy(matrix).to(dtype=image.dtype, device=image.device)
    result = torch.tensordot(image + shift, matrix_t, dims=1)
    # Move channels first: batch x 3 x height x width.
    return result.permute(0, 3, 1, 2)
from sys import path  # NOTE(review): this binds sys.path, which is then
                      # passed to external_info(path) below — looks wrong;
                      # confirm whether a data-directory path was intended.
def gen_dist_train_test(train_df, test_df, pivot_table, gen_se_dist, gen_pro_cli_dist, external_info):
    """generate dist information on training data, merge the distribution with both training data and test data
    The Data flow should look like this:
    train_df ==> pivot_table ==> gen_se_dist  ====>  get_dist_info ==> merge info(train and test)
           |            |                       |
           |            |                       |
           |            ==> create_lag      ====|
           |                                    |
           |                                    |
           ==> gen_pro_cli_dist  ===============|
           |                                    |
           |                                    |
           ==> external_info  ==================|
    So after these process, the train_df.shape[0] will decrease, while test_df.shape[0] will not change
    """
    # Distribution features computed on the training data only.
    df_cli_pro_dist = gen_pro_cli_dist(train_df)
    train_pivot = pivot_table(train_df)
    df_se_pro_cli_dist, df_se_pro_dist, df_se_cli_dist = gen_se_dist(train_pivot)
    df_lag = create_lag(train_pivot)
    dist_list = [df_cli_pro_dist, df_lag, df_se_pro_cli_dist, df_se_pro_dist, df_se_cli_dist]
    # Left-join every distribution table onto train and test; join keys are
    # all columns of the dist table except the generated label_* columns.
    for dist in dist_list:
        dist_join_key = [ele for ele in dist.columns if ele.split("_")[0] != "label"]
        train_df = train_df.merge(dist, on = dist_join_key, how = 'left')
        test_df = test_df.merge(dist, on = dist_join_key, how = 'left')
    # External lookup tables keyed on agency / product.
    town_state, producto_tabla = external_info(path)
    train_df = train_df.merge(town_state, on = "Agencia_ID", how = 'left')
    train_df = train_df.merge(producto_tabla, on = "Producto_ID", how = 'left')
    test_df = test_df.merge(town_state, on = "Agencia_ID", how = 'left')
    test_df = test_df.merge(producto_tabla, on = "Producto_ID", how = 'left')
    # Drop raw id columns; the label column only exists on the training side.
    train_df.drop(["Semana", "Agencia_ID", "Canal_ID", "Ruta_SAK", "Cliente_ID", "Producto_ID", "label"], axis = 1, inplace = True)
    train_df.drop_duplicates(inplace = True)
    test_df.drop(["Semana", "Agencia_ID", "Canal_ID", "Ruta_SAK", "Cliente_ID", "Producto_ID"], axis = 1, inplace = True)
    return train_df, test_df
def get_global_step(hparams):
    """Return the global optimization step, scaled down by the gradient
    accumulation factor when multistep accumulation is enabled."""
    step = tf.to_float(tf.train.get_or_create_global_step())
    accumulate_steps = hparams.optimizer_multistep_accumulate_steps
    if accumulate_steps:
        return step / tf.to_float(accumulate_steps)
    return step
import os
def search_file():
    """
    Recursively search the logged-in user's directory tree for files and
    folders whose names contain the submitted search term, and render the
    matches.

    (The original docstring mentioned the LINUX `find` command; the search
    actually uses the files_rec() helper.)
    """
    if "username" not in session:
        return redirect("/")
    if request.method == "POST":
        search = request.form['sb']
        res=[]
        res2=[]
        # files_rec returns every (folder, file) path below the user's root.
        [folders, files] = files_rec(session["chemin"], default_dir+"/"+session["username"]+"/")
        for i in files:
            if search in i:
                res.append(i)
        for i in folders:
            if search in i:
                res2.append(i)
        # Strip the absolute base prefix so the template shows a relative path.
        path_show = session["chemin"].replace(session["default_dir"], "")
        if path_show == "": path_show = "/"
        # Human-readable size per matching file.
        size_list=[convert_octets(os.path.getsize(session["chemin"]+"/"+i)) for i in res]
        return render_template('index.html', files=res, folders=res2, path=path_show, username=session["username"], size_list=size_list)
    else:
        return redirect("/")
def getValues(astr, begInd=0):
    """
    Extracts all values (zero or more) for a keyword.

    Inputs:
    astr: the string to parse
    begInd: index of start, must point to "=" if the keyword has any values
        or ";" if the keyword has no values. Initial whitespace is skipped.

    Returns a duple consisting of:
    a tuple of values (empty if there are no values)
    the index of the beginning of the next keyword, or None if end of string

    Exceptions:
    If astr[begInd] is not "=" or ";" then raises a SyntaxError
    """
    if begInd is None:
        return ((), None)
    # _StartRE matches the "=" or ";" separator plus following whitespace and
    # captures where the first value (or next keyword) starts.
    mo = _StartRE.match(astr, begInd)
    if mo is None:
        raise SyntaxError("cannot find value(s) starting at %d in :%s:" % \
            (begInd, astr))
    sepChar = mo.group('first')
    nextInd = mo.start('next')
    if nextInd < 0:
        # no values and line finished
        return ((), None)
    if sepChar == ';':
        # no values; line not finished
        return ((), nextInd)
    valueList = []
    prevInd = nextInd
    # Main scan loop: one value per iteration, values separated by commas,
    # terminated by a semicolon (next keyword) or end of string.
    while True:
        nextIsKey = False
        if astr[nextInd] in "\'\"":
            # value is a delimited string
            (value, nextInd) = GetString.getString(astr, nextInd)
            valueList.append(value)
        elif astr[nextInd] != ';':
            # value is an undelimited word (e.g. a number, NaN, etc.)
            mo = _UndelimWordRE.match(astr, nextInd)
            if mo is None:
                raise SyntaxError("cannot find an undelimited word starting at %d in :%s:" % \
                    (nextInd, astr))
            value = mo.group('str')
            nextInd = mo.start('next')
            if (nextInd < 0):
                nextInd = None
            valueList.append(value)
        if nextInd is None:
            # done with line
            break
        # nextInd points to comma or semicolon
        if astr[nextInd] == ';':
            nextIsKey = True
        elif astr[nextInd] != ',':
            print("bug; expected comma or semicolon as next token; giving up on line")
            nextInd = None
            break
        # Sanity check: the scanner must make forward progress.
        if (nextInd <= prevInd) and not nextIsKey:
            print("bug: nextInd = %d <= prevInd = %d" % (nextInd, prevInd))
            nextInd = None
            break
        # find index of next character
        for ind in range(nextInd+1, len(astr)):
            if astr[ind] not in ' \t':
                nextInd = ind
                break
        else:
            # Separator was the last non-blank character on the line.
            print("ignoring separator \"%s\" at end of data :%s:" % \
                (astr[nextInd], astr))
            nextInd = None
            break
        if nextInd >= len(astr):
            break
        if nextIsKey:
            break
        prevInd = nextInd
    return (tuple(valueList), nextInd)
def rev_find(revs, attr, val):
    """Return the index of the first element in *revs* (a list of TestedRev)
    whose attribute *attr* equals *val*.

    Raises ValueError when no element matches.
    """
    for index, candidate in enumerate(revs):
        if getattr(candidate, attr) == val:
            return index
    raise ValueError("Unable to find '{}' value '{}'".format(attr, val))
def untempering(p):
    """
    Invert the MT19937 tempering transform.

    see https://occasionallycogent.com/inverting_the_mersenne_temper/index.html
    >>> mt = MersenneTwister(0)
    >>> mt.tempering(42)
    168040107
    >>> untempering(168040107)
    42
    """
    # Undo y ^= y >> 18 (the top 18 bits are untouched, so one step suffices).
    state = p ^ (p >> 18)
    # Undo y ^= (y << 15) & 0xEFC60000.
    state ^= (state << 15) & 0xEFC60000
    # Undo y ^= (y << 7) & 0x9D2C5680, seven bits at a time (mask split up).
    for mask in (0x00001680, 0x000C4000, 0x0D200000, 0x90000000):
        state ^= (state << 7) & mask
    # Undo y ^= y >> 11, eleven bits at a time.
    for mask in (0xFFC00000, 0x003FF800, 0x000007FF):
        state ^= (state >> 11) & mask
    return state
def single_run(var='dt', val=1e-1, k=5, serial=True):
    """
    A simple test program to do PFASST runs for the Piline problem.

    Args:
        var (str): name of the level parameter to set ('dt' or another key)
        val: value assigned to that level parameter
        k (int): maximum number of sweeper iterations per step
        serial (bool): True for single-process SDC, False for 30-process MSSDC

    Returns:
        dict with the final embedded and extrapolated error estimates plus
        the swept parameter value under key `var`.
    """
    # initialize level parameters
    level_params = dict()
    level_params[var] = val
    # initialize sweeper parameters
    sweeper_params = dict()
    sweeper_params['collocation_class'] = CollGaussRadau_Right
    sweeper_params['num_nodes'] = 3
    sweeper_params['QI'] = 'IE'  # For the IMEX sweeper, the LU-trick can be activated for the implicit part
    sweeper_params['QE'] = 'PIC'
    # Physical parameters of the Piline circuit model.
    problem_params = {
        'Vs': 100.,
        'Rs': 1.,
        'C1': 1.,
        'Rpi': 0.2,
        'C2': 1.,
        'Lpi': 1.,
        'Rl': 5.,
    }
    # initialize step parameters
    step_params = dict()
    step_params['maxiter'] = k
    # initialize controller parameters (error estimators switched on)
    controller_params = dict()
    controller_params['logger_level'] = 30
    controller_params['hook_class'] = log_data
    controller_params['use_extrapolation_estimate'] = True
    controller_params['use_embedded_estimate'] = True
    controller_params['mssdc_jac'] = False
    # fill description dictionary for easy step instantiation
    description = dict()
    description['problem_class'] = piline  # pass problem class
    description['problem_params'] = problem_params  # pass problem parameters
    description['sweeper_class'] = imex_1st_order  # pass sweeper
    description['sweeper_params'] = sweeper_params  # pass sweeper parameters
    description['level_params'] = level_params  # pass level parameters
    description['step_params'] = step_params
    # set time parameters: 30 steps when sweeping dt, fixed horizon otherwise
    t0 = 0.0
    if var == 'dt':
        Tend = 30 * val
    else:
        Tend = 2e1
    if serial:
        num_procs = 1
    else:
        num_procs = 30
    # instantiate controller
    controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description)
    # get initial values on finest level
    P = controller.MS[0].levels[0].prob
    uinit = P.u_exact(t0)
    # call main function to get things done...
    uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
    # Keep the last non-None extrapolated estimate (early steps have none).
    e_extrapolated = np.array(sort_stats(filter_stats(stats, type='e_extrapolated'), sortby='time'))[:, 1]
    results = {
        'e_embedded': sort_stats(filter_stats(stats, type='e_embedded'), sortby='time')[-1][1],
        'e_extrapolated': e_extrapolated[e_extrapolated != [None]][-1],
        var: val,
    }
    return results
def tofloat(img):
    """
    Convert a uint8 image to a float image scaled to [0, 1].

    :param img: numpy image, uint8
    :return: float64 image with values in [0, 1]
    """
    # np.float (a deprecated alias of builtin float) was removed in
    # NumPy 1.24; use the concrete float64 dtype it resolved to.
    return img.astype(np.float64) / 255
def get_session() -> requests_cache.CachedSession:
    """Return the process-wide cached-requests session (memoized on the
    function object).  Retry adapters are re-mounted on every call, matching
    the original behaviour."""
    if not hasattr(get_session, "session"):
        get_session.session = requests_cache.CachedSession(
            cache_name=str(CACHE_PATH), expire_after=518_400  # 6 days
        )
    session = get_session.session
    retry_adapter = HTTPAdapter(max_retries=3)
    for scheme in ("http://", "https://"):
        session.mount(scheme, retry_adapter)
    return session
def parse_html(html):
    """
    Parse an HTML string into a Python structure suitable for semantic
    comparison.

    Two fragments that differ only syntactically (e.g. which quotation mark
    wraps an attribute) produce equal structures, so semantic equivalence
    can be tested directly.
    """
    parser = Parser()
    parser.feed(html)
    parser.close()
    root = parser.root
    root.finalize()
    # Drop the synthetic ROOT wrapper when it holds a single element child.
    if len(root.children) == 1 and not isinstance(root.children[0], str):
        return root.children[0]
    return root
from typing import List
def get_atomic_num_one_hot(atom: RDKitAtom,
                           allowable_set: List[int],
                           include_unknown_set: bool = True) -> List[float]:
    """Get a one-hot feature for the atomic number of the given atom.

    Parameters
    ---------
    atom: rdkit.Chem.rdchem.Atom
        RDKit atom object
    allowable_set: List[int]
        The range of atomic numbers to consider.
    include_unknown_set: bool, default False
        If true, the index of all types not in `allowable_set` is `len(allowable_set)`.

    Returns
    -------
    List[float]
        A one-hot vector of atomic number of the given atom.
        If `include_unknown_set` is False, the length is `len(allowable_set)`.
        If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
    """
    # Atomic numbers are 1-based; the encoder expects a 0-based index.
    zero_based = atom.GetAtomicNum() - 1
    return one_hot_encode(zero_based, allowable_set, include_unknown_set)
from datetime import datetime  # NOTE(review): this binds the *class*; the code
                               # below calls datetime.datetime.strptime, which
                               # needs the *module* — confirm an `import datetime`
                               # exists elsewhere and that this line is stale.
def ds_to_1Darr(varname, ds, srate='reg', dataw='notrend'):
    """
    Reduce one float variable of the dataset to a 1-D time series.

    varname: string, variable name in `ds`
    ds: xarray-like dataset with N_PROF / N_LEVELS dims and date fields
    srate: sampling rate — 'raw' (unchanged) or 'reg' (regularized ~10 days)
    dataw: data wrangling — 'nomean' (remove mean) or 'notrend'
        (remove mean and linear trend)

    Returns (inter, var_anom, time_days, var_avg): sampling intervals,
    anomaly series, time axis in days, and the depth-averaged raw series.
    """
    # select var data
    var = ds[varname]
    # select top 150m
    var = var.isel(N_LEVELS=slice(-100,-1))
    # avg over depth
    var_avg = var.mean(dim='N_LEVELS',skipna=True).values
    # create lists of date in readable format
    date_list = []
    date_obj = []
    for profile in ds.N_PROF:
        date_str = ds.mon_day_yr.values[profile].decode("utf-8")
        time_str = ds.hh_mm.values[profile].decode("utf-8")
        date_list.append(date_str + '/' + time_str)
        date_obj.append(datetime.datetime.strptime(date_str + '/' + time_str, '%m/%d/%Y/%H:%M'))
    # make arrays for the time in days and hours (relative to first profile)
    time_days=np.zeros(len(date_obj))
    time_days[0] = 0
    for i in range(len(date_obj)-1):
        time_days[i+1] = (date_obj[i+1]-date_obj[0]).days
    time_hours = time_days*24.
    # compute the interval in time between each data point
    inter = np.diff(time_days)
    # find the index where the first value close to 10 is (there all only 5s near the start)
    # NOTE(review): threshold 5 is dataset-specific. MODIFY IF CHANGE FILE
    ind = np.where(inter >5)[0][0]
    # compute a new list of dates to have a regular interval:
    # drop every other early sample so the ~5-day section becomes ~10 days
    date_obj_10 = date_obj.copy()
    var_avg_10 = var_avg.copy()
    for t in np.arange(ind-2,0,-2):
        del date_obj_10[t]
    # same for o2
    var_avg_10=np.delete(var_avg_10, np.arange(ind-2,0,-2))
    time_days_10=np.zeros(len(date_obj_10))
    time_days_10[0] = 0
    for i in range(len(date_obj_10)-1):
        time_days_10[i+1] = (date_obj_10[i+1]-date_obj_10[0]).days
    time_hours_10 = time_days_10*24.
    # apply sampling-rate parameter
    if srate == 'raw':
        pass
    elif srate == 'reg':
        var_avg = var_avg_10
        time_days = time_days_10
    else:
        print('Choice of srate is not valid.')
    # remove the nan values from dic and o2 and time
    # NOTE(review): the cutoff index 149 is hard-coded for one particular
    # input file. MODIFY IF CHANGE FILE
    if varname == 'DIC_LIAR':
        ind_end = 149
        var_avg = var_avg[:ind_end]
        time_days = time_days[:ind_end]
    # remove the mean from the data
    var_mean = np.nanmean(var_avg)
    var_anom = var_avg - var_mean
    # find the least squares regression line
    slope, intercept, r_value, p_value, std_err = stats.linregress(time_days,var_avg)
    y = intercept + slope*time_days
    if dataw == 'notrend':
        # remove the trend from the data (overrides the mean-only anomaly)
        var_anom = var_avg - y
    return inter, var_anom, time_days, var_avg
import inspect
import functools
def fs_check(**arguments):
    """Abstracts common checks over your file system related functions.
    To reduce the boilerplate of expanding paths, checking for existence or ensuring non empty values.
    Checks are defined for each argument separately in a form of a set
    e.g
    @fs_check(path={'required', 'exists', 'expand'})
    @fs_check(path1={'required', 'exists', 'expand'}, path2={'required', 'exists', 'expand'})
    Available checks:
    - `required`: ensures the argument is passed with a non-empty value.
    - `expand` : expands the tilde `~` in path.
    - `exists` : the path exists.
    - `file` : the path is a file.
    - `dir` : the path is a dir.
    """
    # Validate the decorator's own configuration eagerly, at decoration time.
    for argument, validators in arguments.items():
        if not isinstance(validators, set):
            raise ValueError(f"Expected tuple of validators for argument {argument}")
        for validator in validators:
            if validator not in {"required", "exists", "file", "dir", "expand"}:
                raise ValueError(f"Unsupported validator '{validator}' for argument {argument}")
    def decorator(func):
        signature = inspect.signature(func)
        # Every checked argument must exist in the wrapped function's signature.
        for argument in arguments:
            if signature.parameters.get(argument) is None:
                raise j.exceptions.Value(f"Argument {argument} not found in function declaration of {func.__name__}")
        def wrapper(*args, **kwargs):
            args = list(args)
            position = 0
            for parameter in signature.parameters.values():
                if parameter.name in arguments:
                    # Fetch the value whether passed positionally or by keyword.
                    value = args[position] if position < len(args) else kwargs[parameter.name]
                    if isinstance(value, str):
                        # Expand env vars and `~` before any checks run.
                        value = expanduser(expandvars(value))
                        if position < len(args):
                            args[position] = value
                        else:
                            kwargs[parameter.name] = value
                    validators = arguments[parameter.name]
                    if value and validators.intersection({"exists", "file", "dir"}) and not exists(value):
                        msg = f"Argument {parameter.name} in {func.__name__} expects an existing path value! {value} does not exist."
                        raise j.exceptions.Value(msg)
                    if "required" in validators and (value is None or value.strip() == ""):
                        raise j.exceptions.Value(
                            f"Argument {parameter.name} in {func.__name__} should not be None or empty string!"
                        )
                    if "required" in validators:
                        # Required paths are additionally normalized.
                        value = norm_path(value)
                        if position < len(args):
                            args[position] = value
                        else:
                            kwargs[parameter.name] = value
                    if value and validators.intersection({"file"}) and not isfile(value):
                        raise j.exceptions.Value(
                            f"Argument {parameter.name} in {func.__name__} expects a file path! {value} is not a file."
                        )
                    if value and validators.intersection({"dir"}) and not isdir(value):
                        raise j.exceptions.Value(
                            f"Argument {parameter.name} in {func.__name__} expects a directory path! {value} is not a directory."
                        )
                position += 1
            # BUG FIX: the original called the undefined name `fun`, which
            # raised NameError on every invocation; call the decorated `func`.
            return func(*args, **kwargs)
        # Preserve the wrapped function's metadata (name, docstring, ...).
        return functools.wraps(func)(wrapper)
    return decorator
def old_pgp_edition(editions):
    """Format footnote/source information in the style of the old PGP
    metadata editor's edition field.  Returns "" when there are no editions."""
    if not editions:
        return ""
    formatted = []
    for fn in editions:
        # Mark entries that also supply a translation; append the url if any.
        prefix = "and trans. " if Footnote.TRANSLATION in fn.doc_relation else ""
        suffix = " %s" % fn.url if fn.url else ""
        formatted.append("%s%s%s" % (prefix, fn.display().strip("."), suffix))
    # Combine multiple editions as "Ed. ...; also ed. ... ."
    return "".join(["Ed. ", "; also ed. ".join(formatted), "."])
def ConstructApiDef(api_name,
                    api_version,
                    is_default,
                    base_pkg='googlecloudsdk.third_party.apis'):
    """Creates and returns the APIDef specified by the given arguments.

    Args:
      api_name: str, The API name (or the command surface name, if different).
      api_version: str, The version of the API.
      is_default: bool, Whether this API version is the default.
      base_pkg: str, Base package from which generated API files are accessed.

    Returns:
      APIDef, The APIDef created using the given args.
    """
    # pylint:disable=protected-access
    api_name, _ = apis_internal._GetApiNameAndAlias(api_name)
    client_cls_name = _CamelCase(api_name) + _CamelCase(api_version)
    # Dotted module path holding the generated API files.
    class_path = '{base}.{api_name}.{api_version}'.format(
        base=base_pkg, api_name=api_name, api_version=api_version,)
    common_fmt = '{api_name}_{api_version}_'
    client_cls_path = (common_fmt + 'client.{api_client_class}').format(
        api_name=api_name,
        api_version=api_version,
        api_client_class=client_cls_name)
    messages_mod_path = (common_fmt + 'messages').format(
        api_name=api_name, api_version=api_version)
    return apis_map.APIDef(class_path, client_cls_path,
                           messages_mod_path, is_default)
def L2struct_array(L,dtype={'names':('score','col','S_init','tree'),'formats':('f4','S10000','S10000','S10000')}):
"""
Convert list output from extract_elite to structured Numpy array.
Contracts initial conditions string with tree string. Converts column string to int (after removing brackets).
Inputs:
L: list of short lists, each containing score, col, S_init and tree.
"""
return np.array([ (e[0], int(e[1].strip("()")), "%s %s"%(e[2], e[3]) ) for e in L ], dtype = [('score', 'f4'), ('col','i4'), ('ic_tree','S10000') ]) | b53982b190ae392db4187073247ebe5a94c7a19f | 3,629,959 |
def build_model(x_train_text, x_train_numeric, **kwargs):
"""Build TF model."""
max_features = 5000
sequence_length = 100
encoder = preprocessing.TextVectorization(max_tokens=max_features,
output_sequence_length=sequence_length)
encoder.adapt(x_train_text.values)
normalizer = preprocessing.Normalization()
normalizer.adapt(x_train_numeric.values)
text_input = tf.keras.Input(shape=(None,), name='text', dtype='string')
embedded = encoder(text_input)
embedded = layers.Embedding(input_dim=max_features, output_dim=128)(embedded)
# LSTM doesn't improved performance
# embedded = layers.LSTM(128)(embedded)
embedded = layers.GlobalAveragePooling1D()(embedded)
numeric_shape = x_train_numeric.shape[1:]
numeric_input = tf.keras.Input(shape=numeric_shape, name='numeric')
normalized = normalizer(numeric_input)
if 'only_numeric' in kwargs and kwargs['only_numeric']:
print('\nBuilding TF model with only numeric data ...')
inputs = numeric_input
x = normalized
elif 'only_text' in kwargs and kwargs['only_text']:
print('\nBuilding TF model with only text data ...')
inputs = text_input
x = embedded
else:
print('\nBuilding TF model with both numeric and text data ...')
inputs = [text_input, numeric_input]
x = layers.concatenate([embedded, normalized])
print('#' * 65)
x = layers.Dropout(0.3)(x)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dropout(0.3)(x)
output = layers.Dense(1)(x)
model = tf.keras.Model(inputs=inputs, outputs=output)
model.compile(optimizer='adam', loss='mse', metrics=['mape', 'mae'])
return model | 2ddc501d1a73527b57da20cb90ce8d47d82f1996 | 3,629,960 |
def find_group(name):
"""Make a special case of finding a group.
NB This uses ambiguous name resolution so only use it for a casual match
"""
return root().find_group(name) | d69646dba11c9b7925ac403453100b13ea874a98 | 3,629,961 |
def multiplicar(a, b):
"""
MULTIPLICAR realiza la multiplicacion de dos numeros
Parameters
----------
a : float
Valor numerico `a`.
b : float
Segundo valor numerico `b`.
Returns
-------
float
Retorna la suma de `a` + `b`
"""
return a*b | 2d1a56924e02f05dcf20d3e070b17e4e602aecf6 | 3,629,962 |
import pkg_resources
def email_vertices():
"""Return the email_vertices dataframe
Contains the following fields:
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 1005 non-null int64
1 dept 1005 non-null int64
"""
stream = pkg_resources.resource_stream(__name__, 'data/email_vertices.csv')
return pd.read_csv(stream) | 5ccba798717b69c367befb4bf3ab92d9d2fab37a | 3,629,963 |
def pascal_row(n):
"""returns the pascal triangle row of the given integer n"""
def triangle(n, lst):
if lst ==[]:
lst = [[1]]
if n == 1:
return lst
else:
oldRow = lst[-1]
def helpRows(lst1, lst2):
if lst1 == [] or lst2 == []:
return []
return [(lst1[0], lst2[0])] + helpRows(lst1[1:], lst2[1:])
def summation(lst):
if lst == []:
return []
return [sum(lst[0])] + summation(lst[1:])
newRow = [1] + summation(helpRows(oldRow, oldRow[1:])) + [1]
return triangle(n - 1, lst + [newRow])
return triangle(n + 1, [])[n] | 030fe3e574f4261c862a882e7fdeee836a1dffb7 | 3,629,964 |
def _apply_homography(H: np.ndarray, vdata: np.ndarray) -> tuple :
"""
Apply a homography, H, to pixel data where only v of (u,v,1) is needed.
Apply a homography to pixel data where only v of the (u,v,1) vector
is given. It is assumed that the u coordinate begins at 0.
The resulting vector (x,y,z) is normalized by z to find (x,y,1)
"""
if not isinstance(H,np.ndarray):
raise TypeError("H must be a numpy array")
elif not (H.shape==(3,3)):
raise ValueError("H must have shape (3,3)")
elif not isinstance(vdata,np.ma.MaskedArray):
raise TypeError("vdata must be a numpy masked array")
elif not (len(vdata.shape)==2):
raise ValueError("vdata must have two dimensions")
# build stack of (u,v,1) vectors
N,M = vdata.shape
u, v = np.arange(0,M,1), np.arange(0,N,1)
udata = np.ma.array(np.meshgrid(u,v)[0] ,mask=vdata.mask)
wdata = np.ma.array(np.ones(vdata.shape),mask=vdata.mask)
data = np.ma.stack((udata.ravel(),vdata.ravel(),wdata.ravel()),axis=-1).T
# apply H but ignore columns which have any masked values
valid_data = np.matmul(H,np.ma.compress_cols(data))
# normalize by the second index
for i in range(3):
valid_data[i,:] = np.divide(valid_data[i,:],valid_data[2,:])
# extract valid values into array with original shape
idx = np.ma.array(np.arange(data.shape[1]),mask=vdata.ravel().mask)
valid_idx = np.ma.compressed(idx)
data = np.zeros((2,data.shape[1]))
data[0,valid_idx] = valid_data[0,:]
data[1,valid_idx] = valid_data[1,:]
data = data.reshape(2,N,M)
return np.ma.array(data[0,:,:],mask=vdata.mask), \
np.ma.array(data[1,:,:],mask=vdata.mask) | 3f83dc9da32d03da8c35aabbdee8603264b31918 | 3,629,965 |
import logging
def asts(repo):
"""A dict {filename: ast} for all .py files."""
asts = {}
for src_fn, src in repo._calc('source_contents').iteritems():
try:
ast = pyast.parse(src)
except:
#if their code does not compile, ignore it
#TODO should probably be more strict against this,
#could really throw off num_ast-relative features
#maybe don't consider repos with non-compiling code?
logging.exception("file %s/%s does not compile",
repo.name, src_fn)
else:
#otherwise, include it
asts[src_fn] = ast
return asts | 46c912fd48832c68a0a441e33cf249a0fff14951 | 3,629,966 |
import re
def is_matching_layer(layer):
"""Returns true if the name of the given layer meets the criteria for
processing."""
return (
re.match(LAYER_PREFIX_TO_MATCH, layer.name()) and
re.match(f'.*{LAYER_SUBSTRING_TO_MATCH}.*', layer.name()) and
re.search(SUFFIX_CLEANABLE, layer.name()) and
not re.search(SUFFIX_CLEANED, layer.name())) | e07c271f99db60ce06bff5d39ee91478937d272e | 3,629,967 |
def parse_call_no(field: Field, library: str) -> namedtuple:
"""
Parses call number data per each system rules
Args:
field: call number field, instance of pymarc.Field
library: library system
Returns:
"""
if library == "bpl":
callNo_data = parse_bpl_call_no(field)
elif library == "nypl":
callNo_data = parse_nypl_call_no(field)
return callNo_data | f5c787acfaa360315a4f49bf825410762d812e51 | 3,629,968 |
from re import A
def ni(num,tem):
""" num: density cm^-3 """
b = zeros( Aij.shape[0] + 2 , dtype='float64')
b[-1] = 10
# this line is REALLY STUPID, but for some pointless reason linalg experiences
# precision errors and thinks that A is singular when it is obviously not.
if tem > 30:
myni = dot(linalg.pinv(A(num,tem)),b)
else:
scale = 1e6
myni = linalg.solve( scale * (A(num,tem)) , b.T) / scale
# myni = linalg.solve((A(num,tem)[:-1,:-1]), ones(Aij.shape[0]+1))
return abs(myni)[:-1] | 83e5d918752c1241ce5e6917824526f5726edad2 | 3,629,969 |
def _parse_example_configuration(config, regexps):
"""
Parse configuration lines against a set of comment regexps
Args:
config(_io.TextIOWrapper): Example configuration file to parse
regexps(dict[str, list[tuple[re.__Regex, str]]]):
Yields:
str: Parsed configuration lines
"""
# What section are we currently in?
in_section = None
def iter_regexps(_line):
nonlocal regexps
nonlocal in_section
for section, sect_cfg in regexps.items():
sect_regex = sect_cfg[0][0] # type: re.__Regex
if sect_regex.match(line):
in_section = section
return line.lstrip('#')
raise RuntimeError('No section opening regexps matched')
def iter_section_regexps(_line):
nonlocal regexps
nonlocal in_section
for key_regex, kv in regexps[in_section]:
# Skip the section regex
if kv is None:
continue
# Does the key match?
if key_regex.match(_line):
return "{key} = {value}\n".format(key=kv[0], value=kv[1])
raise RuntimeError('No key=value regexps matched')
for line in config:
# Are we in a section yet?
if in_section:
try:
yield iter_section_regexps(line)
except RuntimeError:
# No key=value match? Are we venturing into a new section?
try:
yield iter_regexps(line)
except RuntimeError:
# Still nothing? Return the unprocessed line then
yield line
# Not in a section yet? Are we venturing into our first one then?
else:
try:
yield iter_regexps(line)
except RuntimeError:
# Not yet? Return the unprocessed line then
yield line | 86ff5db080f12dc624feb4ce20dcf659dcc2cb49 | 3,629,970 |
def readCosmicRayInformation(lengths, totals):
"""
Reads in the cosmic ray track information from two input files.
Stores the information to a dictionary and returns it.
:param lengths: name of the file containing information about the lengths of cosmic rays
:type lengths: str
:param totals: name of the file containing information about the total energies of cosmic rays
:type totals: str
:return: cosmic ray track information
:rtype: dict
"""
crLengths = np.loadtxt(lengths)
crDists = np.loadtxt(totals)
return dict(cr_u=crLengths[:, 0], cr_cdf=crLengths[:, 1], cr_cdfn=np.shape(crLengths)[0],
cr_v=crDists[:, 0], cr_cde=crDists[:, 1], cr_cden=np.shape(crDists)[0]) | 3d20f9e4763050169008875eb12195ec135030f4 | 3,629,971 |
import re
def get_package_version():
"""get version from top-level package init"""
version_file = read('pyshadoz/__init__.py')
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.') | 35d16c607ccbf9b0102e47cccf77e532269aea38 | 3,629,972 |
def setup_default_abundances(filename=None):
"""
Read default abundance values into global variable.
By default, data is read from the following file:
https://hesperia.gsfc.nasa.gov/ssw/packages/xray/dbase/chianti/xray_abun_file.genx
To load data from a different file, see Notes section.
Parameters
----------
filename: `str` (optional)
URL or file location of the .genx abundance file to be used.
"""
if filename:
with manager.override_file("xray_abundance", uri=filename):
return load_xray_abundances()
else:
return load_xray_abundances() | b024fa2517b5578f8ee077eecd00fecf6583819d | 3,629,973 |
def resolve_conflicts2_next(pid):
"""
update page number to db
update kapr to db
flush related cache in redis
"""
user = current_user
assignment_id = pid + '-' + user.username
# find if this project exist
assignment = storage_model.get_conflict_project(mongo=mongo, username=user.username, pid=pid)
if not assignment:
return page_not_found('page_not_found')
# increase page number to db
storage_model.increase_conflict_page_pairidx(mongo=mongo, username=user.username, pid=pid)
# update kapr to db
KAPR_key = assignment_id + '_KAPR'
kapr = r.get(KAPR_key)
storage_model.update_kapr_conflicts(mongo=mongo, username=user.username, pid=pid, kapr=kapr)
# flush related cache in redis
# dont flush yet, because resolve conflicts need these data
# TODO: flush these data when resolve conflict finished
# storage_model.clear_working_page_cache(assignment_id, r)
# check if the project is completed
completed = storage_model.is_conflict_project_completed(mongo=mongo, pid=pid)
if completed:
# combine resolve_conflict_result with result
storage_model.update_resolve_conflicts(mongo, pid)
# update result file to int_file
storage_model.update_result(mongo=mongo, pid=pid)
storage_model.delete_resolve_conflict(mongo, pid)
# flush redis data
storage_model.clear_working_page_cache(assignment_id, r)
flask.flash('You have completed resolve conflicts of this project.', 'alert-success')
return redirect(url_for('project', pid=pid))
return redirect(url_for('resolve_conflicts2', pid=pid)) | 092ac06d78357e8ddfba18e0c775a8293e4603c4 | 3,629,974 |
def load_dataset(datapath):
"""
Load dataset at given datapath. Datapath is expected to be a list of
directories to follow.
"""
inFN = abspath(join(dirname(__file__), datapath))
return rs.read_mtz(inFN) | 02ab263a13b9118d3943031e5165c943cc0c529f | 3,629,975 |
def displace_vertices(vertices, directions, length=1., mask=True):
"""
Displaces vertices by given length along directions where mask is True
Parameters
----------
vertices: (n, d) float
Mesh vertices
directions: (n, d) float
Directions of displacement (e.g. the mesh normals)
length: scalar, (n) or (n, d) float
Length of displacement
mask: (n) bool
Mask of which vertices will be displaced
Returns
-------
displaced_vertices: (n, d) float
Displaced vertices
"""
# Multiplicating length by mask beforehand to allow broadcasting
return vertices + np.atleast_1d(length * mask)[:, None] * directions | a3481b8ac7366279207dee36b0ce7723276aec56 | 3,629,976 |
def point_in_poly(x,y,poly):
"""" Ray Casting Method:
Drawing a line from the point in question and stop drawing it when the line
leaves the polygon bounding box. Along the way you count the number of times
you crossed the polygon's boundary. If the count is an odd number the point
must be inside. If it's an even number the point is outside the polygon.
So in summary, odd=in, even=out
"""
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside | a13be4c712a4705829780dbfc847467d45897552 | 3,629,977 |
def can_comment(request, entry):
"""Check if current user is allowed to comment on that entry."""
return entry.allow_comments and \
(entry.allow_anonymous_comments or
request.user.is_authenticated()) | 04bcd019af083cff0367e236e720f4f7b00f7a65 | 3,629,978 |
def tianqin_psd(f, L=np.sqrt(3) * 1e5 * u.km, t_obs=None, approximate_R=None, confusion_noise=None):
"""Calculates the effective TianQin power spectral density sensitivity curve
Using Eq. 13 from Huang+20, calculate the effective TianQin PSD for the sensitivity curve
Note that this function includes an extra factor of 10/3 compared Eq. 13 in Huang+20, since Huang+20
absorbs the factor into the waveform but we instead follow the same convention as Robson+19 for
consistency and include it in this 'effective' PSD function instead.
Parameters
----------
f : `float/array`
Frequencies at which to evaluate the sensitivity curve
L : `float`
Arm length
t_obs : `float`
Ignored for this function
approximate_R : `boolean`
Ignored for this function
confusion_noise : `various`
Ignored for this function
Returns
-------
psd : `float/array`
Effective power strain spectral density
"""
fstar = const.c / (2 * np.pi * L)
Sa = 1e-30 * u.m**2 * u.s**(-4) * u.Hz**(-1)
Sx = 1e-24 * u.m**2 * u.Hz**(-1)
psd = 10 / (3 * L**2) * (4 * Sa / (2 * np.pi * f)**4 * (1 + (1e-4 * u.Hz / f)) + Sx) \
* (1 + 0.6 * (f / fstar)**2)
return psd.to(u.Hz**(-1)) | ac211b96aff1d1bf2eeb8685944274820217d295 | 3,629,979 |
def __get_request_body(file: BytesIO, file_path: str, repo_url: str) -> dict[str, str]:
"""Creates request body for GitHub API.
Parameters:
file_path: path where file is to be uploaded (e.g. /folder1/folder2/file.html)
file: File-like object
repo_url: url of SuttaCentral editions repo
"""
return {
"message": f"Uploading {file_path}",
"content": b64encode(file.read()).decode("ascii"),
"sha": __get_file_sha(file_path, repo_url),
} | 270ddaccde3f78c25849eccf3b5060d558f17d59 | 3,629,980 |
from typing import Union
import struct
def _write_header(buf: Union[memoryview, bytearray],
dtype: np.dtype,
shape: tuple):
"""
Write the header data into the shared memory
:param buf: Shared memory buffer
:type buf: bytes
:param dtype: Data format
:type dtype: np.dtype
:param shape: Array dimension
:type shape: tuple
:return: Offset to the start of the array
:rtype: int
"""
size, off, n_count = _calculate_size(shape, dtype)
if dtype not in _JULIA_WA_IDENTS:
raise TypeError(f'Type {dtype} is not supported for WrappedArray')
eltype = _JULIA_WA_IDENTS[dtype]
if len(buf) < size:
raise MemoryError(
"Shared memory buffer is too small for wrapped array")
struct.pack_into(_JULIA_WA_HEADER_FORMAT, buf,
0, np.uint32(_JULIA_WA_MAGIC), np.uint16(eltype),
np.uint16(n_count), np.int64(off))
for idx, val in enumerate(shape):
struct.pack_into('q', buf, int(_JULIA_WA_HEADER_SIZEOF + idx *
np.dtype('int64').itemsize),
np.int64(val))
return int(off) | 4719f145e8e189d1c2b285b3ec33bfa5b4a8865f | 3,629,981 |
def _padright(width, s):
"""Flush left.
>>> _padright(6, u'\u044f\u0439\u0446\u0430') == u'\u044f\u0439\u0446\u0430 '
True
"""
fmt = u"{0:<%ds}" % width
return fmt.format(s) | d9333650a76fb8861f576f5e5f17c1929392c210 | 3,629,982 |
def single(mjd, hist=[], **kwargs):
"""cadence requirements for single-epoch
Request: single epoch
mjd: float or int should be ok
hist: list, list of previous MJDs
"""
# return len(hist) == 0
sn = kwargs.get("sn", 0)
return sn <= 1600 | 8734916221f0976d73386ac662f6551c25accfc3 | 3,629,983 |
def accuracies(diffs, FN, FP, TN, TP):
"""INPUT:
- np.array (diffs), label - fault probability
- int (FN, FP, TN, TP) foor keeping track of false positives, false negatives, true positives and true negatives"""
for value in diffs:
if value < 0:
if value < -0.5:
FP+=1
else:
TN +=1
else:
if value < 0.5:
TP+=1
else:
FN+=1
return FN, FP, TN, TP | 001cebd169589f9f1494d9833c1fc49d8ba9964b | 3,629,984 |
from typing import Optional
import torch
from typing import Tuple
def group_te_ti_b_values(
parameters: np.ndarray, data: Optional[torch.Tensor] = None
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Group DWI gradient direction by b-values and TI and TE parameters if applicable.
This function is necessary for the S^2 fourier transforms.
Args:
parameters: ungrouped gradients directions.
data: Optional data to group. Defaults to None.
Returns:
grouped gradient directions of shape {TI, TE, b-value, gradient directions, xyz} and optionally grouped data of
shape {batch size, TI, TE, gradient directions, b-values}
"""
# FIXME: This function is slow and is executed a lot. Maybe some caching would help?
b_s = np.unique(parameters[:, 3])
b_s = b_s[b_s != 0]
ti_s = np.unique(parameters[:, 4]) if parameters.shape[1] > 4 else None
te_s = np.unique(parameters[:, 5]) if parameters.shape[1] > 5 else None
gradients = np.zeros(
(
ti_s.shape[0] if ti_s is not None else 1,
te_s.shape[0] if te_s is not None else 1,
b_s.shape[0],
1, # initial number of gradient direction per b-value, will resize later
3,
)
)
data_grouped = (
np.zeros(
(
data.shape[0],
ti_s.shape[0] if ti_s is not None else 1,
te_s.shape[0] if te_s is not None else 1,
1, # initial number of gradient direction per b-value, will resize later
b_s.shape[0],
)
)
if data is not None
else None
)
data_ti = None
data_te = None
if ti_s is not None and te_s is not None:
for ti_idx, ti in enumerate(ti_s):
parameters_ti = parameters[parameters[:, 4] == ti]
if data is not None:
data_ti = data[:, parameters[:, 4] == ti]
for te_idx, te in enumerate(te_s):
parameters_te = parameters_ti[parameters_ti[:, 5] == te]
if data is not None:
data_te = data_ti[:, parameters_ti[:, 5] == te]
gradients, data_grouped = group_b_values(
gradients, parameters_te, b_s, ti_idx, te_idx, data_grouped, data_te
)
else:
gradients, data_grouped = group_b_values(gradients, parameters, b_s, data_grouped=data_grouped, data=data)
if data_grouped is not None:
return gradients, data_grouped
else:
return gradients | e393085659667d2e916ca31d62811823a5bbba9f | 3,629,985 |
def compress(s):
"""param s: string to compress
count the runs in s switching
from counting runs of zeros to counting runs of ones
return compressed string"""
#the largest number of bits the compress algorithm can use
#to encode a 64-bit string or image is 320 bits
#I tested the penguin and got a compression ratio of 1.484375
#the smile gave a ratio of 1.328125
#the five gave a ratio of 1.015625
#if there is a compression algorithm that could always make a file smaller.
#then the compression operation would be able to reduce a file to 0 bytes
#and retain all of the data. However, because 0 bytes does not have any information.
#there cannot be a compression algorithm to make it smaller. Hence it does not exist.
def lenHelp(s):
if len(s) == COMPRESSED_BLOCK_SIZE:
return s
return lenHelp('0' + s)
def compress_help(s, c):
if s == "":
return ""
x = lenHelp(numToBinary(countRun(s,c,MAX_RUN_LENGTH, 0)))
return x + compress_help(s[countRun(s, c, MAX_RUN_LENGTH, 0):], '0' if c == '1' else '1')
return compress_help(s, '0') | 7a937503d27a240b1cc867b72e6db458e7626355 | 3,629,986 |
def data_scaling(Y):
"""Scaling of the data to have pourcent of baseline change columnwise
Parameters
----------
Y: array of shape(n_time_points, n_voxels)
the input data
Returns
-------
Y: array of shape(n_time_points, n_voxels),
the data after mean-scaling, de-meaning and multiplication by 100
mean: array of shape(n_voxels), the data mean
"""
mean = Y.mean(0)
Y = 100 * (Y / mean - 1)
return Y, mean | 94b550386b8411a96b9ccd3f5e93098560c327e1 | 3,629,987 |
def _process_normalizations(model_dict, dimensions, labels):
"""Process the normalizations of intercepts and factor loadings.
Args:
model_dict (dict): The model specification. See: :ref:`model_specs`
dimensions (dict): Dimensional information like n_states, n_periods, n_controls,
n_mixtures. See :ref:`dimensions`.
labels (dict): Dict of lists with labels for the model quantities like
factors, periods, controls, stagemap and stages. See :ref:`labels`
Returns:
normalizations (dict): Nested dictionary with information on normalized factor
loadings and intercepts for each factor. See :ref:`normalizations`.
"""
normalizations = {}
for factor in labels["latent_factors"]:
normalizations[factor] = {}
norminfo = model_dict["factors"][factor].get("normalizations", {})
for norm_type in ["loadings", "intercepts"]:
candidate = norminfo.get(norm_type, [])
candidate = fill_list(candidate, {}, dimensions["n_periods"])
normalizations[factor][norm_type] = candidate
return normalizations | 852eec42ec9813bf642cafa89fc2460c1af1a010 | 3,629,988 |
def versioned_static(path):
"""
Wrapper for Django's static file finder to append a cache-busting query parameter
that updates on each Wagtail version
"""
return versioned_static_func(path) | 5eace52819755f01300a90007a0853766c57e1b1 | 3,629,989 |
def calculate_estimated_energy_consumption(motor_torques, motor_velocities,
sim_time_step, num_action_repeat):
"""Calculates energy consumption based on the args listed.
Args:
motor_torques: Torques of all the motors
motor_velocities: Velocities of all the motors.
sim_time_step: Simulation time step length (seconds).
num_action_repeat: How many steps the simulation repeats the same action.
Returns:
Total energy consumption of all the motors (watts).
"""
return np.abs(np.dot(motor_torques,
motor_velocities)) * sim_time_step * num_action_repeat | 00fbfd10e21de3fd7ce4b66dd369228951f77f62 | 3,629,990 |
def affine_to_shift(affine_matrix, volshape, shift_center=True, indexing='ij'):
"""
transform an affine matrix to a dense location shift tensor in tensorflow
Algorithm:
- get grid and shift grid to be centered at the center of the image (optionally)
- apply affine matrix to each index.
- subtract grid
Parameters:
affine_matrix: ND+1 x ND+1 or ND x ND+1 matrix (Tensor)
volshape: 1xN Nd Tensor of the size of the volume.
shift_center (optional)
Returns:
shift field (Tensor) of size *volshape x N
TODO:
allow affine_matrix to be a vector of size nb_dims * (nb_dims + 1)
"""
if isinstance(volshape, (tf.Dimension, tf.TensorShape)):
volshape = volshape.as_list()
if affine_matrix.dtype != 'float32':
affine_matrix = tf.cast(affine_matrix, 'float32')
nb_dims = len(volshape)
if len(affine_matrix.shape) == 1:
if len(affine_matrix) != (nb_dims * (nb_dims + 1)):
raise ValueError('transform is supposed a vector of len ndims * (ndims + 1).'
'Got len %d' % len(affine_matrix))
affine_matrix = tf.reshape(affine_matrix, [nb_dims, nb_dims + 1])
if not (affine_matrix.shape[0] in [nb_dims, nb_dims + 1] and affine_matrix.shape[1] == (nb_dims + 1)):
raise Exception('Affine matrix shape should match'
'%d+1 x %d+1 or ' % (nb_dims, nb_dims) +
'%d x %d+1.' % (nb_dims, nb_dims) +
'Got: ' + str(volshape))
# list of volume ndgrid
# N-long list, each entry of shape volshape
mesh = volshape_to_meshgrid(volshape, indexing=indexing)
mesh = [tf.cast(f, 'float32') for f in mesh]
if shift_center:
mesh = [mesh[f] - (volshape[f] - 1) / 2 for f in range(len(volshape))]
# add an all-ones entry and transform into a large matrix
flat_mesh = [flatten(f) for f in mesh]
flat_mesh.append(tf.ones(flat_mesh[0].shape, dtype='float32'))
mesh_matrix = tf.transpose(tf.stack(flat_mesh, axis=1)) # 4 x nb_voxels
# compute locations
loc_matrix = tf.matmul(affine_matrix, mesh_matrix) # N+1 x nb_voxels
loc_matrix = tf.transpose(loc_matrix[:nb_dims, :]) # nb_voxels x N
loc = tf.reshape(loc_matrix, list(volshape) + [nb_dims]) # *volshape x N
# loc = [loc[..., f] for f in range(nb_dims)] # N-long list, each entry of shape volshape
# get shifts and return
return loc - tf.stack(mesh, axis=nb_dims) | 3851647c791ab6b663a34b32bf2faa60e595c527 | 3,629,991 |
import json
def get_item_details(args, doc=None, for_validate=False, overwrite_warehouse=True):
"""
args = {
"item_code": "",
"warehouse": None,
"customer": "",
"conversion_rate": 1.0,
"selling_price_list": None,
"price_list_currency": None,
"plc_conversion_rate": 1.0,
"doctype": "",
"name": "",
"supplier": None,
"transaction_date": None,
"conversion_rate": 1.0,
"buying_price_list": None,
"is_subcontracted": "Yes" / "No",
"ignore_pricing_rule": 0/1
"project": ""
"set_warehouse": ""
}
"""
args = process_args(args)
for_validate = process_string_args(for_validate)
overwrite_warehouse = process_string_args(overwrite_warehouse)
item = frappe.get_cached_doc("Item", args.item_code)
validate_item_details(args, item)
out = get_basic_details(args, item, overwrite_warehouse)
if isinstance(doc, string_types):
doc = json.loads(doc)
if doc and doc.get('doctype') == 'Purchase Invoice':
args['bill_date'] = doc.get('bill_date')
if doc:
args['posting_date'] = doc.get('posting_date')
args['transaction_date'] = doc.get('transaction_date')
get_item_tax_template(args, item, out)
out["item_tax_rate"] = get_item_tax_map(args.company, args.get("item_tax_template") if out.get("item_tax_template") is None \
else out.get("item_tax_template"), as_json=True)
get_party_item_code(args, item, out)
set_valuation_rate(out, args)
update_party_blanket_order(args, out)
if not doc or cint(doc.get('is_return')) == 0:
# get price list rate only if the invoice is not a credit or debit note
get_price_list_rate(args, item, out)
if args.customer and cint(args.is_pos):
out.update(get_pos_profile_item_details(args.company, args))
if (args.get("doctype") == "Material Request" and
args.get("material_request_type") == "Material Transfer"):
out.update(get_bin_details(args.item_code, args.get("from_warehouse")))
elif out.get("warehouse"):
out.update(get_bin_details(args.item_code, out.warehouse))
# update args with out, if key or value not exists
for key, value in iteritems(out):
if args.get(key) is None:
args[key] = value
data = get_pricing_rule_for_item(args, out.price_list_rate,
doc, for_validate=for_validate)
out.update(data)
update_stock(args, out)
if args.transaction_date and item.lead_time_days:
out.schedule_date = out.lead_time_date = add_days(args.transaction_date,
item.lead_time_days)
if args.get("is_subcontracted") == "Yes":
out.bom = args.get('bom') or get_default_bom(args.item_code)
get_gross_profit(out)
if args.doctype == 'Material Request':
out.rate = args.rate or out.price_list_rate
out.amount = flt(args.qty * out.rate)
return out | d8f70e3d978a19af0739d7b0b03d0a845eb670c7 | 3,629,992 |
from typing import Type
def _get_store(cls: Type[BaseStore]) -> BaseStore:
"""Get store object from cls
:param cls: store class
:return: store object
"""
if jinad_args.no_store:
return cls()
else:
try:
return cls.load()
except Exception:
return cls() | d223a1354c67eb9e6603b07e890258600bd24412 | 3,629,993 |
async def connections_send_message(request: web.BaseRequest):
"""
Request handler for sending a basic message to a connection.
Args:
request: aiohttp request object
"""
context = request.app["request_context"]
connection_id = request.match_info["conn_id"]
outbound_handler = request.app["outbound_message_router"]
params = await request.json()
try:
connection = await ConnectionRecord.retrieve_by_id(context, connection_id)
except StorageNotFoundError:
raise web.HTTPNotFound()
if connection.is_ready:
msg = BasicMessage(content=params["content"])
await outbound_handler(msg, connection_id=connection_id)
return web.json_response({}) | 4e5e7d736ee93e1c6817a003c4c9412fd6603781 | 3,629,994 |
import json
def load_json(path: str):
"""Load json file from given path and return data"""
with open(path) as f:
data = json.load(f)
return data | d165d087c78a0ba88d318a6dbe8b2ac8f9a8c4b5 | 3,629,995 |
def process():
"""
The main function which responds to the html form submission.
The Flask server has it under the /process address.
"""
# 1. Obtain inputs from the webpage
code = request.form.get('Python_Code', '', type=str)
graph = request.form.get('Figure_Parameters', '', type=str)
if graph == '' or len(df) == 0:
# 1a. Execute the code
output = code_execute(code)
# 2a. Return an 'empty' picture
result = jsonify(src='data:image/png;base64,',
params=js.dumps(dict()),
repr=output,
codes=code_h_format(codes),
graphs=code_h_format(pictures),
slide=1)
else:
# 1b. Create a picture
global fig, ax
f, ax = create_figure(graph)
fig = f
# 2b. Execute additional code (after creating the picture):
output = code_execute(code)
# 3b. Convert figure and return
fig_png = covert_figure(f)
params = {}
params['figure'] = {'dpi': f.dpi,
'frameon': f.frameon,
'facecolor': f.get_facecolor(),
'edgecolor': f.get_edgecolor(),
'figsize': [f.get_figwidth(), f.get_figheight()]}
params['axes'] = {'xticks': [float(_) for _ in ax.get_xticks()],
'xticklabels': [_.get_text() for _ in ax.get_xticklabels()],
'xlim': [float(_) for _ in ax.get_xlim()],
'frame_on': ax.get_frame_on()}
result = jsonify(src=fig_png,
params=js.dumps(params),
repr=output,
codes=code_h_format(codes),
graphs=code_h_format(pictures),
slide=0)
return result | 51f30dc54d7a507a68523859b4bfdbbc6934fc36 | 3,629,996 |
import functools
def check_admin_access(func):
"""Wrap a handler with admin checking.
This decorator must be below post(..) and get(..) when used.
"""
@functools.wraps(func)
def wrapper(self):
"""Wrapper."""
if not auth.is_current_user_admin():
raise helpers.AccessDeniedException('Admin access is required.')
return func(self)
return wrapper | e4f96f5f82d9d8fefcfff56ba28f317ec73e9272 | 3,629,997 |
def genericSearch(problem, fringe, heuristic=None):
    """
    A generic search algorithm to solve the Pacman Search.

    The concrete strategy is selected by the fringe type:
      - util.Stack          -> depth-first search (LIFO)
      - util.Queue          -> breadth-first search (FIFO)
      - util.PriorityQueue  -> uniform-cost search / A*, where the
                               priority is path cost + heuristic
                               (a zero heuristic yields plain UCS)

    :param problem: the search problem; must provide getStartState,
                    isGoalState, getSuccessors and getCostOfActions
    :param fringe: an empty util.Stack, util.Queue or util.PriorityQueue
    :param heuristic: h(state, problem); required with a PriorityQueue,
                      ignored otherwise. Default = None
    :return actions: the list of actions reaching a goal state
    :return []: empty list when the fringe is exhausted without a goal
    """
    # Only a PriorityQueue takes a priority argument on push; decide once.
    use_priority = isinstance(fringe, util.PriorityQueue)

    visited = []  # expanded states (kept as a list: states may be unhashable)
    start = problem.getStartState()
    if use_priority:
        fringe.push((start, []), heuristic(start, problem))
    else:
        fringe.push((start, []))

    # BUGFIX: the original looped with `while fringe:` -- the util
    # containers are always truthy, so when no goal exists the loop
    # never terminated normally and popped from an empty fringe.
    # Testing emptiness explicitly makes the no-solution case return [].
    while not fringe.isEmpty():
        node, actions = fringe.pop()
        if node in visited:
            continue  # already expanded via an earlier/cheaper path
        visited.append(node)
        # Goal test on expansion (required for UCS/A* optimality)
        if problem.isGoalState(node):
            return actions
        # Expand: push every successor with the extended action path.
        for coordinate, direction, _cost in problem.getSuccessors(node):
            new_actions = actions + [direction]
            if use_priority:
                # Priority = full path cost + heuristic estimate
                # (heuristic contributes 0 for UCS).
                new_cost = problem.getCostOfActions(new_actions) + heuristic(coordinate, problem)
                fringe.push((coordinate, new_actions), new_cost)
            else:
                fringe.push((coordinate, new_actions))
    return []
def post_processing(
    predicted: xr.Dataset,
) -> xr.DataArray:
    """
    filter prediction results with post processing filters.

    Takes the raw classification layer and zeroes out pixels that fall
    outside the Western AEZ boundary or inside areas where the class is
    implausible (protected areas, open water, steep slopes, high
    elevation). All masks are loaded/rasterized onto the prediction's
    geobox so they align pixel-for-pixel.

    :param predicted: The prediction results; must expose a
        ``Predictions`` variable and a ``geobox``
    :return: the filtered prediction layer as an int8 DataArray
        (masked-out pixels are set to 0)
    """
    dc = Datacube(app='whatever')
    #grab predictions and proba for post process filtering
    predict=predicted.Predictions
    
    # mask out classification beyond AEZ boundary
    # (rasterize the Western AEZ polygon onto the prediction grid;
    # HiddenPrints suppresses xr_rasterize's console output)
    gdf = gpd.read_file('data/Western.geojson')
    with HiddenPrints():
        mask = xr_rasterize(gdf, predicted)
    predict = predict.where(mask,0)
    
    # mask with WDPA (World Database on Protected Areas raster)
    url_wdpa="s3://deafrica-input-datasets/protected_areas/WDPA_western.tif"
    wdpa=rio_slurp_xarray(url_wdpa, gbox=predicted.geobox)
    wdpa = wdpa.astype(bool)
    predict = predict.where(~wdpa, 0)
    
    #mask with WOFS (Water Observations from Space summary)
    wofs=dc.load(product='ga_ls8c_wofs_2_summary',like=predicted.geobox)
    wofs=wofs.frequency > 0.2 # threshold: wet >20% of observations
    predict=predict.where(~wofs, 0)
    
    #mask steep slopes (derived from the Africa DEM slope product)
    url_slope="https://deafrica-data.s3.amazonaws.com/ancillary/dem-derivatives/cog_slope_africa.tif"
    slope=rio_slurp_xarray(url_slope, gbox=predicted.geobox)
    slope=slope > 35  # degrees
    predict=predict.where(~slope, 0)
    
    #mask where the elevation is above 3600m (SRTM DEM)
    elevation=dc.load(product='dem_srtm', like=predicted.geobox)
    elevation=elevation.elevation > 3600 # threshold
    predict=predict.where(~elevation.squeeze(), 0)
    
    #set dtype (compact int8 for writing out the final layer)
    predict=predict.astype(np.int8)
    
    return predict | db57d3ab72984ecf497809df39c3032db57710c4 | 3,629,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.