content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def run_multistage_cu_hkust1(cp2k_code, cu_hkust1_structuredata):  # pylint: disable=redefined-outer-name
    """Run Cp2kMultistageWorkChain on Cu-HKUST-1.

    Overrides the protocol's MGRID cutoff via user parameters, runs the
    workchain to completion and asserts that the final SCF cycle converged.
    """
    # testing user change of parameters and protocol
    parameters = Dict(dict={'FORCE_EVAL': {'DFT': {'MGRID': {'CUTOFF': 250,}}}})
    # Construct process builder
    builder = Cp2kMultistageWorkChain.get_builder()
    builder.structure = cu_hkust1_structuredata
    builder.protocol_tag = Str('standard')
    builder.cp2k_base.cp2k.parameters = parameters
    builder.cp2k_base.cp2k.code = cp2k_code
    builder.cp2k_base.cp2k.metadata.options.resources = {
        'num_machines': 1,
        #'num_mpiprocs_per_machine': 1,
    }
    #builder.cp2k_base.cp2k.metadata.options.withmpi = False # comment this for parallel cp2k executable
    builder.cp2k_base.cp2k.metadata.options.max_wallclock_seconds = 1 * 60 * 60
    # The following is not needed, if the files are available in the data directory of your CP2K executable
    cp2k_dir = DATA_DIR / 'cp2k'
    builder.cp2k_base.cp2k.file = {
        'basis': SinglefileData(file=str(cp2k_dir / 'BASIS_MOLOPT')),
        'pseudo': SinglefileData(file=str(cp2k_dir / 'GTH_POTENTIALS')),
        'dftd3': SinglefileData(file=str(cp2k_dir / 'dftd3.dat')),
    }
    results, node = engine.run_get_node(builder)
    # Removed leftover `import pdb; pdb.set_trace()` debugging breakpoint:
    # it would hang any non-interactive (CI) run of this test.
    assert node.is_finished_ok, results
    output_parameters = results['output_parameters'].get_dict()
    assert output_parameters['step_info']['scf_converged'][-1]
def get_spell_slots(pcClass, level):
    """Return a list containing the available spell slots for each spell level."""
    slots = []
    character_class = pcClass.casefold()
    if character_class == "Magic-User".casefold():
        highest_spell_level = min(math.ceil(level / 2), 9)
        # MU_SPELL_SLOTS[level - 1] holds the first-level slot count for this
        # character level; each further spell level reads the entry two steps
        # earlier in the table, hence the 2 * spell_lvl offset.
        slots = [
            MU_SPELL_SLOTS[(level - 1) - 2 * spell_lvl]
            for spell_lvl in range(highest_spell_level)
        ]
    if character_class == "Cleric".casefold():
        # Clerics top out at spell level 1 while at character level 3 or
        # below; above that they follow the magic-user halving progression,
        # capped at spell level 7 rather than 9.
        highest_spell_level = 1 if level <= 3 else min(math.ceil(level / 2), 7)
        for spell_lvl in range(highest_spell_level):
            # The cleric table almost follows the magic-user two-step
            # pattern, except the very first jump (spell level 1 -> 2)
            # moves three steps down instead of two.
            step = 3 if spell_lvl <= 1 else 2
            slots.append(CLERIC_SPELL_SLOTS[(level - 1) - step * spell_lvl])
        # Level 20 clerics follow no discernible pattern; hard-coded table.
        if level == 20:
            slots = [8, 7, 7, 6, 5, 5, 4]
    return slots
def determine_dates_to_query_on_matomo(dates_in_database):
    """
    Determine which dates need to be queried on Matomo to update the dataset.

    :param dates_in_database: iterable of ISO date strings ('YYYY-MM-DD')
        already present in the database
    :return: list of ISO date strings from 2020-05-01 up to yesterday
        (inclusive) that are missing from the database
    """
    from datetime import datetime, timedelta

    # NOTE: start date was set to 2020-05-01 as May is when the portal
    # started to be live
    start_date = datetime.strptime('2020-05-01', '%Y-%m-%d').date()
    # Stop at yesterday: today's Matomo data would still be incomplete.
    end_date = (datetime.today() - timedelta(1)).date()
    delta = timedelta(days=1)
    # Set membership is O(1) per lookup; the original tested membership in a
    # list, which was O(n) for every single day in the range.
    known_dates = set(dates_in_database)
    dates_to_process = []
    while start_date <= end_date:
        if str(start_date) not in known_dates:
            dates_to_process.append(str(start_date))
        start_date += delta
    return dates_to_process
def handleMsg(msgJ):
    """Process the message in msgJ.

    Parameters:
      msgJ: dict
        Dictionary with command sent from client

    Returns:
      string
        JSON string with command response

    Commands are of the form:
      {'cmd' : 'getCCC', 'param0': 'param0val', ...}
    Response is a string of the form (note that JSON is picky that keys
    and strings should be enclosed in double quotes:
      '{"cmd" : "getCmd", "cmd" : "<response>"}'

    {'cmd':'getHello'} -> {"cmd":"getHello", "hello": "world"}

    {'cmd':'getSegyHdrs', filename: f} ->
      {"cmd":"getSegyHdrs", "segyhdrs":
       {ns:nsamps, dt:dt: hdrs:[hdr1, hdr2...]}}

    FIXME FIXME - this currently returns "segy", not "ensemble" as the key
    WARNING - you must call getSegyHdrs first
    flo and fhi are optional. If they are not present, no filtering
    {'cmd':'getEnsemble', filename:f, ensemble:n, [flo:flo, fhi: fhi]} ->
      {"cmd":"getEnsemble", "segy":
       {ns:nsamps, dt:dt: traces:[trc1, trc2...]}}
    """
    print('msgJ: {}'.format(msgJ))
    if msgJ['cmd'].lower() == 'getsegyhdrs':
        filename = msgJ['filename']
        print('getting segyhdr >{}<, filename: {}'.format(msgJ, filename))
        t0 = datetime.now()  # NOTE(review): t0 is never used; timing leftover?
        # Only (re)read the file when it differs from the one cached on the
        # module-level `segy` object.
        if segy.filename != filename:
            # new file - open it
            try:
                s = _read_su(filename, endian='>', headonly=True)
                segy.filename = filename
                segy.segyfile = s
            # NOTE(review): bare except hides the real failure (bad path,
            # wrong byte order, ...); consider catching specific exceptions.
            except:
                ret = json.dumps({"cmd":"readSegy", "error": "Error reading file {}".format(filename)})
                return ret
        print("ntrcs = {}".format(len(segy.segyfile.traces)))
        # Collect header-only trace records plus global ns/dt; cache them on
        # `segy` for later getNMO / getVelan calls.
        hdrs = [segy.getTrc(i, headonly=True) for i in range(len(segy.segyfile.traces))]
        nsamps = segy.segyfile.traces[0].header.number_of_samples_in_this_trace
        # header stores the sample interval in microseconds; convert to seconds
        dt = segy.segyfile.traces[0].header.sample_interval_in_ms_for_this_trace/(1000.*1000.)
        segy.nsamps = nsamps
        segy.dt = dt
        segy.hdrs = hdrs
        # Response payload is itself a JSON string nested inside the outer JSON.
        ret = json.dumps({"cmd": "getSegyHdrs",
                          "segyHdrs" : json.dumps({"dt":dt, "ns":nsamps,
                                                   "filename": segy.filename,
                                                   "hdrs":hdrs})})
        return ret

    if msgJ['cmd'].lower() == 'getnmo':
        # assumes getSegyHdrs called already. needed?
        print('nmo getting ens', msgJ)
        if segy.segyfile is None:
            ret = json.dumps({"cmd":"getNMO", "error": "Error doing NMO: call getSegyHdrs first."})
            return ret
        try:
            vnmo = msgJ['vnmo']
            tnmo = msgJ['tnmo']
            print('got nmo', vnmo, tnmo)
        except:
            # client supplied no velocity profile: fall back to a constant
            # 2000 m/s from t=0
            vnmo = 'vnmo=2000'
            tnmo = 'tnmo=0'
        try:
            ens = int(msgJ['ensemble'])
            try:
                # open a tmp file
                tmpf = tempfile.NamedTemporaryFile(delete=False) # output
                print('opened', tmpf.name)
                # and the segy input file
                with open(msgJ['filename'], 'rb') as sf: # input
                    # and do the nmo
                    # Seismic Unix pipeline: select the ensemble (suwind),
                    # apply gain (sugain), then NMO-correct (sunmo) into tmpf.
                    p1 = sp.Popen(['suwind', 'key=cdp', 'min={}'.format(ens), 'max={}'.format(ens)], stdin=sf, stdout=sp.PIPE)
                    p2 = sp.Popen(['sugain', "tpow=1.5"], stdin=p1.stdout, stdout=sp.PIPE)
                    p3 = sp.Popen(['sunmo', vnmo, tnmo], stdin=p2.stdout, stdout=tmpf)
                    print('p2 ok')
                    # close our copies of the upstream pipe ends so SIGPIPE
                    # can propagate if a downstream stage exits early
                    p1.stdout.close()
                    p2.stdout.close()
                    out,err = p3.communicate()
                    print('suwind/sugain/nmo', out, err)
                    #print('nmo call', ret)
                tmpf.close()
                # nmo segy file
                nsegy = Segy()
                nsegy.filename = tmpf.name
                nsegy.segyfile = _read_su(tmpf.name, headonly=False)
                nmontrcs = len(nsegy.segyfile.traces)
                #print('nmo ntrcs', nmontrcs)
                nmotrcs = [nsegy.getTrc(i, headonly=False, trctype='seismic') for i in range(nmontrcs)]
                # delete the tmp file
                #os.unlink(tmpf.name)
                print('nmo trcs', len(nmotrcs))
            except:
                print('err nmo', ens)
                ret = json.dumps({"cmd":"getNMO", "error": "Error performing NMO"})
                return ret
            ntrc = len(nmotrcs)
        except:
            print('err ens', msgJ)
            ret = json.dumps({"cmd":"getNMO", "error": "Error reading ensemble number"})
            return ret
        print("ens = {} ntrc={}".format(ens, len(nmotrcs)))
        # dt/nsamps could change from the original due to decimation
        dt = nmotrcs[0]["dt"]
        nsamps = nmotrcs[0]["nsamps"]
        print('dt, nsamps', dt, nsamps)
        #print(json.dumps(traces[0]))
        ret = json.dumps({"cmd": "getNMO",
                          "NMO" : json.dumps({"dt":dt, "ns":nsamps,
                                              "filename": nsegy.filename,
                                              "traces":nmotrcs})})
        return ret

    if msgJ['cmd'].lower() == 'getvelan':
        if segy.segyfile is None:
            ret = json.dumps({"cmd":"getEnsemble", "error": "Error reading ensemble"})
            return ret
        try:
            ens = int(msgJ['ensemble'])
            print('in velan', ens)
        except:
            print('no ens')
            return json.dumps({"cmd":"getVelan", "error": "Error reading ensemble number"})
        try:
            dv = msgJ['dv']
            fv = msgJ['fv']
            nv = msgJ['nv']
        except:
            # default scan: 50 velocities starting at 1500 in steps of 100
            fv=1500
            dv=100
            nv=50
        dvstr = "dv={}".format(dv)
        fvstr = "fv={}".format(fv)
        nvstr = "nv={}".format(nv)
        tmpf = tempfile.NamedTemporaryFile(delete=False) # output
        with open(segy.filename, 'rb') as sf:# input
            #tmpfname = tmpf.name
            # Same windowing/gain front-end as getNMO, but ending in a
            # velocity-analysis scan (suvelan) written to tmpf.
            p1 = sp.Popen(['suwind', 'key=cdp', 'min={}'.format(ens), 'max={}'.format(ens)], stdin=sf, stdout=sp.PIPE)
            p2 = sp.Popen(['sugain', "tpow=1.5"], stdin=p1.stdout, stdout=sp.PIPE)
            p3 = sp.Popen(['suvelan', dvstr, fvstr, nvstr], stdin=p2.stdout, stdout=tmpf)
            print('p3 ok')
            p1.stdout.close()
            p2.stdout.close()
            out,err = p3.communicate()
            print('suwind/sugain/velan', out, err)
            #ret = sp.call(['suvelan', dvstr, fvstr, nvstr], stdin=sf, stdout=tmpf)
            #print('wrote suvelan file', ret, tmpf.name)
        tmpf.close()
        vsegy = Segy()
        vsegy.filename=tmpf.name
        vsegy.segyfile = _read_su(tmpf.name, headonly=False)
        # keep only traces belonging to the requested ensemble
        vtrcs = [vsegy.getTrc(i, headonly=False, trctype='velocity', v0=fv, dv=dv) for i in range(len(vsegy.segyfile.traces)) if vsegy.segyfile.traces[i].header.ensemble_number == ens]
        print('nvel trcs', len(vtrcs))
        dt = vtrcs[0]["dt"]
        nsamps = vtrcs[0]["nsamps"]
        print('dt, nsamps', dt, nsamps)
        #print(json.dumps(traces[0]))
        ret = json.dumps({"cmd": "getVelan",
                          "velan" : json.dumps({"dt":dt, "ns":nsamps, "fv":fv,
                                                "dv":dv, "nv":nv,
                                                "filename": vsegy.filename,
                                                "traces":vtrcs})})
        #ret = json.dumps({"cmd": "velan", "velan": "test"})
        return ret

    if msgJ["cmd"].lower() == "gethello":
        # trivial liveness check
        ret = json.dumps({"cmd": "hello", "hello": "world"})
        return ret
def calc_total_energy(electron_energy, atomic_distance, energy0):
    """
    Calculate the total energy of the H2 molecule.

    Adds the proton-proton Coulomb repulsion E_p = 1 / R to each electronic
    energy and shifts the result by the chosen zero energy:

        E = E_el + E_p - E_0

    where E is the total energy, E_el is the electronic energy, R is the
    atomic distance and E_0 is the chosen zero energy.

    :param electron_energy: list of energies of H2 molecule without
                            proton-proton Coulomb energy
    :param atomic_distance: list of distances between two H atoms
                            of H2 molecule
    :param energy0: The energy that we take as zero energy
    :return: list of total energy of H2 molecule in MJ mol^{-1}
    """
    return [
        hartree_to_MJmol(e_el + proton_proton(distance) - energy0)
        for e_el, distance in zip(electron_energy, atomic_distance)
    ]
def k8s_stats_response():
    """
    Return the K8s /stats/summary endpoint output captured from microk8s
    running on a Jetson Nano.
    """
    with open("tests/resources/k8s_response.json", "r") as response_file:
        return response_file.read()
def test_crop():
    """Test similarly cropping one image to another"""
    numpy.random.seed(0)
    pixel_data = numpy.random.uniform(size=(20, 20))
    full_image = cellprofiler_core.image.Image(pixel_data)
    mask = numpy.zeros((20, 20), bool)
    mask[5:16, 5:16] = True
    cropped_image = cellprofiler_core.image.Image(
        pixel_data[5:16, 5:16], crop_mask=mask
    )
    workspace, module = make_workspace(full_image, cropped_image)
    module.run(workspace)
    measurements = workspace.measurements
    image_names = module.get_measurement_images(
        None, "Image", "Correlation", "Correlation"
    )
    correlation = measurements.get_current_measurement(
        "Image", "Correlation_Correlation_%s" % image_names[0]
    )
    # An image correlated with its own crop must have correlation 1.
    assert round(abs(correlation - 1), 7) == 0
def test_coerce__no_value(value):
    """If the value fails a truthyness test, it should be returned."""
    result = configure.coerce_to_expected(value, "foo", str)
    assert result == value
def display_credentials():
    """Return the saved credentials as provided by the Credentials model."""
    return Credentials.display_credential()
def evaluate_absence_of_narrow_ranges(
        piece: Piece, min_size: int = 9,
        penalties: Optional[Dict[int, float]] = None
) -> float:
    """
    Evaluate melodic fluency based on absence of narrow ranges.

    :param piece:
        `Piece` instance
    :param min_size:
        minimum size of narrow range (in line elements)
    :param penalties:
        mapping from width of a range (in scale degrees) to penalty
        applicable to ranges of not greater width
    :return:
        multiplied by -1 count of narrow ranges weighted based on their width
    """
    penalties = penalties or {2: 1, 3: 0.5}
    pitches = [
        element.scale_element.position_in_degrees
        for element in piece.counterpoint
    ]
    # Rolling min/max over windows of `min_size` elements; the first
    # (min_size - 1) entries cover incomplete windows and are discarded.
    window_mins = rolling_aggregate(pitches, min, min_size)[min_size - 1:]
    window_maxs = rolling_aggregate(pitches, max, min_size)[min_size - 1:]
    score = 0
    for low, high in zip(window_mins, window_maxs):
        width = high - low
        # The harshest applicable penalty (widest threshold >= width) wins.
        applicable = [
            penalty for threshold, penalty in penalties.items()
            if threshold >= width
        ]
        score -= max(applicable, default=0)
    return score
def collect_scalar_summands(cls, ops, kwargs):
    """Collect :class:`.ScalarValue` and :class:`.ScalarExpression` summands.

    Example::
        >>> srepr(collect_scalar_summands(Scalar, (1, 2, 3), {}))
        'ScalarValue(6)'
        >>> collect_scalar_summands(Scalar, (1, 1, -1), {})
        One
        >>> collect_scalar_summands(Scalar, (1, -1), {})
        Zero
        >>> Psi = KetSymbol("Psi", hs=0)
        >>> Phi = KetSymbol("Phi", hs=0)
        >>> braket = BraKet.create(Psi, Phi)
        >>> collect_scalar_summands(Scalar, (1, braket, -1), {})
        <Psi|Phi>^(0)
        >>> collect_scalar_summands(Scalar, (1, 2 * braket, 2, 2 * braket), {})
        ((3, 4 * <Psi|Phi>^(0)), {})
        >>> collect_scalar_summands(Scalar, (2 * braket, -braket, -braket), {})
        Zero
    """
    # This routine is required because there is no
    # "ScalarTimesQuantumExpression" for scalars: we have to extract
    # coefficiencts from ScalarTimes instead
    from qalgebra.core.scalar_algebra import (
        One,
        Scalar,
        ScalarTimes,
        ScalarValue,
        Zero,
    )

    a_0 = Zero  # running sum of the purely numeric summands
    # term -> accumulated coefficient; OrderedDict keeps first-seen order
    coeff_map = OrderedDict()
    for op in ops:
        if isinstance(op, ScalarValue) or isinstance(op, Scalar._val_types):
            # plain number: fold into the numeric accumulator
            a_0 += op
            continue
        elif isinstance(op, ScalarTimes):
            if isinstance(op.operands[0], ScalarValue):
                # split the leading numeric factor off the product; the
                # remaining factors are re-multiplied into a single term
                coeff = op.operands[0]
                term = op.operands[1]
                for sub_op in op.operands[2:]:
                    term *= sub_op
            else:
                coeff, term = One, op
        else:
            # any other scalar expression counts once with coefficient One
            coeff, term = One, op
        if term in coeff_map:
            coeff_map[term] += coeff
        else:
            coeff_map[term] = coeff
    # Rebuild the summand list: numeric part first, then each term scaled by
    # its collected coefficient, dropping anything that collapsed to zero.
    if a_0 == Zero:
        fops = []
    else:
        fops = [a_0]
    for (term, coeff) in coeff_map.items():
        op = coeff * term
        if not op.is_zero:
            fops.append(op)
    # Zero or one surviving summand needs no Sum wrapper; otherwise return
    # the simplified operand tuple for the caller to re-instantiate.
    if len(fops) == 0:
        return cls._zero
    elif len(fops) == 1:
        return fops[0]
    else:
        return tuple(fops), kwargs
def sge_submit(
    tasks, label, tmpdir, options="-q hep.q", dryrun=False, quiet=False,
    sleep=5, request_resubmission_options=True, return_files=False,
    dill_kw=None,
):
    """
    Submit jobs to an SGE batch system. Return a list of the results of each
    job (i.e. the return values of the function calls)

    Parameters
    ----------
    tasks : list
        A list of dictrionaries with the keys: task, args and kwargs. Each
        element is run on a node as task(*args, **kwargs).
    label : str
        Label given to the qsub submission script through -N.
    tmpdir : str
        Path to temporary directory (doesn't have to exist) where pysge stores
        job infomation. Each call will have a unique identifier in the form
        tpd_YYYYMMDD_hhmmss_xxxxxxxx. Within this directory exists all tasks in
        separate directories with a dilled file, stdout and stderr for that
        particular job.
    options : str (default = "-q hep.q")
        Additional options to pass to the qsub command. Take care since the
        following options are already in use: -wd, -V, -e, -o and -t.
    dryrun : bool (default = False)
        Create directories and files but don't submit the jobs.
    quiet : bool (default = False)
        Don't print tqdm progress bars. Other prints are controlled by logging.
    sleep : float (default = 5)
        Minimum time between queries to the batch system.
    request_resubmission_options : bool (default = True)
        When a job fails the master process will expect an stdin from the user
        to alter the submission options (e.g. to increase walltime or memory
        requested). If False it will use the original options.
    return_files : bool (default = False)
        Instead of opening the output files and loading them into python, just
        send the paths to the output files and let the user deal with them.
    dill_kw : dict or None (default = None)
        Kwargs to pass to dill.dump. None means {"recurse": False}.
    """
    # Fix for the shared-mutable-default pitfall: the previous signature used
    # dill_kw={"recurse": False}, a single dict object shared across calls.
    if dill_kw is None:
        dill_kw = {"recurse": False}
    if not _validate_tasks(tasks):
        logger.error(
            "Invalid tasks. Ensure tasks=[{'task': .., 'args': [..], "
            "'kwargs': {..}}, ...], where 'task' is callable."
        )
        return []

    area = WorkingArea(os.path.abspath(tmpdir))
    submitter = SGETaskSubmitter(" ".join(['-N {}'.format(label), options]))
    monitor = JobMonitor(submitter)

    results = []
    area.create_areas(tasks, quiet=quiet, dill_kw=dill_kw)
    try:
        submitter.submit_tasks(area.task_paths, dryrun=dryrun, quiet=quiet)
        if not dryrun:
            results = monitor.monitor_jobs(
                sleep=sleep, request_user_input=request_resubmission_options,
            )
    except KeyboardInterrupt:
        # Best-effort cleanup of queued/running jobs; the interrupt is
        # swallowed so whatever results were collected can still be returned.
        submitter.killall()

    if return_files:
        return results
    # Results are gzipped dill files on disk; load them back into python.
    results_not_files = []
    for path in results:
        with gzip.open(path, 'rb') as f:
            results_not_files.append(dill.load(f))
    return results_not_files
def frames_per_second():
    """Generator yielding an exponentially smoothed frames-per-second estimate.

    Each ``next()`` call measures the time since the previous call and folds
    the instantaneous rate into a running average whose smoothing weight
    grows with the elapsed interval.
    """
    from timeit import default_timer as timer

    previous = timer()
    smoothed = 0.0
    while True:
        current = timer()
        elapsed = current - previous
        previous = current
        instantaneous = 1.0 / elapsed
        # Longer gaps get more weight, clamped to [0, 1].
        weight = np.clip(3 * elapsed, 0, 1)
        smoothed = smoothed * (1 - weight) + instantaneous * weight
        yield smoothed
def _is_domain_interval(val):
    """ Check if a value is representing a valid domain interval

    Args:
        val: Value to check

    Returns:
        True if value is a tuple representing an interval: either
        (lo, hi) with integer lo <= hi, or (lo, hi, _HOLE_MARKER)
    """
    if not isinstance(val, tuple):
        return False
    vl = len(val)
    # Check the length before indexing: the original accessed val[0]/val[1]
    # first, raising IndexError on tuples shorter than two elements.
    if vl not in (2, 3):
        return False
    if not (is_int(val[0]) and is_int(val[1]) and (val[1] >= val[0])):
        return False
    if vl == 2:
        return True
    # 3-tuples must carry the hole marker as their third element
    return val[2] == _HOLE_MARKER
def upgrade():
    """
    Changing the log table columns to use uuid to reference remote objects and log entries.
    Upgrade function.
    """
    connection = op.get_bind()

    # Clean data
    export_and_clean_workflow_logs(connection)

    # Create the dbnode_id column and add the necessary index
    # (nullable at first so existing rows can be backfilled below)
    op.add_column('db_dblog', sa.Column('dbnode_id', sa.INTEGER(), autoincrement=False, nullable=True))
    # Transfer data to dbnode_id from objpk
    connection.execute(text("""UPDATE db_dblog SET dbnode_id=objpk"""))
    # Deferred FK so the constraint is only checked at transaction commit
    op.create_foreign_key(
        None,
        "db_dblog",
        "db_dbnode", ['dbnode_id'], ['id'],
        ondelete=u'CASCADE',
        initially=u'DEFERRED',
        deferrable=True)

    # Update the dbnode_id column to not nullable
    # (safe now that every row has been backfilled)
    op.alter_column('db_dblog', 'dbnode_id', nullable=False)

    # Remove the objpk column
    op.drop_column('db_dblog', 'objpk')

    # Remove the objname column
    op.drop_column('db_dblog', 'objname')

    # Remove objpk and objname from metadata dictionary
    # (JSONB key deletion via the `-` operator)
    connection.execute(text("""UPDATE db_dblog SET metadata = metadata - 'objpk' - 'objname' """))
def rndcaps(n):
    """
    Generate a string of random capital letters.

    Arguments:
        n: Length of the output string.

    Returns:
        A string of n random capital letters.
    """
    return "".join(choice(_CAPS) for _ in range(n))
def cutByWords(text, chunkSize, overlap, lastProp):
    """
    Cuts the text into equally sized chunks, where the segment size is measured by counts of words,
    with an option for an amount of overlap between chunks and a minim
    um proportion threshold for the last chunk.

    Args:
        text: The string with the contents of the file.
        chunkSize: The size of the chunk, in words.
        overlap: The number of words to overlap between chunks.
        lastProp: The minimum proportional size that the last chunk has to be.

    Returns:
        A list of string that the text has been cut into.
    """
    chunkList = [] # The list of the chunks (a.k.a a list of list of strings)
    chunkSoFar = Queue() # The rolling window representing the (potential) chunk
    currChunkSize = 0 # Index keeping track of whether or not it's time to make a chunk out of the window
    tillNextChunk = chunkSize - overlap # The distance between the starts of chunks

    splitText = splitKeepWhitespace(text)

    # Create list of chunks (chunks are lists of words and whitespace) by using a queue as a rolling window
    for token in splitText:
        if token in WHITESPACE:
            # whitespace rides along in the window but does not count
            # towards the chunk's word total
            chunkSoFar.put(token)
        else:
            currChunkSize += 1
            if currChunkSize > chunkSize:
                # window overflowed: snapshot it as a finished chunk, then
                # slide forward by (chunkSize - overlap) words
                chunkList.append(list(chunkSoFar.queue))
                stripLeadingWords(wordQueue=chunkSoFar, numWords=tillNextChunk)
                currChunkSize -= tillNextChunk
            chunkSoFar.put(token)

    # Making sure the last chunk is of a sufficient proportion
    lastChunk = list(chunkSoFar.queue) # Grab the final (partial) chunk
    if (float(countWords(lastChunk)) / chunkSize) < lastProp: # If the proportion of the last chunk is too low
        if len(chunkList)==0:
            # NOTE(review): extend() splices lastChunk's individual tokens
            # into chunkList (a list of strings, not a list of chunks);
            # the flat-list case is compensated for at the bottom — verify
            # this asymmetry with append() below is intentional.
            chunkList.extend(lastChunk)
        else:
            # merge the undersized tail into the previous chunk
            chunkList[-1].extend(lastChunk)
    else:
        chunkList.append(lastChunk)

    # Make the list of lists of strings into a list of strings
    countSubList = 0
    stringList=[]
    for subList in chunkList:
        stringList.extend([''.join(subList)])
        # NOTE(review): ListType comes from the Python-2 `types` module;
        # confirm the interpreter version this file targets.
        if type(subList) is ListType:
            countSubList+=1

    # Prevent there isn't subList inside chunkList
    # (i.e. chunkList was flattened to raw tokens above: re-join them all)
    if countSubList==0:
        stringList = []
        stringList.extend([''.join(chunkList)])

    return stringList
def raise_business_exception(error_code, error_message=None, error_data=None):
    """Raise a business exception for the bsm_config app.

    Falls back to the canned phrase for ``error_code`` when no explicit
    message is supplied.
    """
    if not error_message:
        error_message = ERROR_PHRASES.get(error_code)
    raise BusinessException(
        error_code=error_code,
        error_message=error_message,
        error_data=error_data,
        error_app='bsm_config',
    )
def dsu_sort2(list, index, reverse=False):
    """
    This function sorts only based on the primary element, not on secondary elements in case of equality.

    Decorate-sort-undecorate: each entry is reduced to its sub-element at
    ``index``, those are sorted, and the element at position 1 of each
    sorted key is written back. The input list is mutated in place and
    also returned.
    """
    # Decorate: keep only the sub-element at `index` from every entry,
    # then sort those keys (Timsort is stable, matching list.sort()).
    decorated = sorted((entry[index] for entry in list), reverse=reverse)
    # Undecorate in place so callers holding a reference see the result too.
    list[:] = (key[1] for key in decorated)
    return list
def roca_view(full, partial, **defaults):
    """
    Render partal for XHR requests and full template otherwise
    """
    render = defaults.pop('template_func', template)

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # XHR callers get the partial fragment, everyone else the page
            tpl_name = partial if request.is_xhr else full
            result = func(*args, **kwargs)
            if isinstance(result, (dict, DictMixin)):
                # merge the handler's context over the decorator defaults
                context = defaults.copy()
                context.update(result)
                return render(tpl_name, **context)
            if result is None:
                return render(tpl_name, defaults)
            # non-dict results (strings, responses) pass through untouched
            return result
        return wrapper
    return decorator
async def _shuffle(s, workers, dfs_nparts, dfs_parts, column):
    """
    Parameters
    ----------
    s: dict
        Worker session state
    workers: set
        Set of ranks of all the participants
    dfs_nparts: list of dict
        List of dict that for each worker rank specifices the
        number of partitions that worker has. If the worker doesn't
        have any partitions, it is excluded from the dict.
        E.g. `dfs_nparts[0][1]` is how many partitions of the "left"
        dataframe worker 1 has.
    dfs_parts: list of lists of Dataframes
        List of inputs, which in this case are two dataframe lists.
    column : label or list, or array-like
        The bases of the rearrangement.
    """
    # NOTE(review): dfs_nparts is unused in this function body — confirm it
    # is kept only for signature compatibility with sibling operations.
    assert s["rank"] in workers
    df_parts = dfs_parts[0]

    # Trimming such that all participanting workers get a rank within 0..len(workers)
    trim_map = {}
    for i in range(s["nworkers"]):
        if i in workers:
            trim_map[i] = len(trim_map)

    rank = trim_map[s["rank"]]
    # Endpoints to every other participant, keyed by trimmed rank.
    # NOTE(review): s["eps"] is indexed here with the *trimmed* rank —
    # confirm the session stores endpoints by trimmed (not original) rank.
    eps = {trim_map[i]: s["eps"][trim_map[i]] for i in workers if i != s["rank"]}

    # Concatenate this worker's local partitions before the global shuffle.
    df = df_concat(df_parts)

    return await shuffle(len(workers), rank, eps, df, column)
def list_objects_or_buckets(client, args):
    """
    Lists buckets or objects

    With no positional argument, prints a table of all buckets; with a
    bucket name, prints that bucket's top-level contents. Always exits the
    process (0 on success, 2 when the bucket is not found).
    """
    parser = argparse.ArgumentParser(PLUGIN_BASE+' ls')
    parser.add_argument('bucket', metavar='NAME', type=str, nargs='?',
                        help="Optional. If not given, lists all buckets. If given, "
                        "lists the contents of the given bucket.")

    parsed = parser.parse_args(args)

    if parsed.bucket:
        # list objects
        try:
            bucket = client.get_bucket(parsed.bucket)
        except S3ResponseError:
            print('No bucket named '+parsed.bucket)
            sys.exit(2)

        data = []
        for c in bucket.list():
            # only show top-level entries: skip keys nested more than one
            # path segment deep
            if c.key.count('/') > 1 or ('/' in c.key and not c.key.endswith('/')):
                continue
            size = c.size
            if size == 0:
                # zero-size keys ending in '/' act as directory placeholders
                size = 'DIR'
            # NOTE(review): local name `datetime` shadows the datetime
            # module/import for the rest of this loop — consider renaming.
            datetime = _convert_datetime(c.last_modified) if size != 'DIR' else ' '*16
            data.append([datetime, size, c.name])

        if data:
            tab = _borderless_table(data)
            print(tab.table)

        sys.exit(0)
    else:
        # list buckets
        buckets = client.get_all_buckets()
        data = [
            [_convert_datetime(b.creation_date), b.name] for b in buckets
        ]

        tab = _borderless_table(data)
        print(tab.table)

        sys.exit(0)
def test_compile_hourly_statistics_unavailable(
    hass_recorder, caplog, device_class, unit, value
):
    """Test compiling hourly statistics, with the sensor being unavailable.

    sensor.test1 is made partially unavailable and must be excluded from the
    compiled statistics; sensor.test2 stays available and must appear.
    """
    zero = dt_util.utcnow()
    hass = hass_recorder()
    recorder = hass.data[DATA_INSTANCE]
    setup_component(hass, "sensor", {})
    attributes = {
        "device_class": device_class,
        "state_class": "measurement",
        "unit_of_measurement": unit,
    }
    # test1: recorded with unavailable gaps; test2: fully available control
    four, states = record_states_partially_unavailable(
        hass, zero, "sensor.test1", attributes
    )
    _, _states = record_states(hass, zero, "sensor.test2", attributes)
    states = {**states, **_states}
    # sanity check: recorded history matches what we injected
    hist = history.get_significant_states(hass, zero, four)
    assert dict(states) == dict(hist)

    recorder.do_adhoc_statistics(start=four)
    wait_recording_done(hass)
    stats = statistics_during_period(hass, four, period="5minute")
    # only the available sensor produces a statistics row
    assert stats == {
        "sensor.test2": [
            {
                "statistic_id": "sensor.test2",
                "start": process_timestamp_to_utc_isoformat(four),
                "end": process_timestamp_to_utc_isoformat(four + timedelta(minutes=5)),
                "mean": approx(value),
                "min": approx(value),
                "max": approx(value),
                "last_reset": None,
                "state": None,
                "sum": None,
            }
        ]
    }
    assert "Error while processing event StatisticsTask" not in caplog.text
def play_game(board: GoBoard):
    """
    Run a random-playout simulation from the current board to the end
    and return the winner's color.
    """
    move = True  # sentinel so the loop body runs at least once
    while move is not None:
        # play a random move for the current player
        color = board.current_player
        move = GoBoardUtil.generate_random_move(board, color)
        board.play_move(move, color)
        # a None move means the current player passed, ending the game
    # The player who passed loses: the opponent of the last mover wins.
    return GoBoardUtil.opponent(color)
def logical_array(ar):
    """Convert ndarray (int, float, bool) to array of 1 and 0's"""
    # Nonzero entries become 1, zeros stay 0; the original dtype is kept.
    return (ar != 0).astype(ar.dtype)
def otp_route(
    in_gdf,
    mode,
    date_time=datetime.now(),
    trip_name='',
):
    """
    Return a GeoDataFrame with detailed trip information for the best option.

    Parameters
    ----------
    in_gdf : GeoDataFrame
        It should only contain two records, first record is origin and
        the second record is destination. If more than two records only
        the first two records are considered.
    mode : string
        Indicates transport modes. Modes that can be used
        include 'public_transport', 'car_in_traffic', 'car_free_flow',
        'walk', 'cycle'
    trip_name : string
        gives the trip a name which is stored in the trip_name in output
        GeoDataFrame.
    date_time : a datetime object
        Sets the start time of a trip. Only important if the mode is
        transit or a subset of transit.
        NOTE(review): `datetime.now()` as a default is evaluated once at
        import time, not per call — confirm this staleness is acceptable.

    Returns
    -------
    GeoDataFrame
        Has the structure
        -``trip_name`` the name given as an input to the trip.
        -``leg_id`` A counter for each trip leg
        -``mode`` returns the mode for each trip leg
        -``from`` the shapely point data in WGS84 for the origin location
        -``from_name`` the interim stop id on the network or 'Origin'
        -``to`` the shapely point data in WGS84 for the destination location
        -``to_name`` the interim stop id on the network or 'Destination'
        -``route_id`` the route id for the trip leg if the mode is transit
        -``trip_id`` the trip id for the trip leg if the mode is transit
        -``distance`` Distance traveled in meters for the trip leg
        -``duration`` Travel time for the trip leg in seconds
        -``startTime`` time stamp for the start time of the trip leg
        -``endTime`` time stamp for the end time of the trip leg
        -``waitTime`` Wait time for the trip leg in seconds
        -``geometry`` The geometry of the trip leg in shapely object and WGS84
    """
    # The mode parameter is not validated by the Maps API
    # Check here to prevent silent failures.
    if mode not in list(cs.otp_modes.keys()):
        raise ValueError("{0} is an invalid travel mode.".format(mode))

    if in_gdf.crs.name != 'WGS 84':
        # Check the coordinate system is WGS84
        raise ValueError("Invalid coordinate system.")
    if mode == 'public_transport' and not date_time:
        date_time = datetime.now()

    # get from and to location from locations_gdf
    orig = in_gdf['geometry'].iat[0]
    dest = in_gdf['geometry'].iat[-1]
    # OTP expects "lat, lon" ordering
    orig_text = "{0}, {1}".format(orig.y, orig.x)
    dest_text = "{0}, {1}".format(dest.y, dest.x)
    t = date_time.strftime("%H:%M%p")
    d = date_time.strftime("%m-%d-%Y")

    # send query to api (local OpenTripPlanner instance)
    url = 'http://localhost:8080/otp/routers/default/plan'
    query = {
        "fromPlace": orig_text,
        "toPlace": dest_text,
        "time": t,
        "date": d,
        "mode": cs.otp_modes[mode],
    }
    r = requests.get(url, params=query)
    # check for request error
    r.raise_for_status()
    # if error then return emptly GeoDataFrame
    if not 'error' in r.json():
        # convert request output ot a GeoDataFrame
        # (only the first/best itinerary is used)
        df = pd.DataFrame(r.json()['plan']['itineraries'][0]['legs']).reset_index()
        # NOTE(review): the 'wait_time': 'waitTime' entry looks reversed
        # compared with the other camelCase -> snake_case renames (and
        # wait_time is recomputed below anyway) — verify.
        df = df.rename(columns={
            'index': 'leg_id',
            'mode': 'mode',
            'routeId': 'route_id',
            'tripId': 'trip_id',
            'startTime': 'start_time',
            'endTime': 'end_time',
            'wait_time': 'waitTime',
        })
        # decode the Google-encoded polyline into a WGS84 LineString
        df['geometry'] = df['legGeometry'].map(
            lambda x: geom.LineString([(p['lng'], p['lat']) for p in decode_polyline(x['points'])])
        )
        df['from_name'] = df['from'].map(lambda x: x['stopId'] if 'stopId' in x else x['name'])
        df['to_name'] = df['to'].map(lambda x: x['stopId'] if 'stopId' in x else x['name'])
        df['from'] = df['from'].map(lambda x: geom.Point(x['lon'], x['lat']))
        df['to'] = df['to'].map(lambda x: geom.Point(x['lon'], x['lat']))
        # OTP timestamps are epoch milliseconds
        df['start_time'] = df['start_time'].map(lambda x: datetime.fromtimestamp(x/1000))
        df['end_time'] = df['end_time'].map(lambda x: datetime.fromtimestamp(x/1000))
        # calculate wait time: gap between this leg's end and the next
        # leg's start (NaT for the final leg)
        df['wait_time'] = df['start_time'].shift(-1)
        df['wait_time'] = df['wait_time']-df['end_time']
        df['trip_name'] = trip_name
        # ensure every expected output column exists, even if empty
        for column in cs.columns:
            if column not in df.columns.values:
                df[column] = ''
        # reorder the fields
        df = df[cs.columns].copy()
        gdf = gpd.GeoDataFrame(df, crs=cs.WGS84)
    else:
        gdf = gpd.GeoDataFrame()
    gdf = gdf[gdf['geometry'].notnull()].copy()
    return gdf
def add_ada_tab(nodes=None):
    """
    Add an ada tab to a given list of nodes. The tab is the instructions for what Ada should do to this node.

    Args:
        nodes (list): List of nuke node objects (including root). Defaults to
            the current selection; a single node is also accepted.
    """
    if nodes is None:
        nodes = nuke.selectedNodes()
    elif not isinstance(nodes, list):
        # allow a single node to be passed directly
        nodes = [nodes]

    for node in nodes:
        # skip Viewers and nodes that already carry the tab (idempotent)
        if node.Class() == "Viewer" or has_ada_tab(node):
            continue

        ada_tab = nuke.Tab_Knob("ada", "Ada")

        # -- bake knobs: checkbox + comma-separated knob list ------------
        bake_knobs_boolean = nuke.Boolean_Knob("bake_knobs", " ")
        bake_knobs_boolean.setValue(False)
        bake_knobs_boolean.setTooltip("bake knobs to bake")
        bake_knobs_boolean.setFlag(nuke.STARTLINE)

        knobs_to_bake_string = nuke.EvalString_Knob(
            "knobs_to_bake", "knobs to bake "
        )
        knobs_to_bake_string.clearFlag(nuke.STARTLINE)
        knobs_to_bake_string.setTooltip(
            "comma-separated list of knobs to bake, or values "
            "to assign. eg: 'value=10, file=[pcrn input 1]'"
        )

        # -- set knobs: checkbox + assignment list -----------------------
        set_knobs_boolean = nuke.Boolean_Knob("set_knobs", " ")
        set_knobs_boolean.setValue(False)
        set_knobs_boolean.setTooltip("set knobs to bake")
        set_knobs_boolean.setFlag(nuke.STARTLINE)

        knobs_to_set_string = nuke.EvalString_Knob(
            "knobs_to_set", "knobs to set "
        )
        knobs_to_set_string.clearFlag(nuke.STARTLINE)
        knobs_to_set_string.setTooltip(
            "assign value. eg: 'value=10, file=[pcrn input 1]'"
        )

        # -- execute buttons: checkbox + button-name list ----------------
        execute_buttons_boolean = nuke.Boolean_Knob("execute_knobs", " ")
        execute_buttons_boolean.setValue(False)
        execute_buttons_boolean.setTooltip("execute knobs/buttons")
        execute_buttons_boolean.setFlag(nuke.STARTLINE)

        execute_buttons_string = nuke.EvalString_Knob(
            "knobs_to_execute", "knobs to execute "
        )
        execute_buttons_string.clearFlag(nuke.STARTLINE)
        execute_buttons_string.setTooltip(
            "comma-separated list of knobs (buttons) to execute()"
        )

        # -- execute code: checkbox + python snippet ---------------------
        exeucte_code_boolean = nuke.Boolean_Knob("execute_code", " ")
        exeucte_code_boolean.setValue(False)
        exeucte_code_boolean.setTooltip("run the code to exec")
        exeucte_code_boolean.setFlag(nuke.STARTLINE)

        execute_code_string = nuke.Multiline_Eval_String_Knob(
            "code_to_execute", "code to execute "
        )
        execute_code_string.setTooltip(
            "python code to exec()\nnuke.thisNode() " "is available to the code"
        )
        execute_code_string.clearFlag(nuke.STARTLINE)

        # -- bake ordering / opt-out -------------------------------------
        queue_order_int = nuke.Int_Knob("queue_order", "queue order")
        queue_order_int.clearFlag(nuke.STARTLINE)
        queue_order_int.setTooltip(
            "Nodes are baked from the lowest order to the "
            "highest. Default value is 0"
        )

        do_not_bake_boolean = nuke.Boolean_Knob(
            "do_not_bake", " do not bake this node "
        )
        do_not_bake_boolean.setValue(False)
        do_not_bake_boolean.setTooltip("do not bake this node")
        do_not_bake_boolean.setFlag(nuke.STARTLINE)

        knobs_to_serialise = nuke.Multiline_Eval_String_Knob(
            "knobs_to_serialise", "knobs to serialise"
        )
        knobs_to_serialise.setTooltip(
            "these knobs will be saved with the template and "
            "then can be set externally"
        )

        # attach all knobs to the node in display order
        node.addKnob(ada_tab)
        node.addKnob(bake_knobs_boolean)
        node.addKnob(knobs_to_bake_string)
        node.addKnob(set_knobs_boolean)
        node.addKnob(knobs_to_set_string)
        node.addKnob(execute_buttons_boolean)
        node.addKnob(execute_buttons_string)
        node.addKnob(exeucte_code_boolean)
        node.addKnob(execute_code_string)
        node.addKnob(do_not_bake_boolean)
        node.addKnob(queue_order_int)
        node.addKnob(knobs_to_serialise)

        # autolabel so Ada-managed nodes are visually identifiable
        try:
            node.knob("autolabel").setValue(
                "__import__('ada.nuke.utils', fromlist=['autolabel']).autolabel()"
            )
        except ImportError:
            pass

        # set knob changed on the newly created ada node
        # (append rather than overwrite any existing knobChanged script)
        kc = node.knob("knobChanged")
        cur_kc = kc.value()
        new_kc = "{}\n{}".format(cur_kc, ADA_KNOB_CHANGED)
        kc.setValue(new_kc)
        node.knob("icon").setValue("ada_raw.png")
def get_package_data(package):
    """
    Collect every file under *package* that is not inside a Python
    package directory (i.e. a directory containing ``__init__.py``).

    Returns a dict mapping the package name to a list of file paths
    relative to the package root, as expected by setup()'s
    ``package_data`` argument.
    """
    filepaths = []
    for dirpath, _dirnames, filenames in os.walk(package):
        # Skip directories that are themselves Python packages.
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            continue
        base = dirpath.replace(package + os.sep, '', 1)
        filepaths.extend(os.path.join(base, name) for name in filenames)
    return {package: filepaths}
def tall_clutter(files, config,
                 clutter_thresh_min=0.0002,
                 clutter_thresh_max=0.25, radius=1,
                 max_height=2000., write_radar=True,
                 out_file=None, use_dask=False):
    """
    Wind Farm Clutter Calculation.

    Gates whose temporal std/mean reflectivity ratio falls between
    clutter_thresh_min and clutter_thresh_max are flagged as clutter,
    dilated by `radius`, and stored on a copy of the first readable
    radar volume.

    Parameters
    ----------
    files : list
        List of radar files used for the clutter calculation.
    config : str
        String representing the configuration for the radar.
        Such possible configurations are listed in default_config.py

    Other Parameters
    ----------------
    clutter_thresh_min : float
        Threshold value for which, any clutter values above the
        clutter_thres_min will be considered clutter, as long as they
        are also below the clutter_thres_max.
    clutter_thresh_max : float
        Threshold value for which, any clutter values below the
        clutter_thres_max will be considered clutter, as long as they
        are also above the clutter_thres_min.
    radius : int
        Radius of the area surrounding the clutter gate that will
        be also flagged as clutter.
    max_height : float
        Maximum height above the radar to mark a gate as clutter.
    write_radar : bool
        Whether or not to write the clutter radar as a netCDF file.
        Default is True.
    out_file : string
        String of location and filename to write the radar object too,
        if write_radar is True.
    use_dask : bool
        Use dask instead of running stats for calculation. This will
        reduce run time.

    Returns
    -------
    clutter_radar : Radar
        Radar object with the clutter field that was calculated.
        This radar only has the clutter field, but maintains all
        other radar specifications.
    """
    field_names = get_field_names(config)
    refl_field = field_names["reflectivity"]
    vel_field = field_names["velocity"]
    ncp_field = field_names["normalized_coherent_power"]

    def get_reflect_array(file, first_shape):
        """ Retrieves a reflectivity array for a radar volume. """
        try:
            radar = pyart.io.read(file, include_fields=[refl_field,
                                                        ncp_field, vel_field])
            reflect_array = deepcopy(radar.fields[refl_field]['data'])
            ncp = radar.fields[ncp_field]['data']
            height = radar.gate_z["data"]
            up_in_the_air = height > max_height
            # Mask low-quality (NCP < 0.9), already-masked, or too-high gates.
            the_mask = np.logical_or.reduce(
                (ncp < 0.9, reflect_array.mask, up_in_the_air))
            reflect_array = np.ma.masked_where(the_mask, reflect_array)
            del radar
            if reflect_array.shape == first_shape:
                return reflect_array.filled(fill_value=np.nan)
        except (TypeError, OSError):
            print(file + ' is corrupt...skipping!')
        # Unreadable file or mismatched shape: contribute an all-NaN volume.
        return np.nan * np.zeros(first_shape)

    if use_dask is False:
        run_stats = _RunningStats()
        first_shape = 0
        for file in files:
            try:
                radar = pyart.io.read(file)
                reflect_array = radar.fields[refl_field]['data']
                ncp = deepcopy(radar.fields[ncp_field]['data'])
                height = radar.gate_z["data"]
                reflect_array = np.ma.masked_where(
                    np.logical_or(height > max_height, ncp < 0.8),
                    reflect_array)
                if first_shape == 0:
                    first_shape = reflect_array.shape
                    clutter_radar = radar
                # BUG FIX: the first volume was previously pushed twice
                # (once when first_shape was set and once here), which
                # skewed the running mean/stddev.
                if reflect_array.shape == first_shape:
                    run_stats.push(reflect_array)
                del radar
            except (TypeError, OSError):
                print(file + ' is corrupt...skipping!')
                continue
        mean = run_stats.mean()
        stdev = run_stats.standard_deviation()
        clutter_values = stdev / mean
        clutter_values = np.ma.masked_invalid(clutter_values)
    else:
        cluster = LocalCluster(n_workers=20, processes=True)
        client = Client(cluster)
        # Find the first readable volume; it defines the reference shape.
        first_shape = 0
        i = 0
        while first_shape == 0:
            try:
                radar = pyart.io.read(files[i])
                reflect_array = radar.fields[refl_field]['data']
                first_shape = reflect_array.shape
                clutter_radar = radar
            except (TypeError, OSError):
                # BUG FIX: previously printed the undefined name `file`,
                # raising a NameError instead of skipping the bad file.
                print(files[i] + ' is corrupt...skipping!')
                i = i + 1
                continue
        arrays = [delayed(get_reflect_array)(file, first_shape)
                  for file in files]
        array = [da.from_delayed(a, shape=first_shape, dtype=float)
                 for a in arrays]
        array = da.stack(array, axis=0)
        print('## Calculating mean in parallel...')
        mean = np.array(da.nanmean(array, axis=0))
        print('## Calculating standard deviation...')
        count = np.array(da.sum(da.isfinite(array), axis=0))
        stdev = np.array(da.nanstd(array, axis=0))
        clutter_values = stdev / mean
        clutter_values = np.ma.masked_invalid(clutter_values)
        # Require at least 20 valid samples per gate.
        clutter_values = np.ma.masked_where(np.logical_or(
            clutter_values.mask, count < 20), clutter_values)
    # Masked arrays can suck: work on a filled copy so threshold
    # comparisons never hit masked elements.
    clutter_values_no_mask = clutter_values.filled(
        (clutter_thresh_max + 1))
    shape = clutter_values.shape
    mask = np.ma.getmask(clutter_values)
    is_clutters = np.argwhere(
        np.logical_and.reduce((clutter_values_no_mask > clutter_thresh_min,
                               clutter_values_no_mask < clutter_thresh_max,
                               )))
    clutter_array = _clutter_marker(is_clutters, shape, mask, radius)
    clutter_radar.fields.clear()
    clutter_array = clutter_array.filled(0)
    clutter_dict = _clutter_to_dict(clutter_array)
    clutter_value_dict = _clutter_to_dict(clutter_values)
    clutter_value_dict["long_name"] = "Clutter value (std. dev/mean Z)"
    clutter_radar.add_field('ground_clutter', clutter_dict,
                            replace_existing=True)
    clutter_radar.add_field('clutter_value', clutter_value_dict,
                            replace_existing=True)
    if write_radar is True:
        pyart.io.write_cfradial(out_file, clutter_radar)
    # BUG FIX: return the radar as documented (the original deleted it
    # and implicitly returned None).
    return clutter_radar
def serialize_bundle7(source_eid, destination_eid, payload,
                      report_to_eid=None, crc_type_primary=CRCType.CRC32,
                      creation_timestamp=None, sequence_number=None,
                      lifetime=300, flags=BlockProcFlag.NONE,
                      fragment_offset=None, total_adu_length=None,
                      hop_limit=30, hop_count=0, bundle_age=0,
                      previous_node_eid=None,
                      crc_type_canonical=CRCType.CRC16):
    """Encode *payload* from *source_eid* to *destination_eid* as a
    serialized BPbis bundle and return it as bytes.

    Thin convenience wrapper around create_bundle7; see create_bundle7
    for the meaning of every option."""
    bundle = create_bundle7(
        source_eid, destination_eid, payload,
        report_to_eid, crc_type_primary,
        creation_timestamp, sequence_number,
        lifetime, flags,
        fragment_offset, total_adu_length,
        hop_limit, hop_count, bundle_age,
        previous_node_eid,
        crc_type_canonical,
    )
    return bytes(bundle)
def _get_chinese_week(localtime):
"""获取星期和提醒"""
chinese_week = ["一", "二", "三", "四", "五", "六", "日"]
tm_w_day = localtime.tm_wday
extra_msg = "<green>当前正是周末啦~</green>" if tm_w_day in [5, 6] else "Other"
if extra_msg == "Other":
go_week = 4 - tm_w_day
extra_msg = f"<yellow>还有 {go_week} 天周末</yellow>" if go_week != 0 else "<blue>明天就是周末啦~坚持摸鱼~</blue>"
return chinese_week[tm_w_day], extra_msg | 26,930 |
def resnext101_32x16d_swsl(cfg, progress=True, **kwargs):
    """Constructs a semi-weakly supervised ResNeXt-101 32x16 model pre-trained on 1B weakly supervised
    image dataset and finetuned on ImageNet.
    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
    Args:
        progress (bool): If True, displays a progress bar of the download to stderr.
    """
    # Fix the ResNeXt cardinality/width for the 32x16d variant.
    kwargs.update(groups=32, width_per_group=16)
    url = semi_weakly_supervised_model_urls['resnext101_32x16d']
    return _resnext(url, Bottleneck, [3, 4, 23, 3],
                    cfg.pretrained, progress, **kwargs)
def leaderboard(players=None, N=DEFAULTN, filename="leaderboard.txt"):
    """Create a leaderboard, and optionally save it to a file.

    :param players: players to rank (None means all).
    :param N: number of recent games to consider.
    :param filename: file to write the rendered table to; falsy skips saving.
    :return: (board, table) as produced by make_leaderboard.
    """
    logger.info("Generating a leaderboard for players: %r, N=%d", players, N)
    ratings, allgames, players = get_ratings(players, N)
    board, table = make_leaderboard(ratings, allgames, players)
    # BUG FIX: was a Python 2 print statement, a SyntaxError in Python 3.
    print(table)
    if filename:
        logger.info("Saving leaderboard to file: %s", filename)
        with open(filename, "w") as f:
            f.write(table)
            f.write('\n')
    return board, table
def m_college_type(seq):
    """Classify a school from a (school_type, degree) pair.

    seq[0] is the school-type string and seq[1] the degree level.
    "985" schools map to "985,211工程院校", "211"-only schools to
    "211工程院校". Otherwise the result depends on the degree:
    "本科" for 博士/硕士/本科 holders, "专科" for everything else.
    """
    school, degree = seq[0], seq[1]
    if "985" in school:
        return "985,211工程院校"
    # "985" was already handled, so any remaining "211" is 211-only.
    if "211" in school:
        return "211工程院校"
    return "本科" if degree in ("博士", "硕士", "本科") else "专科"
def get_raster_wcs(coordinates, geographic=True, layer=None):
    """Return a subset of a raster image from the local GeoServer via WCS 2.0.1 protocol.

    For geographic rasters, subsetting is based on WGS84 (Long, Lat) boundaries.
    If not geographic, subsetting is based on projected coordinate system
    (Easting, Northing) boundaries.

    Parameters
    ----------
    coordinates : sequence
        Geographic coordinates of the bounding box (left, down, right, up)
    geographic : bool
        If True, uses "Long" and "Lat" in WCS call. Otherwise uses "E" and "N".
    layer : str
        Layer name of raster exposed on GeoServer instance. E.g. 'public:CEC_NALCMS_LandUse_2010'

    Returns
    -------
    bytes
        A GeoTIFF array.

    Raises
    ------
    ChildProcessError
        If the server answered with an XML error document instead of raster data.
    """
    from owslib.wcs import WebCoverageService
    from lxml import etree

    (left, down, right, up) = coordinates
    if geographic:
        x, y = 'Long', 'Lat'
    else:
        x, y = 'E', 'N'
    wcs = WebCoverageService('http://boreas.ouranos.ca/geoserver/ows',
                             version='2.0.1')
    # BUG FIX: getCoverage errors were caught and re-raised as a bare
    # Exception(e), destroying the original type and traceback; let them
    # propagate unchanged instead.
    resp = wcs.getCoverage(identifier=[layer, ],
                           format='image/tiff',
                           subsets=[(x, left, right), (y, down, up)])
    data = resp.read()
    try:
        etree.fromstring(data)
        # The response parsed as XML: it is a server error document.
        raise ChildProcessError(data)
    except etree.XMLSyntaxError:
        # The response is the raster (GeoTIFF) payload.
        return data
def del_local_name(*args):
    """
    del_local_name(ea) -> bool

    SWIG-generated wrapper: forwards to the native IDA API to delete the
    local (function-scope) name at address ``ea``; returns whether the
    deletion succeeded.
    """
    return _ida_name.del_local_name(*args)
def solve_google_pdp(data):
    """Entry point of the program.

    Solves a pickup-and-delivery routing problem described by ``data``
    (keys used here: 'distance_matrix', 'num_vehicles', 'depot',
    'pickups_deliveries') with OR-Tools, prints every vehicle's route,
    and converts the assignment into a meta.Chromosome.

    NOTE(review): depends on the module-level ``meta`` helper and
    ``_instance``; when no assignment is found the chromosome is
    returned with empty genes and without calling meta.evaluate —
    confirm callers handle that.
    """
    # Create the routing index manager.
    manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),
                                           data['num_vehicles'], data['depot'])
    # Create Routing Model.
    routing = pywrapcp.RoutingModel(manager)
    # Define cost of each arc.
    def distance_callback(from_index, to_index):
        """Returns the manhattan distance between the two nodes."""
        # Convert from routing variable Index to distance matrix NodeIndex.
        from_node = manager.IndexToNode(from_index)
        to_node = manager.IndexToNode(to_index)
        return data['distance_matrix'][from_node][to_node]
    transit_callback_index = routing.RegisterTransitCallback(distance_callback)
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
    # Add Distance constraint.
    dimension_name = 'Distance'
    routing.AddDimension(
        transit_callback_index,
        0,  # no slack
        3000,  # vehicle maximum travel distance
        True,  # start cumul to zero
        dimension_name)
    distance_dimension = routing.GetDimensionOrDie(dimension_name)
    # Define Transportation Requests.
    for request in data['pickups_deliveries']:
        pickup_index = manager.NodeToIndex(request[0])
        delivery_index = manager.NodeToIndex(request[1])
        routing.AddPickupAndDelivery(pickup_index, delivery_index)
        # The same vehicle must perform both the pickup and the delivery.
        routing.solver().Add(
            routing.VehicleVar(pickup_index) == routing.VehicleVar(
                delivery_index))
        # The pickup must occur before its delivery along the route.
        routing.solver().Add(
            distance_dimension.CumulVar(pickup_index) <=
            distance_dimension.CumulVar(delivery_index))
    # Setting first solution heuristic.
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION)
    search_parameters.local_search_metaheuristic = (
        routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)
    search_parameters.time_limit.seconds = 30
    search_parameters.log_search = True  # Turn on Log for Algorithms
    assignment = routing.SolveWithParameters(search_parameters)
    g_result = meta.Chromosome(_instance)
    g_result.genes = []
    if assignment:
        total_distance = 0
        for vehicle_id in range(data['num_vehicles']):
            index = routing.Start(vehicle_id)
            plan_output = 'Route for vehicle {}:\n'.format(vehicle_id)
            route_distance = 0
            while not routing.IsEnd(index):
                plan_output += ' {} -> '.format(manager.IndexToNode(index))
                previous_index = index
                index = assignment.Value(routing.NextVar(index))  # Input Tasks
                # Node 0 is the depot; only record real task visits as genes.
                if manager.IndexToNode(index) != 0:
                    g_result.genes.append([manager.IndexToNode(index), vehicle_id + 1, False])
                route_distance += routing.GetArcCostForVehicle(
                    previous_index, index, vehicle_id)
            plan_output += '{}\n'.format(manager.IndexToNode(index))
            plan_output += 'Distance of the route: {}m\n'.format(route_distance)
            print(plan_output)
            total_distance += route_distance
        print('Total Distance of all routes: {}m'.format(total_distance))
        meta.evaluate(g_result)
    return g_result
def run():
    """Run every configured clustering method."""
    for method_name, result_attr in (('KMeans', 'k'), ('Hierarchical', 'merges')):
        _run(method_name, result_attr)
def fetch_net(args: Any,
              num_tasks: int,
              num_cls: int,
              dropout: float = 0.3):
    """
    Create a neural network to train.

    Dataset name selects the input channels / pooling / linear-layer size;
    args.model selects the architecture ("wrn16_4" or "conv").
    """
    # Dataset-dependent architecture hyper-parameters.
    if "mnist" in args.dataset:
        in_channels, avg_pool, linear_size = 1, 2, 80
    elif args.dataset == "mini_imagenet":
        in_channels, avg_pool, linear_size = 3, 3, 320
    elif "cifar" in args.dataset:
        in_channels, avg_pool, linear_size = 3, 2, 320
    else:
        raise NotImplementedError

    if args.model == "wrn16_4":
        net = WideResNetMultiTask(depth=16, num_task=num_tasks,
                                  num_cls=num_cls, widen_factor=4,
                                  drop_rate=dropout, inp_channels=in_channels)
    elif args.model == "conv":
        net = SmallConv(num_task=num_tasks, num_cls=num_cls,
                        channels=in_channels, avg_pool=avg_pool,
                        lin_size=linear_size)
    else:
        raise ValueError("Invalid network")

    if args.gpu:
        net.cuda()
    return net
def are_models_specified(api_spec: Dict) -> bool:
    """
    Checks if models have been specified in the API spec (cortex.yaml).

    Args:
        api_spec: API configuration.
    """
    predictor = api_spec["predictor"]
    kind = predictor_type_from_api_spec(api_spec)
    if kind == PythonPredictorType:
        # Python predictors only specify models via multi-model reloading.
        if not predictor["multi_model_reloading"]:
            return False
        models = predictor["multi_model_reloading"]
    else:
        models = predictor["models"]
    return models is not None
def _progress_bar(
current: Union[int, float], total: Union[int, float], width: int = 60
) -> None:
"""
Custom progress bar for wget downloads.
:param current: bytes downloaded so far
:type current: Union[int, float]
:param total: Total size of download in bytes or megabytes
:type total: Union[int, float]
:param width: Progress bar width in chars, defaults to 60
:type width: int, optional
"""
unit = "bytes"
# Show file size in MB for large files
if total >= 100000:
MB = 1024 * 1024
current = current / MB
total = total / MB
unit = "MB"
progress = current / total
progress_message = f"Progress: \
{progress:.0%} [{current:.1f} / {total:.1f}] {unit}"
sys.stdout.write("\r" + progress_message)
sys.stdout.flush() | 26,940 |
def projects(
    prospect_projects,
    assign_pm_projects,
    active_projects,
    verify_win_projects,
    won_projects,
):
    """Aggregate of projects at different stages associated to an adviser."""
def user_config(filename):
    """Load a user-provided JSON configuration file.

    :param filename: path of the JSON file to read
    :return: the parsed configuration object
    :raises RuntimeError: if the file is missing or is not valid JSON
    """
    try:
        with open(filename) as file:
            return json.loads(file.read())
    except FileNotFoundError as fnf:
        # BUG FIX: the f-strings had no placeholders and printed the
        # literal "(unknown)"; report the actual filename instead.
        raise RuntimeError(f"File '{filename}' could not be found") from fnf
    except json.JSONDecodeError as jsond:
        raise RuntimeError(f"Error while parsing '{filename}'") from jsond
def _decomposed_dilated_conv2d(x, kernel_size, num_o, dilation_factor, name, top_scope, biased=False):
    """
    Decomposed dilated conv2d without BN or relu.

    Emulates a dilated convolution by decomposing the input into
    dilation_factor**2 smaller feature maps (space_to_batch), running a
    regular SAME conv2d on them, and reassembling with batch_to_space.

    NOTE(review): `top_scope` is accepted but unused in this body —
    confirm it is required by the caller's interface.
    """
    # padding so that the input dims are multiples of dilation_factor
    H = tf.shape(x)[1]
    W = tf.shape(x)[2]
    pad_bottom = (dilation_factor - H % dilation_factor) if H % dilation_factor != 0 else 0
    pad_right = (dilation_factor - W % dilation_factor) if W % dilation_factor != 0 else 0
    pad = [[0, pad_bottom], [0, pad_right]]
    # decomposition to smaller-sized feature maps
    # [N,H,W,C] -> [N*d*d, H/d, W/d, C]
    o = tf.space_to_batch(x, paddings=pad, block_size=dilation_factor)
    # perform regular conv2d
    num_x = x.shape[3].value
    with tf.variable_scope(name) as scope:
        w = tf.get_variable('weights', shape=[kernel_size, kernel_size, num_x, num_o])
        s = [1, 1, 1, 1]
        o = tf.nn.conv2d(o, w, s, padding='SAME')
        if biased:
            b = tf.get_variable('biases', shape=[num_o])
            o = tf.nn.bias_add(o, b)
        # reassemble: [N*d*d, H/d, W/d, C'] -> [N, H, W, C'], cropping the pad
        o = tf.batch_to_space(o, crops=pad, block_size=dilation_factor)
    return o
def _sign_model(fout):
    """
    Write the signature (magic bytes followed by the format version) of
    Facebook's native fastText `.bin` format to the binary stream `fout`.

    Name mimics the original C++ implementation, see
    [FastText::signModel](https://github.com/facebookresearch/fastText/blob/master/src/fasttext.cc)

    Parameters
    ----------
    fout: writeable binary stream
    """
    for signature_part in (_FASTTEXT_FILEFORMAT_MAGIC, _FASTTEXT_VERSION):
        fout.write(signature_part.tobytes())
def enable_traceback():
    """Re-enable tracebacks being added to exception raises.

    NOTE(review): the original docstring said "disables", but this
    delegates to tb_controls.enable_traceback(), which enables them.
    """
    tb_controls.enable_traceback()
async def post_autodaily(text_channel: TextChannel, latest_message_id: int, change_mode: bool, current_daily_message: str, current_daily_embed: Embed, utc_now: datetime.datetime) -> Tuple[bool, bool, Message]:
    """
    Post, edit or re-post the auto-daily message in `text_channel`,
    honouring the guild's configured change mode (EDIT,
    DELETE_AND_POST_NEW, or plain post-new).

    Returns (posted, can_post, latest_message):
      posted         - True if a message was successfully sent or edited.
      can_post       - False when a permissions/other error makes further
                       attempts pointless; None when channel or message
                       content is missing.
      latest_message - the resulting Message object, or None.

    NOTE(review): `change_mode` is annotated as bool but is compared
    against server_settings.AutoDailyChangeMode values — confirm the
    annotation.
    """
    posted = False
    if text_channel and current_daily_message:
        error_msg_delete = f'could not delete message [{latest_message_id}] from channel [{text_channel.id}] on guild [{text_channel.guild.id}]'
        error_msg_edit = f'could not edit message [{latest_message_id}] from channel [{text_channel.id}] on guild [{text_channel.guild.id}]'
        error_msg_post = f'could not post a message in channel [{text_channel.id}] on guild [{text_channel.guild.id}]'
        # Any mode other than EDIT requires posting a fresh message.
        post_new = change_mode != server_settings.AutoDailyChangeMode.EDIT
        can_post = True
        latest_message: Message = None
        use_embeds = await server_settings.get_use_embeds(None, bot=BOT, guild=text_channel.guild)
        if use_embeds:
            # Recolour the embed to match the bot's member colour in this guild.
            colour = utils.discord.get_bot_member_colour(BOT, text_channel.guild)
            embed = current_daily_embed.copy()
            embed.colour = colour
        else:
            embed = None
        if can_post:
            can_post, latest_message = await daily_fetch_latest_message(text_channel, latest_message_id)
        if can_post:
            # Only reuse (edit/delete) a previous message posted today (UTC).
            if latest_message and latest_message.created_at.day == utc_now.day:
                latest_message_id = latest_message.id
                if change_mode == server_settings.AutoDailyChangeMode.DELETE_AND_POST_NEW:
                    try:
                        deleted = await utils.discord.try_delete_message(latest_message)
                        if deleted:
                            latest_message = None
                            utils.dbg_prnt(f'[post_autodaily] deleted message [{latest_message_id}] from channel [{text_channel.id}] on guild [{text_channel.guild.id}]')
                        else:
                            print(f'[post_autodaily] could not delete message [{latest_message_id}] from channel [{text_channel.id}] on guild [{text_channel.guild.id}]')
                    except errors.NotFound:
                        print(f'[post_autodaily] {error_msg_delete}: the message could not be found')
                    except errors.Forbidden:
                        print(f'[post_autodaily] {error_msg_delete}: the bot doesn\'t have the required permissions.')
                        can_post = False
                    except Exception as err:
                        print(f'[post_autodaily] {error_msg_delete}: {err}')
                        can_post = False
                elif change_mode == server_settings.AutoDailyChangeMode.EDIT:
                    try:
                        if use_embeds:
                            await latest_message.edit(embed=embed)
                        else:
                            await latest_message.edit(content=current_daily_message)
                        posted = True
                        utils.dbg_prnt(f'[post_autodaily] edited message [{latest_message_id}] in channel [{text_channel.id}] on guild [{text_channel.guild.id}]')
                    except errors.NotFound:
                        print(f'[post_autodaily] {error_msg_edit}: the message could not be found')
                    except errors.Forbidden:
                        print(f'[post_autodaily] {error_msg_edit}: the bot doesn\'t have the required permissions.')
                        can_post = False
                    except Exception as err:
                        print(f'[post_autodaily] {error_msg_edit}: {err}')
                        can_post = False
            else:
                # No same-day message to reuse: fall back to posting new.
                post_new = True
            if not posted and can_post and post_new:
                try:
                    if use_embeds:
                        latest_message = await text_channel.send(embed=embed)
                    else:
                        latest_message = await text_channel.send(current_daily_message)
                    posted = True
                    utils.dbg_prnt(f'[post_autodaily] posted message [{latest_message.id}] in channel [{text_channel.id}] on guild [{text_channel.guild.id}]')
                except errors.Forbidden:
                    print(f'[post_autodaily] {error_msg_post}: the bot doesn\'t have the required permissions.')
                    can_post = False
                except Exception as err:
                    print(f'[post_autodaily] {error_msg_post}: {err}')
                    can_post = False
        else:
            can_post = False
        if latest_message:
            return posted, can_post, latest_message
        else:
            return posted, can_post, None
    else:
        return posted, None, None
def room_from_loc(env, loc):
    """
    Get the room coordinates for a given location
    """
    rooms = {
        'north': (1, 0),
        'south': (1, 2),
        'west': (0, 1),
        'east': (2, 1),
        'left': (1, 0),
        'right': (1, 2),
        'front': (2, 1),
        'behind': (0, 1),
    }
    # By default, use the central room
    return rooms.get(loc, (1, 1))
def count_encoder(df, cols):
    """Count encoding.

    Args:
        df: カテゴリ変換する対象のデータフレーム
        cols (list of str): カテゴリ変換する対象のカラムリスト
    Returns:
        pd.Dataframe: df にカウントエンコード済みカラム
        (suffix "_count_enc") を追加したデータフレーム
    """
    encoded = pd.DataFrame(index=df.index)
    for col in cols:
        # Map each value to its frequency in the column (NaN included).
        counts = df[col].value_counts(dropna=False)
        encoded[col] = df[col].map(counts)
    encoded = encoded.add_suffix('_count_enc')
    return pd.concat([df, encoded], axis=1)
def main():
    """
    Run the script.
    Read config, create tasks for all sites using all search_for_files
    patterns, start threads, write out_file.
    """
    args = parse_args()
    config = yaml.load(stream=args.config_file)
    for site in set(config['sites']):
        # ensure we don't check a site twice
        # sets also scramble entries. It's ok if the sites will be scrambled,
        # because so one slow site does not slow down all threads simultaniously
        # and maybe we can trick DOS prevention mechanisms.
        domain = DOMAIN_REGEX.match(site).groupdict()['domain']
        RESULTS[domain] = []  # empty list for each domain to store the results
        if not site.endswith('/'):
            site += '/'
        for file_name in set(config.get('search_for_files', [])):
            # ensure we don't check a file twice
            TASK_QUEUE.put(
                Task(
                    task_type=TASK_TYPES.GET,
                    url=site + file_name.format(domain=domain),
                )
            )
        for host_name in set(config.get('fake_host_names', [])):
            # ensure we don't check a host twice
            TASK_QUEUE.put(
                Task(
                    task_type=TASK_TYPES.HOST,
                    url=site,
                    args={'host_name': host_name.format(domain=domain)},
                )
            )
    # Snapshot the initial queue size so workers can report progress.
    global TASK_COUNT
    TASK_COUNT = TASK_QUEUE.qsize()
    threads = []
    for _ in range(THREAD_COUNT):
        thread = threading.Thread(
            target=worker,
            daemon=True,
            kwargs={
                'verbosity': args.verbose,
                'progress': not args.no_progress,
                'auto_save_interval': 100,
                'out_file': None if args.no_auto_save or args.out_file.isatty() else args.out_file,
            }
        )
        threads.append(thread)
        thread.start()
    # NOTE(review): only ONE END sentinel is queued here (this statement is
    # outside the thread-spawning loop) although the inline comment claims
    # one per thread — confirm that workers re-queue END before exiting.
    TASK_QUEUE.put(Task(
        task_type=TASK_TYPES.END  # add one END task per thread at end of queue
    ))
    for thread in threads:
        thread.join()
    if not TASK_QUEUE.empty():
        # Leftover tasks mean a worker died before draining the queue.
        error('[x] Exiting due to exception in thread')
        exit(1)
    save(args.out_file)
def wrap_response(response):
    """Wrap a tornado response as an open api response"""
    # Fall back to JSON when the response carries no Content-Type header.
    content_type = response.headers.get('Content-Type')
    return OpenAPIResponse(
        data=response.body,
        status_code=response.code,
        mimetype=content_type or 'application/json',
    )
def shifted(x):
    """Shift x values to the range [-0.5, 0.5)"""
    return ((x + 0.5) % 1.0) - 0.5
def computeAlignmentError(pP1, pP2, etype = 2, doPlot = False):
    """
    Compute area-based alignment error. Assume that the
    warping paths are on the same grid
    :param pP1: Mx2 warping path 1
    :param pP2: Nx2 warping path 2
    :param etype: Error type. 1 is area ratio.
        2 (default) is L1 Hausdorff distance
    :param doPlot: Whether to plot the results

    NOTE(review): the doPlot branches call plt.hold, which was removed in
    matplotlib >= 3.0 — plotting only works with an old matplotlib.
    """
    P1 = rasterizeWarpingPath(pP1)
    P2 = rasterizeWarpingPath(pP2)
    score = 0
    if etype == 1:
        # Rasterize each path into a binary "below the path" mask and
        # score the area of the symmetric difference, normalized by grid size.
        M = np.max(P1[:, 0])
        N = np.max(P1[:, 1])
        A1 = np.zeros((M, N))
        A2 = np.zeros((M, N))
        for i in range(P1.shape[0]):
            [ii, jj] = [P1[i, 0], P1[i, 1]]
            [ii, jj] = [min(ii, M-1), min(jj, N-1)]
            A1[ii, jj::] = 1.0
        for i in range(P2.shape[0]):
            [ii, jj] = [P2[i, 0], P2[i, 1]]
            [ii, jj] = [min(ii, M-1), min(jj, N-1)]
            A2[ii, jj::] = 1.0
        A = np.abs(A1 - A2)
        score = np.sum(A)/(float(M)*float(N))
        if doPlot:
            plt.imshow(A)
            plt.hold(True)
            plt.scatter(pP1[:, 1], pP1[:, 0], 5, 'c', edgecolor = 'none')
            plt.scatter(pP2[:, 1], pP2[:, 0], 5, 'r', edgecolor = 'none')
            plt.title("Score = %g"%score)
    else:
        # Symmetrized average nearest-neighbour distance between the two
        # rasterized paths (L1-style Hausdorff variant).
        C = getCSM(np.array(P1, dtype = np.float32), np.array(P2, dtype = np.float32))
        score = (np.sum(np.min(C, 0)) + np.sum(np.min(C, 1)))/float(P1.shape[0]+P2.shape[0])
        if doPlot:
            plt.scatter(P1[:, 1], P1[:, 0], 20, 'c', edgecolor = 'none')
            plt.scatter(P2[:, 1], P2[:, 0], 20, 'r', edgecolor = 'none')
            idx = np.argmin(C, 1)
            for i in range(len(idx)):
                plt.plot([P1[i, 1], P2[idx[i], 1]], [P1[i, 0], P2[idx[i], 0]], 'k')
            plt.title("Score = %g"%score)
    return score
def _cumulative_grad(grad_sum, grad):
    """Accumulate `grad` into `grad_sum` in place via AssignAdd."""
    return ops.AssignAdd()(grad_sum, grad)
def run_node(node):
    """Execute a node and return its output.

    Defined at module level because Python multiprocessing works
    strangely on Windows: pool target functions must be picklable,
    i.e. importable by name.

    Args:
        node (Node): Node to be called
    Returns:
        rslts: Node's call output
    """
    results = node.run_with_loaded_inputs()
    return results
def getitimer(space, which):
    """getitimer(which)

    Returns current value of given itimer.
    """
    # Allocate a temporary C itimerval struct, let the C-level
    # getitimer(2) call fill it, then convert it to an app-level value.
    # scoped_alloc frees the raw buffer when the block exits.
    with lltype.scoped_alloc(itimervalP.TO, 1) as old:
        c_getitimer(which, old)
        return itimer_retval(space, old[0])
def substitute_T_and_RH_for_interpolated_dataset(dataset):
    """
    Replace the interpolated T and RH values in `dataset` with values
    re-derived from the interpolated theta and q fields, respectively.

    Input :
        dataset : Dataset interpolated along height
    Output :
        dataset : Original dataset with new T and RH
    """
    temperature = f3.calc_T_from_theta(dataset)
    relative_humidity = f3.calc_rh_from_q(dataset, T=temperature)
    dataset["ta"] = (dataset.p.dims, temperature)
    dataset["rh"] = (dataset.p.dims, relative_humidity.values)
    return dataset
def get_base_required_fields():
    """Return the field names the UI requires for a base asset.

    Fields required for update only: 'id', 'uid',
    ['lastModifiedTimestamp', 'location', 'events', 'calibration'].
    Present in input, not required for output: 'coordinates',
    'hasDeploymentEvent', 'augmented', 'deployment_numbers',
    'deployment_number', 'Ref Des', 'depth'.
    History: 2016-08-24 removed 'coordinates'; 2016-08-26 removed
    'augmented', 'Ref Des', 'remoteDocuments', 'hasDeploymentEvent';
    2016-09-26 and 2016-10-11 removed 'tense'.
    """
    return [
        'assetInfo',
        'assetType',
        'dataSource',
        'deployment_numbers',
        'deployment_number',
        'depth',
        'editPhase',
        'latitude',
        'longitude',
        'manufactureInfo',
        'mobile',
        'notes',
        'orbitRadius',
        'partData',
        'physicalInfo',
        'purchaseAndDeliveryInfo',
        'ref_des',
        'remoteResources',
        'uid',
    ]
def reg_tab_ext(*model):
    """ Performs weighted linear regression for various models building upon the model specified in section 4,
    while additionally including education levels of a council candidate (university degree, doctoral/PhD degree)
    A single model (i.e. function argument) takes on the form:
    model=[df,polynomial, bw, dependant variable, bandwidth-type]
    df: dataframe containing all relevant data
    polynomial (str): "quadratic" includes quadratic values of "margin_1" and "inter_1" in regressionmodel;
    default is "linear"
    bw (float): specifying data to be included relative to cut-off point ("margin_1"=0)
    dependant variable (str): name of dependant variable
    bandwidth-type (str): method used to calculate bandwidth
    :return: df containing results of regression
    """
    # pd.set_option('mode.chained_assignment', None)
    table = pd.DataFrame(
        {'Model': [], 'Female Mayor': [], 'Std.err_Female Mayor': [], 'University': [], 'Std.err_University': [],
         'PhD': [], 'Std.err_PhD': [], 'Bandwidth type': [], 'Bandwidth size': [], 'Polynomial': [],
         'Observations': [], 'Elections': [], 'Municipalities': [],
         'Mean': [], 'Std.err (Mean)': []})
    table = table.set_index(['Model'])
    for counter, i in enumerate(model):
        # Restrict observations to the bandwidth and attach kernel weights.
        data_i = subset_by_margin(i[0], i[2])
        weight(data_i, i[2])
        y = data_i[i[3]]
        w = data_i["weight" + str(i[2]) + ""]
        x = data_i[["female_mayor", "margin_1", "inter_1", 'university', 'phd']]
        polynomial_i = str("Linear")
        if i[1] == "quadratic":
            x = data_i[["female_mayor", "margin_1", "inter_1", 'university', 'phd', "margin_2", "inter_2"]]
            polynomial_i = str("Quadratic")
        x = sm_api.add_constant(x)
        # WLS with standard errors clustered at the municipality level.
        wls_model = sm_api.WLS(y, x, missing='drop', weights=w)
        results = wls_model.fit(cov_type='cluster', cov_kwds={'groups': data_i["gkz"]})
        betas = [1, 2, 3]
        cov = ["female_mayor", 'university', 'phd']
        # Attach significance stars to the coefficients of interest.
        # NOTE(review): coefficients/bse are addressed by position
        # (const=0, female_mayor=1, ..., university=4, phd=5), which
        # assumes the column order built above — confirm for the
        # quadratic specification.
        for j in cov:
            betas[cov.index(j)] = significance_level(results.pvalues[j], results.params[(cov.index(j) + 1)].round(3))
        bw_size_i = str(round(i[2], 2))
        bw_type_i = str(i[4])
        output = [betas[0], results.bse[1], betas[1], results.bse[4], betas[2], results.bse[5], bw_type_i, bw_size_i,
                  polynomial_i, results.nobs,
                  data_i["gkz_jahr"].value_counts().count(),
                  data_i["gkz"].value_counts().count(), y.mean().round(2), np.std(y)]
        table.loc["(" + str(counter + 1) + ")"] = output
    table = table.round(3)
    return table
def load_pdf(filename: str) -> pd.DataFrame:
    """Read a PDF dataset into a single pandas DataFrame.

    Parses every page of ``basedir/filename`` with tabula and
    concatenates the extracted tables into one frame.

    :param filename: file name relative to the module-level ``basedir``
    :return: concatenated DataFrame
    """
    # BUG FIX: the path was built with a hard-coded '\\' separator,
    # which only worked on Windows; join portably instead.
    tables = tabula.read_pdf(os.path.join(basedir, filename), pages="all")
    # NOTE(review): the first table is deliberately skipped — confirm it
    # is a header/cover table in these PDFs.
    merged_tables = pd.concat(tables[1:])
    return merged_tables
def he_xavier(in_size: int, out_size: int, init_only=False):
    """
    Kaiming He initialization (He et al., "Delving Deep into Rectifiers:
    Surpassing Human-Level Performance on ImageNet Classification",
    https://arxiv.org/abs/1502.01852): normal weights with
    stddev = sqrt(2 / fan_in), zero biases.

    If init_only is True, return the raw init tensors instead of
    tf.Variable objects.
    """
    stddev = tf.cast(tf.sqrt(2 / in_size), tf.float32)
    weights_init = tf.random_normal([in_size, out_size], stddev=stddev)
    biases_init = tf.zeros([out_size])
    if init_only:
        return weights_init, biases_init
    return (tf.Variable(weights_init, name="weights"),
            tf.Variable(biases_init, name="biases"))
def critical(message):
    """Shorthand for logging *message* with kCritical severity."""
    log(message, kCritical)
def GET_v1_keyboards_build_log():
    """Returns a dictionary of keyboard/layout pairs. Each entry is a dictionary with the following keys:

    * `works`: Boolean indicating whether the compile was successful
    * `message`: The compile output for failed builds
    """
    # The configurator status is maintained out-of-band in redis.
    status = qmk_redis.get('qmk_api_configurator_status')
    return jsonify(status)
def logprod(lst):
    """Compute the log of the product of a list of numbers (sum of logs)."""
    return sum(map(log, lst))
def train(cfg_file: str) -> None:
    """
    Main training function. After training, also test on test data if given.

    Supports two architectures selected by ``cfg["model"]["architecture"]``:
    the vanilla JoeyNMT encoder-decoder (default) and an unsupervised NMT
    model trained with back-translation data.

    :param cfg_file: path to configuration yaml file
    :raises ConfigurationError: if the configured architecture is unknown.
    """
    cfg = load_config(cfg_file)
    # set the random seed
    set_seed(seed=cfg["training"].get("random_seed", 42))
    # get model architecture
    architecture = cfg["model"].get("architecture", "encoder-decoder")
    if architecture not in ["encoder-decoder", "unsupervised-nmt"]:
        raise ConfigurationError("Supported architectures: 'encoder-decoder' and 'unsupervised-nmt'")
    # original JoeyNMT code for vanilla encoder-decoder model
    if architecture == "encoder-decoder":
        # load the data
        train_data, dev_data, test_data, src_vocab, trg_vocab = \
            load_data(data_cfg=cfg["data"])
        # build an encoder-decoder model
        model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
        # for training management, e.g. early stopping and model selection
        trainer = TrainManager(model=model, config=cfg)
        # store copy of original training config in model dir
        shutil.copy2(cfg_file, trainer.model_dir + "/config.yaml")
        # log all entries of config
        log_cfg(cfg, trainer.logger)
        log_data_info(train_data=train_data, valid_data=dev_data,
                      test_data=test_data, src_vocab=src_vocab, trg_vocab=trg_vocab,
                      logging_function=trainer.logger.info)
        trainer.logger.info(str(model))
        # store the vocabs
        src_vocab_file = "{}/src_vocab.txt".format(cfg["training"]["model_dir"])
        src_vocab.to_file(src_vocab_file)
        trg_vocab_file = "{}/trg_vocab.txt".format(cfg["training"]["model_dir"])
        trg_vocab.to_file(trg_vocab_file)
        # train the model
        trainer.train_and_validate(train_data=train_data, valid_data=dev_data)
        # predict with the best model on validation and test
        # (if test data is available)
        ckpt = "{}/{}.ckpt".format(trainer.model_dir, trainer.best_ckpt_iteration)
        output_name = "{:08d}.hyps".format(trainer.best_ckpt_iteration)
        output_path = os.path.join(trainer.model_dir, output_name)
        test(cfg_file, ckpt=ckpt, output_path=output_path, logger=trainer.logger)
    else:
        # Unsupervised NMT model training
        # load the data: monolingual autoencoding pairs (src2src/trg2trg),
        # back-translation streams (BTsrc/BTtrg), plus dev/test in both
        # translation directions.
        src2src, trg2trg, BTsrc, BTtrg, \
            dev_src2trg, dev_trg2src, \
            test_src2trg, test_trg2src, \
            src_vocab, trg_vocab,\
            fields = load_unsupervised_data(data_cfg=cfg["data"])
        # build an unsupervised NMT model
        model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
        # for training management of unsupervised NMT model
        trainer = UnsupervisedNMTTrainManager(model=model, config=cfg, fields=fields)
        # store copy of original training config in model dir
        shutil.copy2(cfg_file, trainer.model_dir + "/config.yaml")
        # log all entries of config
        log_cfg(cfg, trainer.logger)
        # log information on data
        log_unsupervised_data_info(src2src=src2src, trg2trg=trg2trg, BTsrc=BTsrc, BTtrg=BTtrg,
                                   dev_src2trg=dev_src2trg, dev_trg2src=dev_trg2src,
                                   test_src2trg=test_src2trg, test_trg2src=test_trg2src,
                                   src_vocab=src_vocab, trg_vocab=trg_vocab,
                                   logging_function=trainer.logger.info)
        # log model
        trainer.logger.info(str(model))
        # store the vocabs
        src_vocab_file = "{}/src_vocab.txt".format(cfg["training"]["model_dir"])
        src_vocab.to_file(src_vocab_file)
        trg_vocab_file = "{}/trg_vocab.txt".format(cfg["training"]["model_dir"])
        trg_vocab.to_file(trg_vocab_file)
        # train the model
        trainer.train_and_validate(src2src, trg2trg, BTsrc, BTtrg, dev_src2trg, dev_trg2src)
        # predict with the best averaged model on validation and test
        # (if test data is available)
        ckpt = "{}/{}.ckpt".format(trainer.model_dir, trainer.best_averaged_ckpt_iteration)
        output_name = "{:08d}.hyps".format(trainer.best_averaged_ckpt_iteration)
        output_path = os.path.join(trainer.model_dir, output_name)
        test(cfg_file, ckpt=ckpt, output_path=output_path, logger=trainer.logger) | 26,964 |
def nms(dets, iou_thr, device_id=None):
    """Dispatch to either CPU or GPU NMS implementations.
    The input can be either a torch tensor or numpy array. GPU NMS will be used
    if the input is a gpu tensor or device_id is specified, otherwise CPU NMS
    will be used. The returned type will always be the same as inputs.
    Arguments:
        dets (torch.Tensor or np.ndarray): bboxes with scores.
        iou_thr (float): IoU threshold for NMS.
        device_id (int, optional): when `dets` is a numpy array, if `device_id`
            is None, then cpu nms is used, otherwise gpu_nms will be used.
    Returns:
        tuple: kept bboxes and indice, which is always the same data type as
            the input.
    """
    # Normalize the input to a torch tensor, remembering whether the caller
    # gave us a numpy array so the indices can be converted back at the end.
    if isinstance(dets, torch.Tensor):
        was_numpy = False
        boxes = dets
    elif isinstance(dets, np.ndarray):
        was_numpy = True
        target = 'cpu' if device_id is None else 'cuda:{}'.format(device_id)
        boxes = torch.from_numpy(dets).to(target)
    else:
        raise TypeError(
            'dets must be either a Tensor or numpy array, but got {}'.format(
                type(dets)))
    # Execute cpu or cuda nms.
    if boxes.shape[0] == 0:
        keep = boxes.new_zeros(0, dtype=torch.long)
    elif boxes.is_cuda:
        # Width 7 means 3-D boxes, width 5 means 2-D boxes with a score.
        if boxes.shape[1] == 7:
            keep = nms_cuda.nms_3d(boxes, iou_thr)
        elif boxes.shape[1] == 5:
            keep = nms_cuda.nms(boxes, iou_thr)
    else:
        keep = nms_cpu.nms(boxes, iou_thr)
    if was_numpy:
        keep = keep.cpu().numpy()
    return dets[keep, :], keep
def render_template(template_name_or_list, **context):
    """Render a template from the application's template folder.

    :param template_name_or_list: the name of the template to be
                                  rendered, or an iterable with template names
                                  the first one existing will be rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    app_ctx = _app_ctx_stack.top
    # Let the app inject its standard template globals into the context.
    app_ctx.app.update_template_context(context)
    template = app_ctx.app.jinja_env.get_or_select_template(template_name_or_list)
    return _render(template, context, app_ctx.app)
def extract_spec(outfile, evtfile, region, clobber=False):
    """
    Extract the spectrum within region from the event file.

    Runs CIAO's ``dmextract`` (after resetting it with ``punlearn``) to bin
    the events inside ``region`` into a PI spectrum written to ``outfile``.
    """
    clobber_flag = "yes" if clobber else "no"
    subprocess.check_call(["punlearn", "dmextract"])
    subprocess.check_call([
        "dmextract",
        "infile=%s[sky=%s][bin pi]" % (evtfile, region),
        "outfile=%s" % outfile,
        "clobber=%s" % clobber_flag,
    ])
def test_FN121Readonly_list(api_client, project):
    """when we access the readonly endpoint for FN121 objects, it should
    return a paginated list of net sets that includes all of the FN121
    objects in the database (ie. unfiltered).
    """
    response = api_client.get(reverse("fn_portal_api:netset_list"))
    assert response.status_code == status.HTTP_200_OK
    results = response.data["results"]
    assert len(results) == 2
    # Compare as sets: ordering of the payload is not part of the contract.
    observed = {item["slug"] for item in results}
    expected = {sample.slug for sample in project.samples.all()}
    assert expected == observed
def _ReplaceUrlWithPlaceholder(results):
"""Fix a bug by replacing domain names with placeholders
There was a bug in early dogfood versions of the survey extension
in which URLs were included in questions where they
were supposed to have a placeholder. The fix was to replace text like
"Proceed to www.example.com" with "[CHOSEN]", and "Back to safety."
with "[ALTERNATIVE]."
These questions were the first question asked, so this function will only
do the replacement in the first question in each result.
Args:
results: A list of dicts containing parsed and filtered results.
Is it assumed that results has been filtered for a given survey
condition, such that attributes questions should all appear in the
same place.
Returns:
The fixed results. Changes the input results list as well.
"""
for r in results:
q = r['responses'][0]['question'] # Do replacement in first question only
chosenMatch = re.search('\"Proceed to.*?\"', q)
alternateMatch = re.search('\"Back to safety\.\"', q)
if chosenMatch:
q = q.replace(chosenMatch.group(0), '\"[CHOSEN]\"')
if alternateMatch:
q = q.replace(alternateMatch.group(0), '\"[ALTERNATIVE].\"')
r['responses'][0]['question'] = q
return results | 26,969 |
def XCL(code, error, mag=0.0167, propagation='random', NEV=True, **kwargs):
    """
    Dummy function to manage the ISCWSA workbook not correctly defining the
    weighting functions.

    Dispatches to XCLA or XCLH depending on the error code.
    """
    handler = XCLA if code == "XCLA" else XCLH
    return handler(
        code, error, mag=mag, propagation=propagation, NEV=NEV,
        tortuosity=kwargs['tortuosity']
    )
def to_bgr(image):
    """Convert image to BGR format
    Args:
        image: Numpy array of uint8 (grayscale HxW, RGB HxWx3, or 4-channel HxWx4)
    Returns:
        bgr: Numpy array of uint8, 3-channel BGR
    """
    # Grayscale: replicate the single channel into B, G and R.
    if image.ndim == 2:
        bgr = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        return bgr
    # Four-channel input.  NOTE(review): the original comment said "BGRA",
    # but the conversion flag treats the input as RGBA -- confirm which
    # layout callers actually pass.
    if image.shape[2] == 4:
        bgr = cv2.cvtColor(image, cv2.COLOR_RGBA2BGR)
        return bgr
    # Three channels, assumed RGB per the flag below.
    bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    return bgr | 26,971 |
def main() -> None:
    """Run bot: create the updater, register handlers, and start polling."""
    updater = Updater(BOT_SETTING.TOKEN)
    dispatcher = updater.dispatcher
    # Register command handlers (order preserved from the original wiring).
    for command, callback in (("start", start), ("help", help), ("auth", auth)):
        dispatcher.add_handler(CommandHandler(command, callback))
    # Fall back to the generic text handler for non-command messages.
    dispatcher.add_handler(MessageHandler(Filters.text, text_handler))
    updater.start_polling()
    updater.idle()
def load(path: pathlib.Path) -> dict:
    """Load a YAML file, returning its contents.

    :param path: location of the YAML document to read.
    :raises RuntimeError: if the file cannot be parsed as YAML.
    """
    with path.open() as handle:
        try:
            return yaml.safe_load(handle)
        except scanner.ScannerError as error:
            LOGGER.critical('Failed to parse YAML from %s: %s',
                            path, error)
            # Chain the original parse error so tracebacks show the cause.
            raise RuntimeError('YAML parse failure') from error
def find_package(dir):
    """
    Given a directory, finds the equivalent dotted package name. If it
    is directly in sys.path, returns ''.

    :param dir: directory to resolve (absolute or relative).
        (Parameter name kept -- it shadows the ``dir`` builtin, but
        renaming would break keyword callers.)
    :raises ValueError: if the directory is not under any sys.path entry.
    """
    dir = os.path.abspath(dir)
    orig_dir = dir
    # Must be a real list, not the lazy ``map`` iterator the original used:
    # the ``in`` test below runs once per loop iteration, and an iterator
    # would be exhausted after the first membership check, so the walk
    # could never match on later iterations under Python 3.
    path = [os.path.abspath(p) for p in sys.path]
    packages = []
    last_dir = None
    while True:
        if dir in path:
            return '.'.join(packages)
        packages.insert(0, os.path.basename(dir))
        dir = os.path.dirname(dir)
        if last_dir == dir:
            # Reached the filesystem root without hitting sys.path.
            raise ValueError(
                "%s is not under any path found in sys.path" % orig_dir)
        last_dir = dir
def set_plate_material_iso(uid, propnum, value, propname='modulus'):
    """sets desired material data to isotropic plate. defaults to modulus

    Reads the current 8-value material array from the St7 (Strand7) API,
    overwrites the slot selected by ``propname``, and writes it back.

    :param uid: St7 model handle.
    :param propnum: plate property number to modify.
    :param value: new value for the selected material property.
    :param propname: key into ``plateIsoMaterialData`` naming the slot.
    """
    # The St7 API exchanges a fixed-size C array of 8 doubles.
    data = (ctypes.c_double*8)()
    chkErr(St7GetPlateIsotropicMaterial(uid, propnum, data))
    data[plateIsoMaterialData[propname]] = value
    chkErr(St7SetPlateIsotropicMaterial(uid, propnum, data)) | 26,975 |
def _install():
    """Run ``npm install`` in the current working directory."""
    # print() as a function -- the original used the Python 2 print
    # statement, which is a SyntaxError under Python 3.
    print('Running `npm install`...')
    subprocess.call(['npm', 'install'])
async def test_discovery_data_bucket(
    hass: HomeAssistantType, mock_bridge: Generator[None, Any, None]
) -> None:
    """Test the event send with the updated device.

    Sets up the integration and verifies that the device cached in
    ``hass.data`` mirrors every attribute of the dummy bridge broadcast.
    """
    assert await async_setup_component(hass, DOMAIN, MANDATORY_CONFIGURATION)
    await hass.async_block_till_done()
    # The component stores the discovered device under DATA_DEVICE.
    device = hass.data[DOMAIN].get(DATA_DEVICE)
    assert device.device_id == DUMMY_DEVICE_ID
    assert device.ip_addr == DUMMY_IP_ADDRESS
    assert device.mac_addr == DUMMY_MAC_ADDRESS
    assert device.name == DUMMY_DEVICE_NAME
    assert device.state == DUMMY_DEVICE_STATE
    assert device.remaining_time == DUMMY_REMAINING_TIME
    assert device.auto_off_set == DUMMY_AUTO_OFF_SET
    assert device.power_consumption == DUMMY_POWER_CONSUMPTION
    assert device.electric_current == DUMMY_ELECTRIC_CURRENT
    assert device.phone_id == DUMMY_PHONE_ID | 26,977 |
def main(controlFile, trajName, reportName, folder, top, outputFilename, nProcessors, output_folder, format_str, new_report, trajs_to_select):
    """
    Calculate the corrected rmsd values of conformation taking into account
    molecule symmetries
    :param controlFile: Control file
    :type controlFile: str
    :param trajName: Trajectory file name pattern (defaults to "*traj*")
    :type trajName: str
    :param reportName: Report file name pattern (defaults to "report_%d")
    :type reportName: str
    :param folder: Path the simulation
    :type folder: str
    :param top: Path to the topology
    :type top: str
    :param outputFilename: Name of the output file
    :type outputFilename: str
    :param nProcessors: Number of processors to use
    :type nProcessors: int
    :param output_folder: Path where to store the new reports
    :type output_folder: str
    :param format_str: String with the format of the report
    :type format_str: str
    :param new_report: Whether to write rmsd to a new report file
    :type new_report: bool
    :param trajs_to_select: Subset of trajectories to process
    """
    # Normalize the trajectory/report glob patterns.
    if trajName is None:
        trajName = "*traj*"
    else:
        trajName += "_*"
    if reportName is None:
        reportName = "report_%d"
    else:
        reportName += "_%d"
    if output_folder is not None:
        outputFilename = os.path.join(output_folder, outputFilename)
    outputFilename += "_%d"
    if nProcessors is None:
        nProcessors = utilities.getCpuCount()
    nProcessors = max(1, nProcessors)
    print("Calculating RMSDs with %d processors" % nProcessors)
    epochs = utilities.get_epoch_folders(folder)
    if top is not None:
        top_obj = utilities.getTopologyObject(top)
    else:
        top_obj = None
    resname, nativeFilename, symmetries, rmsdColInReport = readControlFile(controlFile)
    nativePDB = atomset.PDB()
    nativePDB.initialise(nativeFilename, resname=resname)
    files = []
    if not epochs:
        # path does not contain an adaptive simulation, we'll try to retrieve
        # trajectories from the specified path
        files = analysis_utils.process_folder(None, folder, trajName, reportName, os.path.join(folder, outputFilename), top_obj, trajs_to_select)
    for epoch in epochs:
        print("Epoch", epoch)
        files.extend(analysis_utils.process_folder(epoch, folder, trajName, reportName, os.path.join(folder, epoch, outputFilename), top_obj, trajs_to_select))
    # Fan the per-trajectory RMSD computation out over a process pool.
    pool = mp.Pool(nProcessors)
    results = [pool.apply_async(calculate_rmsd_traj, args=(nativePDB, resname, symmetries, rmsdColInReport, info[0], info[1], info[2], info[3], info[4], format_str, new_report)) for info in files]
    pool.close()
    pool.join()
    # Re-raise any exception captured inside the workers.
    for res in results:
        res.get() | 26,978 |
def validate_settings(raw_settings):
    """Return cleaned settings using schemas collected from INSTALLED_APPS.

    Also warns about raw directives that no schema consumed.
    """
    installed_apps = raw_settings['INSTALLED_APPS']
    schemas_mapping = raw_settings.get('CONFIT_SCHEMAS', {})
    # Build the composite schema from the installed apps, then validate.
    settings_schema = composite_schema(
        installed_apps=installed_apps,
        mapping=schemas_mapping)
    cleaned_settings = settings_schema.deserialize(raw_settings)
    # Directives present in the raw settings but absent from the cleaned
    # ones were not consumed by any schema -- tell the user about them.
    unused_keys = set(raw_settings) - set(cleaned_settings)
    if unused_keys:
        warnings.warn(
            'The following settings are mentioned in your configuration, but '
            'are not in cleaned settings. They may be missing in '
            'configuration schemas, or you do not need to set them up: \n'
            '- {settings}'.format(settings='\n- '.join(unused_keys)),
            Warning)
    return cleaned_settings
def is_autocast_module_decorated(module: nn.Module):
    """
    Return ``True`` if ``module.forward`` was decorated with
    ``torch.cuda.amp.autocast``, ``False`` otherwise (including when
    decorator inspection fails for any reason).
    """
    try:
        from torch.cuda.amp import autocast
        for decorator in _get_decorators(module.forward):
            if isinstance(decorator, autocast):
                return True
    except Exception:
        # Narrowed from a bare ``except`` -- still best-effort, but no
        # longer swallows KeyboardInterrupt/SystemExit.
        pass
    return False
def timestamp_format_is_valid(timestamp: str) -> bool:
    """
    Determines if the supplied timestamp is valid for usage with Graylog.
    :param timestamp: timestamp that is to be checked
    :return: whether the timestamp is valid (True) or invalid (False)
    """
    # Validity is defined by whether the parser accepts the string.
    try:
        get_datetime_from_timestamp(timestamp)
    except ValueError:
        return False
    else:
        return True
def detect_prediction_results(pred_dir, img_dir, radius, prob_thresh, hasHeader):
    """Detect mitoses from probability maps through an iterative
    procedure.
    This will read csv prediction files, and output csv prediction files
    containing coordinates of the predicted mitoses centers.
    Args:
        pred_dir: Directory containing the prediction results
        img_dir: Directory containing the images
        radius: Integer value for the radius of the disk kernel.
        prob_thresh: A floating value representing the lower bound on
            the probability values.
        hasHeader: Boolean value to indicate if the csv file has the header
    """
    pred_files = list_files(pred_dir, "*.csv")
    pred_files = get_file_id(pred_files, GROUND_TRUTH_FILE_ID_RE)
    img_files = list_files(img_dir, "*.tif")
    img_files = get_file_id(img_files, GROUND_TRUTH_FILE_ID_RE)
    for k, pred_file in pred_files.items():
        # convert ijv predictions to prob maps
        img_file = img_files[k]
        # NOTE(review): PIL's Image.size is (width, height); confirm that
        # csv_2_arr really expects the first value as the row count.
        h, w = Image.open(img_file).size
        probs = csv_2_arr(pred_file, h, w, hasHeader=hasHeader)
        # detect the centers of the mitoses
        preds_detected = identify_mitoses(probs, radius, prob_thresh)
        # save the prediction results
        detected_dir = os.path.dirname(pred_dir + "/") + "_detected"
        detected_file_name = pred_file.replace(pred_dir, detected_dir)
        # Columns as a *list* (the original passed a set, whose iteration
        # order is nondeterministic, scrambling the CSV column order).
        tuple_2_csv(preds_detected, detected_file_name, columns=['row', 'col', 'prob'])
def indices(n, dtype):
    """Indices of each element in upper/lower triangle of test matrix."""
    # tri_n(n - 1) is the number of elements in the triangle.
    return np.arange(tri.tri_n(n - 1), dtype=dtype)
def _sample(n, k):
""" Select k number out of n without replacement unless k is greater than n
"""
if k > n:
return np.random.choice(n, k, replace=True)
else:
return np.random.choice(n, k, replace=False) | 26,984 |
def set_price_filter(request, category_slug):
    """Saves the given price filter to session. Redirects to the category with
    given slug.

    Reads "min"/"max" from GET or POST; unparsable values fall back to
    defaults (0 and 99999 respectively before validation).
    """
    req = request.POST if request.method == 'POST' else request.GET
    try:
        min_val = lfs.core.utils.atof(req.get("min", "0"))
    except (TypeError, ValueError):
        min_val = 0
    try:
        max_val = lfs.core.utils.atof(req.get("max", "99999"))
    except (TypeError, ValueError):
        # Narrowed from a bare ``except`` (now matches the "min" branch)
        # so programming errors are no longer silently hidden.
        max_val = 0
    # Defensive re-validation; keeps the historical string "0" fallback so
    # downstream consumers of the session value see the same types as before.
    try:
        float(min_val)
    except (TypeError, ValueError):
        min_val = "0"
    try:
        float(max_val)
    except (TypeError, ValueError):
        max_val = "0"
    request.session["price-filter"] = {"min": min_val, "max": max_val}
    url = reverse("lfs_category", kwargs={"slug": category_slug})
    return HttpResponseRedirect(url)
def _get_config():
    """Returns a dictionary with webDAV server parameters, or ask them to the user.

    :raises KeyError: if a "webdav" section exists in the configuration
        file but is missing any of server/username/password.
    """
    # tries to figure if we can authenticate using a configuration file
    data = read_config()
    if "webdav" in data:
        # Validate that the section is complete.
        missing = {"server", "username", "password"} - set(data["webdav"])
        if missing:
            raise KeyError(
                'If the configuration file contains a "webdav" '
                "section, it should contain 3 variables defined inside: "
                '"server", "username", "password".'
            )
    else:
        # ask the user for the information, in case nothing available.
        # logger.warn() is deprecated since Python 3.3 -- use warning().
        logger.warning(
            "Requesting server information for webDAV operation. "
            "(To create a configuration file, and avoid these, follow "
            "the Setup subsection at our Installation manual.)"
        )
        webdav_data = dict()
        webdav_data["server"] = input("The base address of the server: ")
        webdav_data["username"] = input("Username: ")
        webdav_data["password"] = input("Password: ")
        data["webdav"] = webdav_data
    return data["webdav"]
def test_system():
    """Runs few tests to check if npm and peerflix is installed on the system."""
    if os.system('npm --version') != 0:
        # Fixed doubled word in the original message ("installed installed").
        print('NPM not installed, please read the Readme file for more information.')
        exit()
    if os.system('peerflix --version') != 0:
        print('Peerflix not installed, installing..')
        os.system('npm install -g peerflix')
def lastfmcompare(text, nick, bot,):
    """[user] ([user] optional) - displays the musical taste compatibility
    between two LastFM users (the caller and [user], or two given users).

    (The original docstring described the now-playing command; this is the
    tasteometer comparison.)
    """
    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "No last.fm API key set."
    if not text:
        return "please specify a lastfm username to compare"
    try:
        user1, user2 = text.split()
    except ValueError:
        # Only one name given: compare the caller against it.
        user2 = text
        user1 = nick
    # Resolve nicks to linked last.fm accounts where available.
    user2_check = get_account(user2)
    if user2_check:
        user2 = user2_check
    user1_check = get_account(user1)
    if user1_check:
        user1 = user1_check
    params = {
        'method': 'tasteometer.compare',
        'api_key': api_key,
        'type1': 'user',
        'value1': user1,
        'type2': 'user',
        'value2': user2
    }
    request = requests.get(api_url, params=params)
    if request.status_code != requests.codes.ok:
        return "Failed to fetch info ({})".format(request.status_code)
    data = request.json()
    if 'error' in data:
        return "Error: {}.".format(data["message"])
    result = data["comparison"]["result"]
    score = float(result["score"])
    score = float("{:.3f}".format(score * 100))
    if score == 0:
        return "{} and {} have no common listening history.".format(user2, user1)
    level = "Super" if score > 95 else "Very High" if score > 80 else "High" if score > 60 else \
        "Medium" if score > 40 else "Low" if score > 10 else "Very Low"
    # Check for the "artist" key *before* inspecting its value -- the
    # original conditional chain evaluated the type check first, raising
    # KeyError whenever the key was missing.  last.fm returns a list for
    # several common artists and a single dict for exactly one.
    if "artist" in result["artists"]:
        entries = result["artists"]["artist"]
        if isinstance(entries, list):
            artists = [entry["name"] for entry in entries]
        else:
            artists = [entries["name"]]
    else:
        artists = ""
    artist_string = "\x02In Common:\x02 " + \
        ", ".join(artists) if artists else ""
    return "Musical compatibility between \x02{}\x02 and \x02{}\x02: {} (\x02{}%\x02) {}".format(user1, user2, level,
                                                                                                 score, artist_string)
def fourier_ellipsoid(inp, size, n=-1, axis=-1, output=None):
    """
    Multidimensional ellipsoid Fourier filter.
    The array is multiplied with the fourier transform of a ellipsoid of
    given sizes.
    Parameters
    ----------
    inp : array_like
        The inp array.
    size : float or sequence
        The size of the box used for filtering.
        If a float, `size` is the same for all axes. If a sequence, `size` has
        to contain one value for each axis.
    n : int, optional
        If `n` is negative (default), then the inp is assumed to be the
        result of a complex fft.
        If `n` is larger than or equal to zero, the inp is assumed to be the
        result of a real fft, and `n` gives the length of the array before
        transformation along the real transform direction.
    axis : int, optional
        The axis of the real transform.
    output : Tensor, optional
        If given, the result of filtering the inp is placed in this array.
        None is returned in this case.
    Returns
    -------
    fourier_ellipsoid : Tensor
        The filtered inp.
    Notes
    -----
    This function is implemented for arrays of rank 1, 2, or 3.
    """
    inp = np.asarray(inp)
    # Allocate (or validate) the output array with the proper dtype.
    output = _get_output_fourier(output, inp)
    # Convert a possibly-negative axis into a positive index.
    axis = normalize_axis_index(axis, inp.ndim)
    # Expand a scalar size into one entry per dimension.
    sizes = cndsupport._normalize_sequence(size, inp.ndim)
    sizes = np.asarray(sizes, dtype=np.float64)
    # The C implementation requires a contiguous buffer.
    if not sizes.flags.contiguous:
        sizes = sizes.copy()
    # Final argument selects the filter type in the C extension
    # (presumably 2 == ellipsoid, mirroring scipy.ndimage -- confirm).
    cndi.fourier_filter(inp, sizes, n, axis, output, 2)
    return output | 26,989 |
def gaussian1D(x: np.ndarray, amplitude: Number, center: Number, stdev: Number) -> np.ndarray:
    """A one dimensional gaussian distribution.
    = amplitude * exp(-0.5 (x - center)**2 / stdev**2)
    """
    deviation = x - center
    return amplitude * np.exp(-0.5 * deviation**2 / stdev**2)
def BuildImportLibs(flags, inputs_by_part, deffiles):
    """Runs the linker once per part to generate its import library.

    Returns the list of generated .lib file names.
    """
    Log('building import libs')
    import_libs = []
    for i, (inputs, deffile) in enumerate(zip(inputs_by_part, deffiles)):
        libfile = 'part%d.lib' % i
        # Ask the linker to emit an import library for this part's .def file.
        extra = ['/IMPLIB:%s' % libfile, '/DEF:%s' % deffile]
        RunLinker(flags + extra, i, inputs, 'implib', None)
        import_libs.append(libfile)
    return import_libs
def test_rename_file(bucket):
    """Test rename file.

    Uploads a file, renames it, then checks the old key is gone and the
    new key exists.
    """
    bucket = bucket()
    efs = EFS(storage="s3")
    # Rewind the shared fixture buffer before uploading.
    RANDOM_DATA.seek(0)
    efs.upload(TEST_FILE, RANDOM_DATA)
    # NOTE(review): bucket.Object() returns a lazy handle, so this assert
    # is always truthy and does not verify existence -- consider key.get().
    key = bucket.Object(TEST_FILE)
    assert key
    efs.rename(TEST_FILE, "new_test_file.txt")
    # The old key must be gone after the rename.
    key = bucket.Object(TEST_FILE)
    with pytest.raises(ClientError) as e:
        key.get()
    assert e.value.response["Error"]["Code"] == "NoSuchKey"
    key = bucket.Object("new_test_file.txt")
    assert key | 26,992 |
def gather_sparse(a, indices, axis=0, mask=None):
    """
    SparseTensor equivalent to tf.gather, assuming indices are sorted.
    :param a: SparseTensor of rank k and nnz non-zeros.
    :param indices: rank-1 int Tensor, rows or columns to keep.
    :param axis: int axis to apply gather to.
    :param mask: boolean mask corresponding to indices. Computed if not provided.
    :return gathered_a: SparseTensor masked along the given axis.
    :return values_mask: bool Tensor indicating surviving values, shape [nnz].
    """
    in_size = _square_size(a.dense_shape)
    # Derive the boolean mask from the indices when the caller did not
    # supply one.
    if mask is None:
        mask = ops.indices_to_mask(indices, in_size)
    inverse_map = _indices_to_inverse_map(indices, in_size)
    return _boolean_mask_sparse(
        a, mask, axis=axis, inverse_map=inverse_map, out_size=tf.size(indices)
    )
def Window(node, size=-1, full_only=False):
    """Lazy wrapper to collect a window of values. If a node is executed 3 times,
    returning 1, 2, 3, then the window node will collect those values in a list.
    Arguments:
        node (node): input node
        size (int): size of windows to use; <= -1 keeps an unbounded history,
            0 disables buffering and passes values straight through
        full_only (bool): only return if list is full
    """
    def foo(node=node, size=size, full_only=full_only):
        # size == 0: no buffering, behave like the wrapped node itself.
        if size == 0:
            return node.value()
        # First evaluation: seed the accumulator with the current value.
        if ret._accum is None:
            ret._accum = [node.value()]
        # Append only when the upstream node has produced a fresh value.
        elif ret.dependencyIsDirty(node):
            ret._accum.append(node.value())
        # Positive sizes keep only the trailing `size` entries.
        if size > 0:
            ret._accum = ret._accum[-size:]
        if full_only and len(ret._accum) == size:
            return ret._accum
        elif full_only:
            return None
        return ret._accum
    # make new node; the accumulator lives on the node so state survives
    # across evaluations.
    ret = node._gennode("Window[{}]".format(size if size > 0 else "∞"), foo, [node])
    ret._accum = None
    return ret | 26,994 |
def fixed_rate_loan(amount, nrate, life, start, freq='A', grace=0,
                    dispoints=0, orgpoints=0, prepmt=None, balloonpmt=None):
    """Fixed rate loan.
    Args:
        amount (float): Loan amount.
        nrate (float): nominal interest rate per year.
        life (float): life of the loan.
        start (int, tuple): init period for the loan.
        freq (str): pandas frequency string, compounding periods per year.
        grace (int): number of periods of grace (without payment of the principal)
        dispoints (float): Discount points of the loan.
        orgpoints (float): Origination points of the loan.
        prepmt (pandas.Series): generic cashflow representing prepayments.
        balloonpmt (pandas.Series): generic cashflow representing balloon payments.
    Returns:
        A object of the class ``Loan``.
    >>> pmt = cashflow(const_value=0, start='2016Q1', periods=11, freq='Q')
    >>> pmt['2017Q4'] = 200
    >>> fixed_rate_loan(amount=1000, nrate=10, life=10, start='2016Q1', freq='Q',
    ...                 grace=0, dispoints=0,
    ...                 orgpoints=0, prepmt=pmt, balloonpmt=None) # doctest: +NORMALIZE_WHITESPACE
    Amount:             1000.00
    Total interest:      129.68
    Total payment:      1129.68
    Discount points:       0.00
    Origination points:    0.00
    <BLANKLINE>
            Beg_Ppal_Amount  Nom_Rate  Tot_Payment  Int_Payment  Ppal_Payment  \\
    2016Q1      1000.000000      10.0     0.000000     0.000000      0.000000
    2016Q2      1000.000000      10.0   114.258763    25.000000     89.258763
    2016Q3       910.741237      10.0   114.258763    22.768531     91.490232
    2016Q4       819.251005      10.0   114.258763    20.481275     93.777488
    2017Q1       725.473517      10.0   114.258763    18.136838     96.121925
    2017Q2       629.351591      10.0   114.258763    15.733790     98.524973
    2017Q3       530.826618      10.0   114.258763    13.270665    100.988098
    2017Q4       429.838520      10.0   314.258763    10.745963    303.512800
    2018Q1       126.325720      10.0   114.258763     3.158143    111.100620
    2018Q2        15.225100      10.0    15.605727     0.380627     15.225100
    2018Q3         0.000000      10.0     0.000000     0.000000      0.000000
    <BLANKLINE>
            End_Ppal_Amount
    2016Q1      1000.000000
    2016Q2       910.741237
    2016Q3       819.251005
    2016Q4       725.473517
    2017Q1       629.351591
    2017Q2       530.826618
    2017Q3       429.838520
    2017Q4       126.325720
    2018Q1        15.225100
    2018Q2         0.000000
    2018Q3         0.000000
    """
    # BUGFIX: the original built the TypeError but never raised it.
    if not isinstance(float(nrate), float):
        raise TypeError('nrate must be a float.')
    nrate = interest_rate(const_value=nrate, start=start, periods=life+grace+1, freq=freq)
    if prepmt is None:
        prepmt = cashflow(const_value=0, start=start, periods=len(nrate), freq=freq)
    else:
        verify_period_range([nrate, prepmt])
    if balloonpmt is None:
        balloonpmt = nrate.copy()
        balloonpmt[:] = 0
    else:
        verify_period_range([nrate, balloonpmt])
    # present value of the balloon payments
    if balloonpmt is not None:
        balloonpv = timevalue(cflo=balloonpmt, prate=nrate, base_date=grace)
    else:
        balloonpv = 0
    pyr = getpyr(nrate)
    # Level payment sized so the loan (net of balloon PV) amortizes to zero.
    pmt = pvpmt(pmt=None, pval=-amount+balloonpv, nrate=nrate[0], nper=len(nrate)-1, pyr=pyr)
    pmts = nrate.copy()
    pmts[:] = 0
    for time in range(1, life + 1):
        pmts[grace + time] = pmt
    # balance
    begppalbal = nrate.copy()
    intpmt = nrate.copy()
    ppalpmt = nrate.copy()
    totpmt = nrate.copy()
    endppalbal = nrate.copy()
    begppalbal[:] = 0
    intpmt[:] = 0
    ppalpmt[:] = 0
    totpmt[:] = 0
    endppalbal[:] = 0
    # payments per period
    for time, _ in enumerate(totpmt):
        totpmt[time] = pmts[time] + balloonpmt[time] + prepmt[time]
    # balance calculation
    for time in range(grace + life + 1):
        if time == 0:
            # Period 0 carries only the points paid up front.
            begppalbal[0] = amount
            endppalbal[0] = amount
            totpmt[time] = amount * (dispoints + orgpoints) / 100
            ### intpmt[time] = amount * dispoints / 100
        else:
            begppalbal[time] = endppalbal[time - 1]
            if time <= grace:
                # Grace period: interest only, principal unchanged.
                intpmt[time] = begppalbal[time] * nrate[time] / pyr / 100
                totpmt[time] = intpmt[time]
                endppalbal[time] = begppalbal[time]
            else:
                intpmt[time] = begppalbal[time] * nrate[time] / pyr / 100
                ppalpmt[time] = totpmt[time] - intpmt[time]
                if ppalpmt[time] < 0:
                    # Payment below interest: capitalize the shortfall.
                    capint = - ppalpmt[time]
                    ppalpmt[time] = 0
                else:
                    capint = 0
                endppalbal[time] = begppalbal[time] - ppalpmt[time] + capint
                if endppalbal[time] < 0:
                    # Overpayment: settle the remaining balance exactly.
                    totpmt[time] = begppalbal[time] + intpmt[time]
                    ppalpmt[time] = begppalbal[time]
                    endppalbal[time] = begppalbal[time] - ppalpmt[time]
                    pmts[time] = 0
                    prepmt[time] = 0
    data = {'Beg_Ppal_Amount':begppalbal}
    result = Loan(life=life, amount=amount, grace=grace, nrate=nrate,
                  dispoints=dispoints, orgpoints=orgpoints,
                  data=data)
    result['Nom_Rate'] = nrate
    result['Tot_Payment'] = totpmt
    result['Int_Payment'] = intpmt
    result['Ppal_Payment'] = ppalpmt
    result['End_Ppal_Amount'] = endppalbal
    return result
def main():
    """Entry point: report the finishing time to stdout."""
    print("finito {}".format(datetime.datetime.now()))
def get_results():
    """
    Returns the scraped results for a set of inputs.
    Inputs:
        The URL, the type of content to scrap and class/id name.
        This comes from the get_results() function in script.js
    Output:
        Returns a JSON list of the results
    """
    # Decode the JSON request body into a dict.
    post_data = json.loads(request.data.decode())
    req_url = post_data.get('url')
    req_type = post_data.get('type')
    req_selector = post_data.get('selector')
    # Dispatch tables mapping the requested type to the Webpage method.
    # The Webpage is only constructed when a known type matches, exactly
    # as in the original if/elif chain.
    simple_ops = {
        'head': 'get_head_tag',
        'content': 'get_all_contents',
        'images': 'get_all_images',
    }
    selector_ops = {
        'class': 'get_content_by_class',
        'id': 'get_content_by_id',
    }
    results = []
    if req_type in simple_ops:
        results = getattr(Webpage(req_url), simple_ops[req_type])()
    elif req_type in selector_ops:
        results = getattr(Webpage(req_url), selector_ops[req_type])(req_selector)
    # Serialize for the frontend.
    return json.dumps(results)
def _mocked_presets(*args, **kwargs):
    """Return a list of mocked presets.

    Accepts and ignores any arguments so it can stand in for the real
    preset-fetching callable.
    """
    return [MockPreset("1")] | 26,998 |
def play(context, songpos=None):
    """
    *musicpd.org, playback section:*
        ``play [SONGPOS]``
        Begins playing the playlist at song number ``SONGPOS``.
        The original MPD server resumes from the paused state on ``play``
        without arguments.
    *Clarifications:*
    - ``play "-1"`` when playing is ignored.
    - ``play "-1"`` when paused resumes playback.
    - ``play "-1"`` when stopped with a current track starts playback at the
      current track.
    - ``play "-1"`` when stopped without a current track, e.g. after playlist
      replacement, starts playback at the first track.
    *BitMPC:*
    - issues ``play 6`` without quotes around the argument.
    """
    # No position: plain resume/start.
    if songpos is None:
        return context.core.playback.play().get()
    # Special-cased "-1" semantics (see docstring).
    if songpos == -1:
        return _play_minus_one(context)
    try:
        tl_track = context.core.tracklist.slice(songpos, songpos + 1).get()[0]
        return context.core.playback.play(tl_track).get()
    except IndexError:
        raise exceptions.MpdArgError('Bad song index')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.