| content (string, 22 to 815k chars) | id (int64, 0 to 4.91M) |
|---|---|
def _apply_sobel(img_matrix):
"""
Input: img_matrix(height, width) with type float32
Convolves the image with Sobel masks and returns the gradient magnitude
"""
dx = sobel(img_matrix, 1)
dy = sobel(img_matrix, 0)
grad_mag = np.hypot(dx, dy) # Calculates sqrt(dx^2 + dy^2)
grad_mag *= 255 / grad_mag.max() # Normalize the gradient magnitudes
return grad_mag
| 20,700
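A minimal usage sketch for `_apply_sobel` above; it assumes `sobel` comes from `scipy.ndimage` and `np` is NumPy, since neither import is shown in the snippet.

import numpy as np
from scipy.ndimage import sobel

img = np.random.rand(64, 64).astype(np.float32)  # placeholder image
edges = _apply_sobel(img)                        # gradient magnitude scaled to [0, 255]
print(edges.shape, edges.max())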
|
def isDeleted(doc_ref):
"""
Checks if document is logically deleted, i.e. has a deleted timestamp.
Returns: boolean
"""
return exists(doc_ref) and 'ts_deleted' in get_doc(doc_ref)
| 20,701
|
def log_sql_result(count, time):
"""Print the given string to the console with "[SQL] " prefixed
Parameters:
statement (String): The statement to log
"""
print(str(datetime.now().strftime(fmt)) + CYAN + ' [SQL] ' + RESET + str(count) + ' Rows(s) affected in ' + str(round(time,3)) + ' s')
| 20,702
|
def test_list_byte_length_1_nistxml_sv_iv_list_byte_length_2_4(mode, save_output, output_format):
"""
Type list/byte is restricted by facet length with value 6.
"""
assert_bindings(
schema="nistData/list/byte/Schema+Instance/NISTSchema-SV-IV-list-byte-length-2.xsd",
instance="nistData/list/byte/Schema+Instance/NISTXML-SV-IV-list-byte-length-2-4.xml",
class_name="NistschemaSvIvListByteLength2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 20,703
|
def GenerateSysroot(sysroot_path, board, build_tests, unpack_only=False):
"""Create a sysroot using only binary packages from local binhost.
Args:
sysroot_path: Where we want to place the sysroot.
board: Board we want to build for.
build_tests: If we should include autotest packages.
unpack_only: If we only want to unpack the binary packages, and not build
them.
"""
osutils.SafeMakedirs(sysroot_path)
if not unpack_only:
# Generate the sysroot configuration.
sysroot = sysroot_lib.Sysroot(sysroot_path)
sysroot.WriteConfig(sysroot.GenerateBoardConfiguration(board))
cros_build_lib.RunCommand(
[os.path.join(constants.CROSUTILS_DIR, 'install_toolchain'),
'--noconfigure', '--sysroot', sysroot_path])
cmd = list(_BUILD_PKGS_CMD)
cmd.extend(['--board_root', sysroot_path, '--board', board])
if unpack_only:
cmd.append('--unpackonly')
if not build_tests:
cmd.append('--nowithautotest')
env = {'USE': os.environ.get('USE', ''),
'PORTAGE_BINHOST': 'file://%s' % portage_util.GetBinaryPackageDir(
sysroot=cros_build_lib.GetSysroot(board))}
cros_build_lib.RunCommand(cmd, extra_env=env)
| 20,704
|
def postprocess_summary(summary, name, result_dict, result_keys):
"""
Save the result_keys performances in the result_dict.
"""
for key in result_keys:
if key in summary.keys():
result_dict[key][name] = summary[key]
| 20,705
|
def sys_wait_for_event(
mask: int, k: Optional[Key], m: Optional[Mouse], flush: bool
) -> int:
"""Wait for an event then return.
If flush is True then the buffer will be cleared before waiting. Otherwise
each available event will be returned in the order they're received.
Args:
mask (int): :any:`Event types` to wait for.
k (Optional[Key]): A tcod.Key instance which might be updated with
an event. Can be None.
m (Optional[Mouse]): A tcod.Mouse instance which might be updated
with an event. Can be None.
flush (bool): Clear the event buffer before waiting.
.. deprecated:: 9.3
Use the :any:`tcod.event.wait` function to wait for events.
"""
return int(
lib.TCOD_sys_wait_for_event(
mask,
k.key_p if k else ffi.NULL,
m.mouse_p if m else ffi.NULL,
flush,
)
)
| 20,706
|
def func_module_subcmd(args):
"""Entry point for "buildtest module" subcommand.
:param args: command line arguments passed to buildtest
:type args: dict, required
"""
if args.diff_trees:
diff_trees(args.diff_trees)
if args.easybuild:
check_easybuild_module()
if args.spack:
check_spack_module()
if args.module_deps:
find_module_deps(args.module_deps)
if args.list_all_parents:
list_all_parent_modules()
if args.software:
list_software()
| 20,707
|
def get_test():
"""
Return test data.
"""
context = {}
context['test'] = 'this is a test message'
return flask.jsonify(**context)
| 20,708
|
def num_poisson_events(rate, period, rng=None):
"""
Returns the number of events that have occurred in a Poisson
process of ``rate`` over ``period``.
"""
if rng is None:
rng = GLOBAL_RNG
events = 0
while period > 0:
time_to_next = rng.expovariate(1.0/rate)
if time_to_next <= period:
events = events + 1
period = period - time_to_next
return events
| 20,709
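A hedged usage sketch for `num_poisson_events`; `GLOBAL_RNG` is not shown in the snippet, so an explicit `random.Random` instance is passed. Note that the code feeds `1.0/rate` to `expovariate`, so `rate` effectively acts as the mean waiting time between events rather than an event rate.

import random

rng = random.Random(42)
print(num_poisson_events(rate=2.0, period=10.0, rng=rng))  # one random realization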
|
def findmatch(members,classprefix):
"""Find match for class member."""
lst = [n for (n,c) in members]
return fnmatch.filter(lst,classprefix)
| 20,710
|
def create_fields_provided_instances(apps, schema_editor):
""" creates new instances of the FieldsProvided model for each submission.
This helps track which form fields were filled in (or edited/removed) when users
submit activity reports or staff edits.
Each submission should have one related instance of FieldsProvided.
"""
ActivitySubmission = apps.get_model('wells', 'ActivitySubmission')
FieldsProvided = apps.get_model('wells', 'FieldsProvided')
foreign_key_models = {
'casing_set': apps.get_model('wells', 'Casing'),
'screen_set': apps.get_model('wells', 'Screen'),
'linerperforation_set': apps.get_model('wells', 'ActivitySubmissionLinerPerforation'),
'decommission_description_set': apps.get_model('wells', 'DecommissionDescription'),
'lithologydescription_set': apps.get_model('wells', 'LithologyDescription')
}
instances_created = 0
# find reports that do not have a "fields_provided" object. This means we were
# not recording the fields that were explicitly provided by the user
# at the time the report was submitted.
for report in ActivitySubmission.objects.filter(fields_provided__isnull=True, well_activity_type='STAFF_EDIT'):
data_submitted = {}
# we gather values from each report using the same logic in place in stack.py on 2019/05/21,
# and create a "fields provided" mapping. By moving toward using the mapping to determine
which fields were explicitly filled in or altered by the user, we can determine what
# fields to update rather than guessing based on the value. After this migration is run,
# it should no longer be necessary to inspect the value to decide whether to update a piece of
# well data or not.
for field in FieldsProvided._meta.get_fields():
if field.name == 'activity_submission':
continue
source_key = field.name
value = _getattr(report, source_key, foreign_key_models)
if value or value is False or value == 0 or value == '':
data_submitted[source_key] = True
data_submitted.pop('filing_number', None)
FieldsProvided.objects.create(activity_submission=report, **data_submitted)
instances_created += 1
logger.info("created {} fields_provided mappings for submission reports".format(instances_created))
| 20,711
|
def find_cheapest_price(price_data, timeframe, power):
"""Return start time and end time where the electricity price is the cheapest.
:param price_data: Price data, key is start hour in unix time, value is price
:type price_data: Dict[int, float]
:param timeframe: time span in minutes for which we want to consume power
:type timeframe: int
:param power: the total power consumption (kWh) which occurs in the timeframe
:type power: float
:rtype: tuple(int, float)
"""
start = arrow.now()
end = start.shift(minutes=+timeframe)
start_date = arrow.get(start.date())
max_end_ts = max(price_data)
next_full_hour = start_date.shift(hours=+start.to("UTC").hour + 1)
# left aligned
prices = {start.timestamp: calculate_price(start, end, price_data, power)}
# aligned on the next full hour
prices[next_full_hour.timestamp] = calculate_price(
next_full_hour, next_full_hour.shift(minutes=+timeframe), price_data, power
)
# Shift the window by one hour to jump into the loop that increments at the end
next_full_hour = next_full_hour.shift(hours=+1)
# We have price data until the next_full_hour + 1h
while next_full_hour.shift(minutes=+timeframe).timestamp <= max_end_ts:
# 1) Look for end bound price (use the full last hour)
# Get the last hour.
end_hour = next_full_hour.shift(minutes=+timeframe).replace(minute=0)
prices[end_hour.shift(minutes=-timeframe).timestamp] = calculate_price(
end_hour.shift(minutes=-timeframe),
end_hour,
price_data,
power,
)
# start bound
prices[next_full_hour.timestamp] = calculate_price(
next_full_hour, next_full_hour.shift(minutes=+timeframe), price_data, power
)
# Shift the window by one hour
next_full_hour = next_full_hour.shift(hours=+1)
# Add the price that ends at the end of the last hour we have prices for
# 1) Look for end bound price (use the full last hour)
# Get the last hour.
end_hour = next_full_hour.shift(minutes=+timeframe).replace(minute=0)
prices[end_hour.shift(minutes=-timeframe).timestamp] = calculate_price(
end_hour.shift(minutes=-timeframe),
end_hour,
price_data,
power,
)
for ts, p in prices.items():
print(arrow.get(ts).to("local"), p)
cheapest_price_ts = min(prices, key=prices.get)
print(
"\n"
f"The cheapest price is starting at {arrow.get(cheapest_price_ts).to('local')} "
f"ending at {arrow.get(cheapest_price_ts).shift(minutes=+timeframe).to('local')} "
f"costing {prices[cheapest_price_ts]}."
)
| 20,712
|
def main(args, unit_test=False):
""" Runs fluxing steps
"""
import os
import numpy as np
from pypeit import fluxspec
from pypeit.core import flux
from pypeit.par import pypeitpar
# Load the file
spectrograph, config_lines, flux_dict = read_fluxfile(args.flux_file)
# Parameters
spectrograph_def_par = spectrograph.default_pypeit_par()
par = pypeitpar.PypeItPar.from_cfg_lines(cfg_lines=spectrograph_def_par.to_config(),
merge_with=config_lines)
# TODO: Remove this. Put this in the unit test itself.
if unit_test:
path = os.path.join(os.getenv('PYPEIT_DEV'), 'Cooked', 'Science')
par['fluxcalib']['std_file'] = os.path.join(path, par['fluxcalib']['std_file'])
for kk, spec1d_file, flux_file in zip(np.arange(len(flux_dict['spec1d_files'])), flux_dict['spec1d_files'], flux_dict['flux_files']):
flux_dict['spec1d_files'][kk] = os.path.join(path, spec1d_file)
flux_dict['flux_files'][kk] = os.path.join(path, flux_file)
# Write the par to disk
print("Writing the parameters to {}".format(args.par_outfile))
par.to_config(args.par_outfile)
# Instantiate
FxSpec = fluxspec.instantiate_me(spectrograph, par['fluxcalib'], debug=args.debug)
# Generate sensfunc??
if par['fluxcalib']['std_file'] is not None:
# Load standard
_,_ = FxSpec.load_objs(par['fluxcalib']['std_file'], std=True)
## For echelle, the code will deal with the standard star in the ech_fluxspec.py
#if not spectrograph.pypeline == 'Echelle':
# Find the star
_ = FxSpec.find_standard()
# Sensitivity
_ = FxSpec.generate_sensfunc()
# Output
_ = FxSpec.save_sens_dict(FxSpec.sens_dict, par['fluxcalib']['sensfunc'])
# Show
if args.plot:
FxSpec.show_sensfunc()
# Flux?
if len(flux_dict) > 0:
for spec1d_file, flux_file in zip(flux_dict['spec1d_files'], flux_dict['flux_files']):
FxSpec.flux_science(spec1d_file)
FxSpec.write_science(flux_file)
| 20,713
|
def drop_database(dbname,engine):
"""
Warning, drops the specified database!
Args:
dbname (str): Name of database to drop.
engine (obj): Database engine.
"""
msg = """
--------------------------------------------------------- \n
Warning, you are about to delete the following database! \n
{}.{}
Are you sure you wish to continue? \n
Type 'yes' to proceed. \n
--------------------------------------------------------- \n
\n""".format(engine.name,dbname)
if input(msg).lower() != "yes":
sys.exit()
con = engine.connect()
con.execute("COMMIT") # need to close current transaction
con.execute("DROP DATABASE IF EXISTS {}".format(dbname))
con.execute("COMMIT") # need to close current transaction
con.close()
msg = "Target database dropped: {}".format(dbname)
logging.info(msg)
| 20,714
|
def is_narcissistic(number):
"""Must return True if number is narcissistic"""
return sum([pow(int(x), len(str(number))) for x in str(number)]) == number
| 20,715
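A quick worked check of `is_narcissistic`: 153 has three digits and 1**3 + 5**3 + 3**3 == 153.

print(is_narcissistic(153))  # True
print(is_narcissistic(154))  # False: 1**3 + 5**3 + 4**3 == 190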
|
def Plot1DFields(r,h,phi_n_bar,g_s,g_b):
"""
Generates a nice plot of the 1D fields with 2 axes and a legend.
Note: The sizing works well in a jupyter notebook
but probably should be adjusted for a paper.
"""
fig,ax1 = plt.subplots(figsize=(6.7,4))
fig.subplots_adjust(right=0.8)
ax2 = ax1.twinx()
p1, = ax1.plot(r,h,'C0-',label=r'$h$')
p2, = ax2.plot(r,phi_n_bar,'C1-',label=r'$\bar{\phi}_n$')
p3, = ax2.plot(r,g_s,'C2-',label=r'$g_s$')
p4, = ax2.plot(r,g_b,'C3-',label=r'$g_b$')
ax1.set_xlabel(r'$r$',labelpad=0)
ax1.set_ylabel(r'$h$',rotation=0,labelpad=10)
ax1.set_xlim(r[0],r[-1])
ax2.set_ylabel('$\\bar{\\phi}_n$\n$g_s$\n$g_b$',rotation=0,labelpad=12,va='center')
ax2.set_ylim(-0.05,1.05)
lines = [p1,p2,p3,p4]
ax1.legend(lines,[l.get_label() for l in lines],loc='center left',bbox_to_anchor=(1.16,0.54))
return fig,[ax1,ax2]
| 20,716
|
def get_yesterday():
"""
:return: yesterday's date as a datetime.date
"""
return _get_passed_one_day_from_now(days=1).date()
| 20,717
|
def classroom_mc():
"""
Corresponds to the 2nd line of Table 4 in https://doi.org/10.1101/2021.10.14.21264988
"""
concentration_mc = mc.ConcentrationModel(
room=models.Room(volume=160, inside_temp=models.PiecewiseConstant((0., 24.), (293,)), humidity=0.3),
ventilation=models.MultipleVentilation(
ventilations=(
models.SlidingWindow(
active=models.PeriodicInterval(period=120, duration=120),
outside_temp=TorontoTemperatures['Dec'],
window_height=1.6,
opening_length=0.2,
),
models.AirChange(active=models.PeriodicInterval(period=120, duration=120), air_exch=0.25),
)
),
infected=mc.InfectedPopulation(
number=1,
presence=models.SpecificInterval(((0, 2), (2.5, 4), (5, 7), (7.5, 9))),
virus=virus_distributions['SARS_CoV_2_ALPHA'],
mask=models.Mask.types["No mask"],
activity=activity_distributions['Light activity'],
expiration=build_expiration('Speaking'),
host_immunity=0.,
),
evaporation_factor=0.3,
)
return mc.ExposureModel(
concentration_model=concentration_mc,
short_range=(),
exposed=mc.Population(
number=19,
presence=models.SpecificInterval(((0, 2), (2.5, 4), (5, 7), (7.5, 9))),
activity=activity_distributions['Seated'],
mask=models.Mask.types["No mask"],
host_immunity=0.,
),
)
| 20,718
|
def teardown_module():
"""Remove test data and scripts from .retriever directories."""
for test in tests:
shutil.rmtree(os.path.join(HOME_DIR, "raw_data", test['name']))
os.remove(os.path.join(HOME_DIR, "scripts", test['name'] + '.json'))
subprocess.call(['rm', '-r', test['name']])
| 20,719
|
def reset_params():
"""Reset all global (or module) parameters.
"""
Z3_global_param_reset_all()
| 20,720
|
def smart_eval(stmt, _globals, _locals, filename=None, *, ast_transformer=None):
"""
Automatically exec/eval stmt.
Returns the result if eval, or NoResult if it was an exec. Or raises if
the stmt is a syntax error or raises an exception. If stmt is multiple
statements ending in an expression, the statements are exec-ed and the
final expression is eval-ed and returned as the result.
filename should be the filename used for compiling the statement. If
given, stmt will be saved to the Python linecache, so that it appears in
tracebacks. Otherwise, a default filename is used and it isn't saved to the
linecache. To work properly, "fake" filenames should start with < and end
with >, and be unique for each stmt.
Note that classes defined with this will have their module set to
'__main__'. To change this, set _globals['__name__'] to the desired
module.
To transform the ast before compiling it, pass in an ast_transformer
function. It should take in an ast and return a new ast.
Examples:
>>> g = l = {}
>>> smart_eval('1 + 1', g, l)
2
>>> smart_eval('a = 1 + 1', g, l)
<class 'mypython.mypython.NoResult'>
>>> g['a']
2
>>> smart_eval('a = 1 + 1; a', g, l)
2
"""
if filename:
if filename != "<stdin>":
# (size, mtime, lines, fullname)
linecache.cache[filename] = (len(stmt), None, stmt.splitlines(keepends=True), filename)
else:
filename = mypython_file()
p = ast.parse(stmt)
if ast_transformer:
p = ast_transformer(p)
expr = None
res = NoResult
if p.body and isinstance(p.body[-1], ast.Expr):
expr = p.body.pop()
code = compile(p, filename, 'exec')
exec(code, _globals, _locals)
if expr:
code = compile(ast.Expression(expr.value), filename, 'eval')
res = eval(code, _globals, _locals)
return res
| 20,721
|
def parse_toml(path_string: Optional[str]) -> Dict[str, Any]:
"""Parse toml"""
if not path_string:
path = pathlib.Path(os.getcwd())
else:
path = pathlib.Path(path_string)
toml_path = path / "pyproject.toml"
if not toml_path.exists():
return {}
with open(toml_path, encoding="utf8") as handle:
pyproject_toml = tomli.loads(handle.read())
config = pyproject_toml.get("tool", {}).get("pydoc_fork", {})
loose_matching = {
k.replace("--", "").replace("-", "_"): v for k, v in config.items()
}
return loose_matching
| 20,722
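For illustration, the key normalization in `parse_toml` maps hyphenated (and double-dash prefixed) option names onto snake_case; a standalone sketch of that step using made-up option names:

config = {"--output-dir": "docs", "skip-private": True}
loose_matching = {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
print(loose_matching)  # {'output_dir': 'docs', 'skip_private': True}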
|
def run_authorization_flow():
"""Run authorization flow where the user must
authorize Pardal to access their Twitter account."""
start_server()
logger.info('Starting not logged user flow...')
say('Please wait while an access token is retrieved from Twitter.')
# FIXME if Linux copy the address to user clipboard
webbrowser.open(f'{API_ADDRESS}/authorize')
| 20,723
|
def mtxv(m1, vin):
"""
Multiplies the transpose of a 3x3 matrix
on the left with a vector on the right.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mtxv_c.html
:param m1: 3x3 double precision matrix.
:type m1: 3x3-Element Array of floats
:param vin: 3-dimensional double precision vector.
:type vin: 3-Element Array of floats
:return: 3-dimensional double precision vector.
:rtype: 3-Element Array of floats
"""
m1 = stypes.toDoubleMatrix(m1)
vin = stypes.toDoubleVector(vin)
vout = stypes.emptyDoubleVector(3)
libspice.mtxv_c(m1, vin, vout)
return stypes.cVectorToPython(vout)
| 20,724
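A NumPy cross-check of what `mtxv` computes (the transpose of the matrix applied to the vector); this sketch does not call the CSPICE wrapper itself.

import numpy as np

m1 = [[0.0, 1.0, 0.0],
      [-1.0, 0.0, 0.0],
      [0.0, 0.0, 1.0]]
vin = [5.0, 10.0, 15.0]
print(np.array(m1).T @ np.array(vin))  # [-10.   5.  15.]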
|
def cmip_recipe_basics(func):
"""A decorator for starting a cmip recipe
"""
def parse_and_run(*args, **kwargs):
set_verbose(_logger, kwargs.get('verbose'))
opts = parse_recipe_options(kwargs.get('options'), add_cmip_collection_args_to_parser)
# Recipe is run.
returnval = func(*args, **kwargs)
return returnval
return parse_and_run
| 20,725
|
def optimize_concrete_function(
concrete_function: function.ConcreteFunction,
strip_control_dependencies: bool) -> wrap_function.WrappedFunction:
"""Returns optimized function with same signature as `concrete_function`."""
wrapped_fn = wrap_function.WrappedFunction(
concrete_function.graph,
variable_holder=wrap_function.VariableHolder(share_variables=True))
fetches = concrete_function.structured_outputs
if strip_control_dependencies:
flat_outputs, _ = tf2_utils.strip_and_get_tensors_and_control_dependencies(
tf.nest.flatten(fetches, expand_composites=True))
fetches = tf.nest.pack_sequence_as(
concrete_function.structured_outputs,
flat_outputs,
expand_composites=True)
result = wrapped_fn.prune(
feeds=concrete_function.inputs,
fetches=fetches,
input_signature=concrete_function.structured_input_signature)
# TODO(b/163329414): Remove once `prune` retains shape information for all
# components.
for original_out, pruned_out in zip(concrete_function.outputs,
result.outputs):
pruned_out.set_shape(original_out.get_shape())
return result
| 20,726
|
def serialize_cupcake(cupcake):
"""Serialize a cupcake SQLAlchemy obj to dictionary."""
return {
"id": cupcake.id,
"flavor": cupcake.flavor,
"size": cupcake.size,
"rating": cupcake.rating,
"image": cupcake.image,
}
| 20,727
|
def compute_accuracy(labels, logits):
"""Compute accuracy for a single batch of data, given the precomputed logits
and expected labels. The returned accuracy is normalized by the batch size.
"""
current_batch_size = tf.cast(labels.shape[0], tf.float32)
# argmax over the logits picks the predicted category for each example.
predictions = tf.argmax(logits, axis=1)
# return the average number of items equal to their label.
return tf.reduce_sum(tf.cast(tf.equal(labels, predictions),
tf.float32)) / current_batch_size
| 20,728
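A tiny eager-mode check of `compute_accuracy` (assumes TensorFlow 2.x; the labels use the int64 dtype that `tf.argmax` produces).

import tensorflow as tf

labels = tf.constant([0, 1, 1], dtype=tf.int64)
logits = tf.constant([[2.0, 0.1], [0.3, 1.5], [1.2, 0.4]])
print(compute_accuracy(labels, logits).numpy())  # 2 of 3 correct -> ~0.667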
|
def enable_daily_notification(update: Update, context: CallbackContext) -> None:
"""Enable daily notifications for clean time at user specified time."""
update, context, user = bot_helper.get_user(update, context)
user_job = bot_helper.get_daily_notification(context, user["UserID"])
if user_job:
notification_time = utils.convert_utc_time_to_local_time(user_job[0].job.next_run_time,
database.get_time_offset(user["UserID"]))
msg = Strings.ENABLE_NOTIFICATION_NOTIFICATION_ALREADY_SET.format(user["FirstName"],
notification_time.time())
else:
msg = enable_daily_notification_set(update, context, user)
send_message(BotUCM(update, context, msg), reply_markup=bot_helper.main_menu_keyboard())
| 20,729
|
def jvc(ctx):
"""Current–voltage characteristic + graphs"""
extension = "ocw"
jvc_template_out = ["+V [V]", "+J [mA/cm2]", "-V [V]", "-J [mA/cm2]", "+P [W/cm2]", "-P [W/cm2]"]
jvc_result_template = ["Scan", "Power max", "Voc [V]","Jsc [mA/cm2]", "FF", "PCE (%)"]
jvc_summary = []
jvc_mask_area = click.prompt(
"Enter mask-area value which will be applied to all provided files.",
value_proc=check_maskarea)
file_paths = ctx.obj['user_file_paths'] or get_files_at(ctx.obj['abs_path_in'], extension)
make_dir_at(ctx.obj['abs_path_out'])
for file in progressBar(file_paths, prefix = 'Progress:', suffix = 'Complete', length = 50):
filename = extract_filename(file, extension)
if ctx.obj['user_file_paths']:
file_path_in = file
else:
file_path_in = f"{ctx.obj['abs_path_in']}/{file}"
file_path_out = f"{ctx.obj['abs_path_out']}/{filename}"
# READ SOURCE
scan, file_length = read_jvc_file(file_path_in)
if file_length == 0:
click.echo(f"File {file} has no records")
continue
voltage = []
_voltage = []
density = []
_density = []
power = []
_power = []
middle_index = int(file_length / 2)  # middle index of source file
# LOGIC for reversed order of data in source file
if not ctx.obj['reversed']:
f_index = 0
r_index = middle_index
else:
f_index = middle_index
r_index = 0
for i in range(middle_index):
voltage.append(calc_voltage(scan[i + f_index][0]))
_voltage.append(calc_voltage(scan[i + r_index][0]))
density.append(calc_density(scan[i + f_index][1], jvc_mask_area))
_density.append(calc_density(scan[i + r_index][1], jvc_mask_area))
power.append(calc_power(voltage[i], density[i]))
_power.append(calc_power(_voltage[i], _density[i]))
# WRITE OUTPUT
output = transpose([voltage, density, _voltage, _density, power, _power])
write_csv_file(output, file_path_out, jvc_template_out)
# WRITE RESULTS
results = [
result_row("Forward", power, voltage, density),
result_row("Reverse", _power, _voltage, _density)
]
write_csv_file(results, file_path_out + "-result", jvc_result_template)
for row in results:
jvc_summary.append([filename] + row)
if len(file_paths) > 1:
write_csv_file(jvc_summary, ctx.obj['abs_path_out'] + "/jvc_summary", jvc_result_template)
draw_graph(filename, file_path_out, density, voltage, _density, _voltage)
time.sleep(0.01)
finish(ctx.obj['abs_path_out'])
| 20,730
|
def generate_sosreport_in_node(
nodeip: str, uname: str, pword: str, directory: str, results: list
) -> None:
"""Generate sosreport in the given node and copy report to directory provided
Args:
nodeip host Ip address
uname Username for accessing host
pword password for accessing host through given user
directory directory to store all the logs
results list collecting host IP addresses for which this operation failed
Returns:
None
"""
print(f"Connecting {nodeip} to generate sosreport")
try:
ssh_d = paramiko.SSHClient()
ssh_d.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_d.connect(nodeip, username=uname, password=pword)
ssh_d.exec_command("sudo yum -y install sos")
stdin, stdout, stderr = ssh_d.exec_command(
"sudo sosreport -a --all-logs -e ceph --batch"
)
rc = stdout.channel.recv_exit_status()
sosreport = re.search(r"sosreport-.*.tar.xz", stdout.read().decode())
if rc and not sosreport:
print(f"Failed to generate sosreport {nodeip}")
results.append(nodeip)
return
source_file = f"/var/tmp/{sosreport.group()}"
ssh_d.exec_command(f"sudo chown {uname} {source_file}")
directory_path = os.path.join(directory, "sosreports")
dir_exist = os.path.exists(directory_path)
if not dir_exist:
os.makedirs(directory_path)
ftp_client = ssh_d.open_sftp()
ftp_client.get(f"{source_file}", f"{directory_path}/{sosreport.group()}")
ftp_client.close()
print(
f"Successfully generated sosreport for node {nodeip} :{sosreport.group()}"
)
ssh_d.exec_command(f"sudo rm -rf {source_file}")
ssh_d.close()
except Exception:
results.append(nodeip)
| 20,731
|
def get_data(generic_iterator):
"""Code to get minibatch from data iterator
Inputs:
- generic_iterator; iterator for dataset
Outputs:
- data; minibatch of data from iterator
"""
data = next(generic_iterator)
if torch.cuda.is_available():
data = data.cuda()
return data
| 20,732
|
def aug_transform(crop, base_transform, cfg, extra_t=[]):
""" augmentation transform generated from config """
return T.Compose(
[
T.RandomApply(
[T.ColorJitter(cfg.cj0, cfg.cj1, cfg.cj2, cfg.cj3)], p=cfg.cj_p
),
T.RandomGrayscale(p=cfg.gs_p),
T.RandomResizedCrop(
crop,
scale=(cfg.crop_s0, cfg.crop_s1),
ratio=(cfg.crop_r0, cfg.crop_r1),
interpolation=3,
),
T.RandomHorizontalFlip(p=cfg.hf_p),
*extra_t,
base_transform(),
]
)
| 20,733
|
def _check_definition_contains_or(definition_dict, key, values):
"""need docstring"""
out = False
for value in values:
if (np.array(list(definition_dict[key])) == value).any():
out = True
break
return out
| 20,734
|
def write_conf(config):
"""
"""
try:
with open('./data/config.json', 'w') as outfile:
print(timestamp(), "\tConfig written: ", config)
json.dump(config, outfile)
except IOError:
print(timestamp(), "\tIOError opening config.json for writing")
return
| 20,735
|
def concurrent_map(func, data):
"""
Similar to the builtin function map(), but spawns a thread for each element
and applies `func` concurrently.
Note: unlike map(), we cannot take an iterable argument. `data` should be an
indexable sequence.
WARNING: this function doesn't limit the number of threads running at the same time
"""
N = len(data)
result = [None] * N
# wrapper to dispose the result in the right slot
def task_wrapper(i):
result[i] = func(data[i])
threads = [Thread(target=task_wrapper, args=(i,)) for i in range(N)]
for t in threads:
t.start()
for t in threads:
t.join()
return result
| 20,736
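Usage sketch for `concurrent_map` (the helper itself needs `from threading import Thread`); for large inputs a bounded pool such as `concurrent.futures.ThreadPoolExecutor.map` is usually the safer choice.

from threading import Thread  # dependency of concurrent_map

print(concurrent_map(lambda x: x * x, [1, 2, 3, 4]))  # [1, 4, 9, 16]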
|
def star_hexagon(xy, radius=5, **kwargs):
"""
|\
c | \ b
|__\
a
"""
x,y = xy
r = radius
a = 1/4*r
b = a*2
c = a*3**(1/2)
return plt.Polygon(xy=(
(x, y-2*c), (x+a, y-c), (x+a+b, y-c),
(x+b, y), (x+a+b, y+c), (x+a, y+c),
(x, y+2*c), (x-a, y+c), (x-a-b, y+c),
(x-b, y), (x-a-b, y-c), (x-a, y-c),
), closed=True, **kwargs)
| 20,737
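The `plt.Polygon` returned by `star_hexagon` still has to be added to an Axes to be drawn; a minimal sketch:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.add_patch(star_hexagon((0, 0), radius=5, facecolor='gold', edgecolor='black'))
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
ax.set_aspect('equal')
plt.show()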
|
def initialize(*args, **kwargs):
"""Instance creation"""
global TSI
TSI = TestServerInterface(*args, **kwargs)
TSI.startFifo()
| 20,738
|
def calibrate(leveled_arcs, sat_biases, stn_biases):
"""
Apply satellite and station differential code biases to leveled arcs and return a list of CalibratedArc objects.
"""
calibrated_arcs = []
for arc in leveled_arcs:
if arc.sat[0] == 'G':
sat_bias = sat_biases['GPS'][int(arc.sat[1:])][0] * NS_TO_TECU
stn_bias = stn_biases['GPS'][arc.stn.upper()][0] * NS_TO_TECU
elif arc.sat[0] == 'R':
sat_bias = sat_biases['GLONASS'][int(arc.sat[1:])][0] * NS_TO_TECU
stn_bias = stn_biases['GLONASS'][arc.stn.upper()][0] * NS_TO_TECU
else:
raise ValueError('Satellite bias for {} not found'.format(arc.sat))
data_map = {'gps_time': arc.gps_time.values,
'az': arc.az.values,
'el': arc.el.values,
'satx': arc.satx.values,
'saty': arc.saty.values,
'satz': arc.satz.values,
'sobs': arc.L_I + sat_bias + stn_bias,
'sprn': arc.P_I + sat_bias + stn_bias}
calibrated_arc = CalibratedArc(data_map)
calibrated_arc.xyz = arc.xyz
calibrated_arc.llh = arc.llh
calibrated_arc.stn = arc.stn
calibrated_arc.recv_type = arc.recv_type
calibrated_arc.sat = arc.sat
calibrated_arc.L = arc.L
calibrated_arc.L_scatter = arc.L_scatter
calibrated_arc.sat_bias = sat_bias
calibrated_arc.stn_bias = stn_bias
calibrated_arcs.append(calibrated_arc)
return calibrated_arcs
| 20,739
|
def test_google_bigquery_destination(sdc_builder, sdc_executor, gcp):
"""
Send data to Google BigQuery from Dev Raw Data Source and
confirm that the Google BigQuery destination successfully receives them using the Google BigQuery client.
This is achieved by using a deduplicator which assures that there is only one ingest to Google BigQuery.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> google_bigquery
record_deduplicator >> trash
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(CSV_DATA_TO_INSERT))
dataset_name = get_random_string(ascii_letters, 5)
table_name = get_random_string(ascii_letters, 5)
google_bigquery = pipeline_builder.add_stage('Google BigQuery', type='destination')
google_bigquery.set_attributes(dataset=dataset_name,
table_name=table_name)
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> google_bigquery
record_deduplicator >> trash
pipeline = pipeline_builder.build(title='Google BigQuery Destination').configure_for_environment(gcp)
sdc_executor.add_pipeline(pipeline)
bigquery_client = gcp.bigquery_client
schema = [SchemaField('full_name', 'STRING', mode='required'),
SchemaField('age', 'INTEGER', mode='required')]
dataset_ref = Dataset(bigquery_client.dataset(dataset_name))
try:
logger.info('Creating dataset %s using Google BigQuery client ...', dataset_name)
dataset = bigquery_client.create_dataset(dataset_ref)
table = bigquery_client.create_table(Table(dataset_ref.table(table_name), schema=schema))
logger.info('Starting BigQuery Destination pipeline and waiting for it to produce records ...')
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_batch_count(1)
logger.info('Stopping BigQuery Destination pipeline and getting the count of records produced in total ...')
sdc_executor.stop_pipeline(pipeline)
# Verify by reading records using Google BigQuery client
data_from_bigquery = [tuple(row.values()) for row in bigquery_client.list_rows(table)]
data_from_bigquery.sort()
logger.debug('read_data = {}'.format(data_from_bigquery))
assert ROWS_EXPECTED == data_from_bigquery
finally:
bigquery_client.delete_dataset(dataset_ref, delete_contents=True)
| 20,740
|
def negSamplingCostAndGradient(predicted, target, outputVectors, dataset, K=10):
"""
Implements the negative sampling cost function and gradients for word2vec
:param predicted: ndarray, the predicted (center) word vector(v_c)
:param target: integer, the index of the target word
:param outputVectors: 2D ndarray, output word vectors (as rows)
:param dataset: an interface into the dataset
:param K: integer, number of negative samples
:return:
cost: cost function for negative sampling
gradPred: gradient with respect to predicted (input / center) word vector
grad: gradient with respect to output word vectors
"""
grad = np.zeros(outputVectors.shape)
gradPred = np.zeros(predicted.shape)
indices = [target]
for k in xrange(K):
newidx = dataset.sampleTokenIdx()
while newidx == target:
newidx = dataset.sampleTokenIdx()
indices += [newidx]
labels = np.array([1] + [-1 for k in xrange(K)]).reshape(-1, 1)
vecs = outputVectors[indices, :]
t = sigmoid(vecs.dot(predicted.T) * labels)
cost = -np.sum(np.log(t))
delta = labels * (t - 1)
gradPred = delta.reshape((1, K + 1)).dot(vecs).flatten()
gradtemp = delta.dot(predicted)
for k in xrange(K + 1):
grad[indices[k]] += gradtemp[k, :]
return cost, gradPred, grad
| 20,741
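The snippet above relies on a `sigmoid` helper that is not shown; a standard elementwise definition consistent with how it is used:

import numpy as np

def sigmoid(x):
    """Elementwise logistic function sigma(x) = 1 / (1 + exp(-x))."""
    return 1.0 / (1.0 + np.exp(-x))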
|
def isotime(timestamp):
"""ISO 8601 formatted date in UTC from unix timestamp"""
return datetime.fromtimestamp(timestamp, pytz.utc).isoformat()
| 20,742
|
def initializeSeam():
"""
This function defines the seams of a baseball. It is
based, in large extant, on the work from
http://www.darenscotwilson.com/spec/bbseam/bbseam.html
"""
n = 109  # number of points we're calculating on the seam line
alpha = np.linspace(0,np.pi*2,n)
x = np.zeros(len(alpha))
y = np.zeros(len(alpha))
z = np.zeros(len(alpha))
R = (2 + 15/16.)/2
for i in range(len(alpha)):
x[i] = ((1/13)*R*((9*np.cos(alpha[i]) - 4*np.cos(3*alpha[i]))))
y[i] = ((1/13)*R*((9*np.sin(alpha[i]) + 4*np.sin(3*alpha[i]))))
z[i] = ((12/13)*R*np.cos(2*alpha[i]))
return x,y,z
| 20,743
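A plotting sketch for the seam curve returned by `initializeSeam` (assumes matplotlib with its 3D projection available):

import matplotlib.pyplot as plt

x, y, z = initializeSeam()
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.plot(x, y, z)
plt.show()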
|
def check_model_consistency(model, grounding_dict, pos_labels):
"""Check that serialized model is consistent with associated json files.
"""
groundings = {grounding for grounding_map in grounding_dict.values()
for grounding in grounding_map.values()}
model_labels = set(model.estimator.named_steps['logit'].classes_)
consistent_labels = groundings <= model_labels
shortforms = set(grounding_dict.keys())
model_shortforms = set(model.shortforms)
consistent_shortforms = shortforms == model_shortforms
consistent_pos_labels = set(pos_labels) <= model_labels
return consistent_labels and consistent_shortforms and \
consistent_pos_labels
| 20,744
|
def get_submission_info(tile_grid, collections, tile_indices,
period_start, period_end, period_freq):
""" Return information about tracked order submissions
"""
return {
'submitted': dt.datetime.today().isoformat(),
'collections': collections,
'tile_grid': tile_grid.to_dict(),
'tile_indices': list(tile_indices),
'period_start': period_start.isoformat(),
'period_end': period_end.isoformat(),
'period_freq': period_freq
}
| 20,745
|
def load_obj(path):
"""Load an object from a Python file.
path is relative to the data dir. The file is executed and the obj
local is returned.
"""
localdict = {}
with open(_DATADIR / path) as file:
exec(file.read(), localdict, localdict)
return localdict['obj']
| 20,746
|
def dp_policy_evaluation(env, pi, v=None, gamma=1, tol=1e-3, iter_max=100,
verbose=True):
"""Evaluates state-value function by performing iterative policy evaluation
via Bellman expectation equation (in-place)
Based on Sutton/Barto, Reinforcement Learning, 2nd ed. p. 75
Args:
env: Environment
pi: Policy
v: Initial value function or None
gamma: Discount factor
tol: Tolerance to stop iteration
iter_max: Maximum iteration count
Returns:
v: State-value function
"""
if v is None:
v = np.zeros(env.observation_space.n)
for i_iter in range(iter_max):
if verbose:
print("\r> DP Policy evaluation: Iteration {}/{}".format(
i_iter+1, iter_max), end="")
delta = 0
for state in range(env.observation_space.n):
v_new = 0
for action in range(env.action_space.n):
for (prob,state2,reward,done) in env.P[state][action]:
v_new += pi[state][action] * prob * (
reward + gamma*v[state2]
)
delta = max(delta, np.abs(v_new-v[state]))
v[state] = v_new
if delta < tol:
break
if verbose:
print()
return v
| 20,747
|
async def test_cut_video_aio():
"""
Test video scaling/cutting (async).
:return: None
"""
print('')
h264_obj = H264Video(constval.VIDEO, constval.OUTPUT_DIR, aio=True)
start_time = random.random() * 100
last_time = random.randint(int(start_time)+1, 1000)
print('current work dir', os.path.abspath(os.getcwd()))
print(f'start_time: {start_time:f}, last_time: {last_time:d}')
print(start_time, last_time)
home_dir = os.path.abspath(os.getenv('HOME'))
cuted_video, stderr = await h264_obj.cmd_do_aio(f'{home_dir:s}', 'mp4', FfmpegCmdModel.cut_video,
start_time=start_time,
last_time=last_time,
encode_lib=constval.CODEC,
target_videobitrate=500)
assert cuted_video is not None and stderr == ''
print('H264Video object info:', cuted_video)
print(f'out put video width:{cuted_video.video_width:d},video height:{cuted_video.video_height:d},'
f'video bit rate:{cuted_video.video_bitrate:d}')
slice_begin = random.randint(0, 120)
slice_end = random.randint(slice_begin, 240)
slice_count = random.randint(0,20)
print(f'begin: {slice_begin:d}, end: {slice_end:d}, count: {slice_count:d}')
print(h264_obj[slice_begin:slice_end:slice_count])
print(h264_obj[slice_end])
| 20,748
|
def gpst2utc(tgps, leaps_=-18):
""" calculate UTC-time from gps-time """
tutc = timeadd(tgps, leaps_)
return tutc
| 20,749
|
def create_shell(username, session_id, key):
"""Instantiates a CapturingSocket and SwiftShell and hooks them up.
After you call this, the returned CapturingSocket should capture all
IPython display messages.
"""
socket = CapturingSocket()
session = Session(username=username, session=session_id, key=key)
shell = SwiftShell.instance()
shell.display_pub.session = session
shell.display_pub.pub_socket = socket
return [socket, shell]
| 20,750
|
def _get_index_sort_str(env, name):
"""
Returns a string by which an object with the given name shall be sorted in
indices.
"""
ignored_prefixes = env.config.cmake_index_common_prefix
for prefix in ignored_prefixes:
if name.startswith(prefix) and name != prefix:
return name[len(prefix):]
return name
| 20,751
|
def giveError(message: str) -> None:
"""Display error message and exits program"""
print(colored(f"Error: {message}", 'red'))
exit()
| 20,752
|
def utcnow():
"""Gets current time.
:returns: current time from utc
:rtype: :py:obj:`datetime.datetime`
"""
return datetime.datetime.utcnow()
| 20,753
|
def elem2full(elem: str) -> str:
"""Retrieves full element name for short element name."""
for element_name, element_ids, element_short in PERIODIC_TABLE:
if elem == element_short:
return element_name
else:
raise ValueError(f"Element symbol {elem!r} does not match any element.")
| 20,754
|
def fixture_path(relapath=''):
""":return: absolute path into the fixture directory
:param relapath: relative path into the fixtures directory, or ''
to obtain the fixture directory itself"""
return os.path.join(os.path.dirname(__file__), 'fixtures', relapath)
| 20,755
|
def create_random_totp_secret(secret_length: int = 72) -> bytes:
"""
Generate a random TOTP secret
:param int secret_length: How long should the secret be?
:rtype: bytes
:returns: A random secret
"""
random = SystemRandom()
return bytes(random.getrandbits(8) for _ in range(secret_length))
| 20,756
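Authenticator apps typically expect the shared secret in base32 form (e.g. inside an otpauth:// URI); that encoding step is an assumption about downstream use, not part of the helper above.

import base64

secret = create_random_totp_secret(20)  # 20 bytes = 160 bits, a common choice
print(base64.b32encode(secret).decode("ascii"))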
|
def _get_roles_can_update(community_id):
"""Get the full list of roles that current identity can update."""
return _filter_roles("members_update", {"user", "group"}, community_id)
| 20,757
|
def register_external_compiler(op_name, fexternal=None, level=10):
"""Register the external compiler for an op.
Parameters
----------
op_name : str
The name of the operator.
fexternal : function (attrs: Attrs, args: List[Expr], compiler: str)
-> new_expr: Expr
The function for wrapping a call expr with compiler_begin and
compiler_end.
level : int
The priority level
"""
return tvm.ir.register_op_attr(op_name, "FTVMExternalCompiler", fexternal, level)
| 20,758
|
def kl_divergence_from_logits_bm(logits_a, logits_b):
"""Gets KL divergence from logits parameterizing categorical distributions.
Args:
logits_a: A tensor of logits parameterizing the first distribution.
logits_b: A tensor of logits parameterizing the second distribution.
Returns:
The (batch_size,) shaped tensor of KL divergences.
"""
beta_coeff = 1
alphas = tf.exp(logits_a)
betas = tf.exp(logits_b)
a_zero = tf.reduce_sum(alphas, -1)
loss1 = tf.lgamma(a_zero) - tf.reduce_sum(tf.lgamma(alphas), -1)
loss2 = tf.reduce_sum(
(alphas - betas) * (tf.digamma(alphas) - tf.digamma(tf.expand_dims(a_zero, -1))), -1)
kl_loss = loss1 + loss2
return kl_loss
| 20,759
|
def get_stats_fuzzy(stats, img, var_img, roi_set, suffix="", ignore_nan=True, ignore_inf=True, ignore_zerovar=True, mask=None, pv_threshold=0.):
"""
Get a set of statistics for a set of 'fuzzy' ROIs
:param img: 3D Numpy array
:param roi_set: 4D Numpy array with same dimensions as img and each volume
containing partial volumes for each ROI in the set
:return: Mapping from name of statistic to sequence of values, one for each ROI in the set.
This may be NaN or infinite depending on the input arguments.
"""
roi_shape = list(roi_set.shape)[:3]
if list(img.shape) != roi_shape:
raise ValueError("Image must have same dimensions as ROI")
if list(var_img.shape) != roi_shape:
raise ValueError("Variance image must have same dimensions as ROI")
if mask is not None and list(mask.shape) != roi_shape:
raise ValueError("Mask must have same dimensions as ROI")
if mask is None:
mask = np.ones(roi_shape, dtype=np.int)
if ignore_nan:
mask = np.logical_and(mask, ~np.isnan(img))
if ignore_inf:
mask = np.logical_and(mask, np.isfinite(img))
if ignore_zerovar:
mask = np.logical_and(mask, var_img > 0)
# Only take voxels where at least one of the ROIs has non-zero percentage
mask = np.logical_and(mask, np.sum(roi_set, axis=3) > pv_threshold)
# Flatten ROI PVs and data into masked 2D array
roi_array = roi_set[mask]
g = img[mask]
# Standardize ROI set so total PV is 1
roi_array = standardise(roi_array, mode='expand')
# Ask Jack about this???
#if var:
# roi_array = np.square(roi_array)
HT = roi_array.T
print(f" - Fuzzy ROI set: condition number for transfer matrix (unweighted) = {np.linalg.cond(HT):.2f}.")
# Calculate roi means by linear regression
means_lstsq, _res, _rank, _s = np.linalg.lstsq(HT@roi_array, HT@g[..., np.newaxis],
rcond=None) # None uses future default
# and silences warning
# Note that we do not report stats for the 'background' ROI added to ensure total PV of 1
stats["Nvoxels" + suffix] = [np.count_nonzero(roi_array[:, idx] > pv_threshold) for idx in range(roi_set.shape[-1])]
stats["Mean" + suffix] = np.atleast_1d(np.squeeze(means_lstsq[:-1]))
# If variance has been supplied add a precision-weighted mean
if var_img is not None:
V_inv = scipy.sparse.diags(1/var_img[mask])
HT = roi_array.T @ V_inv
print(f" - Fuzzy ROI set: condition number for transfer matrix (prec-weighted) = {np.linalg.cond(HT):.2f}.")
# Calculate roi means by linear regression
means_lstsq, _res, _rank, _s = np.linalg.lstsq(HT@roi_array, HT@g[..., np.newaxis],
rcond=None) # None uses future default
# and silences warning
stats["Precision-weighted mean" + suffix] = np.atleast_1d(np.squeeze(means_lstsq[:-1]))
| 20,760
|
def if_stopped_or_playing(speaker, action, args, soco_function, use_local_speaker_list):
"""Perform the action only if the speaker is currently in the desired playback state"""
state = speaker.get_current_transport_info()["current_transport_state"]
logging.info(
"Condition: '{}': Speaker '{}' is in state '{}'".format(
action, speaker.player_name, state
)
)
if (state != "PLAYING" and action == "if_playing") or (
state == "PLAYING" and action == "if_stopped"
):
logging.info("Action suppressed")
return True
action = args[0]
args = args[1:]
logging.info(
"Action invoked: '{} {} {}'".format(speaker.player_name, action, " ".join(args))
)
return process_action(
speaker, action, args, use_local_speaker_list=use_local_speaker_list
)
| 20,761
|
def test_quorum_slices_to_definition():
"""Test quorum_slices_to_definition()"""
assert quorum_slices_to_definition([{'A', 'B'}, {'C'}]) == {
'threshold': 1,
'nodes': set(),
'children_definitions': [{
'threshold': 2,
'nodes': {'A', 'B'},
'children_definitions': set()
}, {
'threshold': 1,
'nodes': {'C'},
'children_definitions': set()
}]
}
| 20,762
|
def compute_src_graph(hive_holder, common_table):
""" computes just the src part of the full version graph.
Side effect: updates requirements of blocks to actually point to real dep versions
"""
graph = BlockVersionGraph()
versions = hive_holder.versions
graph.add_nodes(versions.itervalues())
references = References()
for block_holder in hive_holder.block_holders:
dep_table = block_holder.requirements
base_version = versions[block_holder.block_name]
for target_bcn in block_holder.external_targets():
target_block_name = target_bcn.block_name
if target_block_name in versions:
other_version = versions[target_block_name]
else:
other_version = common_table[target_block_name]
references[other_version].add(target_bcn.cell_name)
graph.add_edge(base_version, other_version)
dep_table.add_version(other_version)
return graph, references
| 20,763
|
def get_uv(seed=0, nrm=False, vector=False):
"""Dataset with random univariate data
Parameters
----------
seed : None | int
Seed the numpy random state before generating random data.
nrm : bool
Add a nested random-effects variable (default False).
vector : bool
Add a 3d vector variable as ``ds['v']`` (default ``False``).
"""
if seed is not None:
np.random.seed(seed)
ds = permute([('A', ('a1', 'a2')),
('B', ('b1', 'b2')),
('rm', ['s%03i' % i for i in range(20)])])
ds['rm'].random = True
ds['intvar'] = Var(np.random.randint(5, 15, 80))
ds['intvar'][:20] += 3
ds['fltvar'] = Var(np.random.normal(0, 1, 80))
ds['fltvar'][:40] += 1.
ds['fltvar2'] = Var(np.random.normal(0, 1, 80))
ds['fltvar2'][40:] += ds['fltvar'][40:].x
ds['index'] = Var(np.repeat([True, False], 40))
if nrm:
ds['nrm'] = Factor(['s%03i' % i for i in range(40)], tile=2, random=True)
if vector:
x = np.random.normal(0, 1, (80, 3))
x[:40] += [.3, .3, .3]
ds['v'] = NDVar(x, (Case, Space('RAS')))
return ds
| 20,764
|
def private_names_for(cls, names):
"""
Returns:
Iterable of private names using private_name_for()"""
if not isinstance(names, Iterable):
raise TypeError('names must be an iterable')
return (private_name_for(item, cls) for item in names)
| 20,765
|
def check_xpbs_install() -> None:
"""Try to get the install path of third party tool
Xpbs (https://github.com/FranckLejzerowicz/Xpbs).
If it exists, nothing happens and the code proceeds.
Otherwise, the code ends and tells what to do.
"""
ret_code, ret_path = subprocess.getstatusoutput('which Xpbs')
if ret_code:
print('Xpbs is not installed:\n either use `--no-jobs` to not '
'prepare Torque/Slurm job scripts,\n or make sure to install '
'Xpbs (https://github.com/FranckLejzerowicz/Xpbs) '
'and to edit its config.txt (see readme))\nExiting...')
sys.exit(1)
else:
with open(ret_path) as f:
for line in f:
break
if line.startswith('$HOME'):
print('Xpbs is installed but its config.txt '
'needs editing!\nExiting...')
sys.exit(1)
| 20,766
|
def find_vcs_root(location="", dirs=(".git", ".hg", ".svn"), default=None) -> str:
"""Return current repository root directory."""
if not location:
location = os.getcwd()
prev, location = None, os.path.abspath(location)
while prev != location:
if any(os.path.isdir(os.path.join(location, d)) for d in dirs):
return location
prev, location = location, os.path.abspath(os.path.join(location, os.pardir))
return default
| 20,767
|
def invert_trimat(A, lower=False, right_inv=False, return_logdet=False, return_inv=False):
"""Inversion of triangular matrices.
Returns lambda function f that multiplies the inverse of A times a vector.
Args:
A: Triangular matrix.
lower: if True A is lower triangular, else A is upper triangular.
right_inv: If False, f(v)=A^{-1}v; if True f(v)=v' A^{-1}
return_logdet: If True, it also returns the log determinant of A.
return_inv: If True, it also returns A^{-1}
Returns:
Lambda function that multiplies A^{-1} times vector.
Log determinant of A
A^{-1}
"""
if right_inv:
fh=lambda x: la.solve_triangular(A.T, x.T, lower=not(lower)).T
else:
fh=lambda x: la.solve_triangular(A, x, lower=lower)
if return_logdet or return_inv:
r = [fh]
else:
r = fh
if return_logdet:
logdet=np.sum(np.log(np.diag(A)))
r.append(logdet)
if return_inv:
invA=fh(np.eye(A.shape[0]))
r.append(invA)
return r
| 20,768
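A quick numerical cross-check of the solver lambda returned by `invert_trimat` for the lower-triangular case (the helper assumes `scipy.linalg` is imported as `la` and NumPy as `np`):

import numpy as np

A = np.array([[2.0, 0.0],
              [1.0, 3.0]])
b = np.array([2.0, 7.0])
solve = invert_trimat(A, lower=True)
print(solve(b))               # [1. 2.]
print(np.linalg.solve(A, b))  # same result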
|
def category_input_field_delete(request, structure_slug,
category_slug, module_id,
field_id, structure):
"""
Deletes a field from a category input module
:type structure_slug: String
:type category_slug: String
:type module_id: Integer
:type field_id: Integer
:type structure: OrganizationalStructure (from @is_manager)
:param structure_slug: structure slug
:param category_slug: category slug
:param module_id: input module id
:param field_id: module field id
:param structure: structure object (from @is_manager)
:return: redirect
"""
category = get_object_or_404(TicketCategory,
organizational_structure=structure,
slug=category_slug)
module = get_object_or_404(TicketCategoryModule,
pk=module_id,
ticket_category=category)
if not module.can_be_deleted():
# log action
logger.error('[{}] manager of structure {}'
' {} tried to delete a field'
' from module {} of category {}'.format(timezone.localtime(),
structure,
request.user,
module,
category))
messages.add_message(request, messages.ERROR,
_("Impossibile eliminare il modulo {}."
" Ci sono delle richieste collegate").format(module))
else:
field = get_object_or_404(TicketCategoryInputList,
pk=field_id,
category_module=module)
# log action
logger.info('[{}] manager of structure {}'
' {} deleted the field {}'
' from module {} of category {}'.format(timezone.localtime(),
structure,
request.user,
field,
module,
category))
field.delete()
messages.add_message(request, messages.SUCCESS,
_("Campo {} eliminato con successo").format(field.name))
return redirect('uni_ticket:manager_category_input_module',
structure_slug=structure_slug,
category_slug=category_slug,
module_id=module_id)
| 20,769
|
def cat_to_num(att_df):
"""
Changes categorical variables in a dataframe to numerical
"""
att_df_encode = att_df.copy(deep=True)
for att in att_df_encode.columns:
if att_df_encode[att].dtype != float:
att_df_encode[att] = pd.Categorical(att_df_encode[att])
att_df_encode[att] = att_df_encode[att].cat.codes
return att_df_encode
| 20,770
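A small demonstration of `cat_to_num` (assumes pandas imported as `pd`, as in the helper itself); string categories are replaced by their alphabetical category codes while float columns are left untouched.

import pandas as pd

df = pd.DataFrame({"color": ["red", "blue", "red"], "weight": [1.0, 2.5, 3.0]})
print(cat_to_num(df))
#    color  weight
# 0      1     1.0
# 1      0     2.5
# 2      1     3.0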
|
async def handle_get(request):
"""Handle GET request, can be display at http://localhost:8080"""
text = (f'Server is running at {request.url}.\n'
f'Try `curl -X POST --data "text=test" {request.url}example`\n')
return web.Response(text=text)
| 20,771
|
def values_target(size: tuple, value: float, cuda: bool = False) -> Variable:
""" returns tensor filled with value of given size """
result = Variable(full(size=size, fill_value=value))
if cuda:
result = result.cuda()
return result
| 20,772
|
def get_new_perpendicular_point_with_custom_distance_to_every_line_segment(
line_segments: np.ndarray, distance_from_the_line: np.ndarray
):
"""
:param line_segments: array of shape [number_of_line_segments, 2, 2]
:param distance_from_the_line: how far the new point to create from the reference
:return:
"""
return new_perpendicular_point_to_line_segment(
line_segments, distance_from_the_line
)
| 20,773
|
def test_files_safen_path(mongodb_settings, filename, fuuid):
"""Verify that regular and url-encoded paths are equivalent
"""
base = FileStore(mongodb_settings)
doc = FileRecord({'name': filename})
resp = base.add_update_document(doc)
assert resp['uuid'] == fuuid
| 20,774
|
def tmdb_find_movie(movie: str, tmdb_api_token: str):
"""
Search the tmdb api for movies by title
Args:
movie (str): the title of a movie
tmdb_api_token (str): your tmdb v3 api token
Returns:
dict
"""
url = 'https://api.themoviedb.org/3/search/movie?'
params = {'query': movie, 'language': 'en-US', 'api_key': tmdb_api_token, }
return requests.get(url, params).json()
| 20,775
|
def bus_routes():
"""
Gets all the bus routes from the LTA API and store them in bus_routes.txt
Each row in bus_routes.txt will have a bus service number, direction, bus stop code, bus stop name,
first and last bus timings for weekdays, Saturday and Sunday
"""
os.remove('bus_routes.txt')
bus_stop_list = get_bus_stop_name()
length_json, interval = 500, 1
while length_json == 500:
url = "http://datamall2.mytransport.sg/ltaodataservice/BusRoutes?$skip={}".format(interval * 500)
headers = {'AccountKey': ACCOUNT_KEY}
response = requests.get(url, headers=headers).json()
routes = response['value']
for route in routes:
for bus_stop in bus_stop_list:
if route['BusStopCode'] == bus_stop[0]:
with open('bus_routes.txt', 'a') as r:
r.write('{} | {} | {} | {} | {} | {} | {} '
'| {} | {} | {}\n'.format(route['ServiceNo'], route['Direction'], route['BusStopCode'],
bus_stop[1].upper(), route['WD_FirstBus'],
route['WD_LastBus'], route['SAT_FirstBus'],
route['SAT_LastBus'], route['SUN_FirstBus'],
route['SUN_LastBus']))
length_json = len(response['value'])
interval += 1
| 20,776
|
def is_missing_artifact_error(err: WandbError):
"""
Check if a specific W&B error is caused by a 404 on the artifact we're looking for.
"""
# This is brittle, but at least we have a test for it.
return "does not contain artifact" in err.message
| 20,777
|
def robust_makedirs(path):
""" create a directory in a robust race safe manner if not
already existing.
Good for multiprocessing / threading or cases where
multiple actors might create a directory
"""
if not os.path.isdir(path):
try:
os.makedirs(path)
except Exception:
if not os.path.isdir(path):
if os.path.isfile(path):
raise MyTBError("path %r is not a directory" % path)
else:
raise
| 20,778
|
def activate_locale(locale=None, app=None):
"""Active an app or a locale."""
prefixer = old_prefix = get_url_prefix()
old_app = old_prefix.app
old_locale = translation.get_language()
if locale:
rf = RequestFactory()
prefixer = Prefixer(rf.get('/%s/' % (locale,)))
translation.activate(locale)
if app:
prefixer.app = app
set_url_prefix(prefixer)
yield
old_prefix.app = old_app
set_url_prefix(old_prefix)
translation.activate(old_locale)
| 20,779
|
def create_reforecast_valid_times(start_year=2000):
"""Inits from year 2000 to 2019 for the same days as in 2020."""
reforecasts_inits = []
inits_2020 = create_forecast_valid_times().forecast_time.to_index()
for year in range(start_year, reforecast_end_year + 1):
# dates_year = pd.date_range(start=f"{year}-01-02", end=f"{year}-12-31", freq="7D")
dates_year = pd.DatetimeIndex([i.strftime("%Y%m%d").replace("2020", str(year)) for i in inits_2020])
dates_year = xr.DataArray(
dates_year,
dims="forecast_time",
coords={"forecast_time": dates_year},
)
reforecasts_inits.append(dates_year)
reforecasts_inits = xr.concat(reforecasts_inits, dim="forecast_time")
reforecast_valid_times = create_valid_time_from_forecast_time_and_lead_time(reforecasts_inits, leads)
reforecast_valid_times = (
reforecast_valid_times.rename("test").assign_coords(valid_time=reforecast_valid_times).to_dataset()
)
reforecast_valid_times = xr.ones_like(reforecast_valid_times).astype("float32")
return reforecast_valid_times
| 20,780
|
def _checkerror(fulloutput):
"""
Function to check the full output for known strings and plausible fixes to the error.
Future: add items to `edict` where the key is a unique string contained in the offending
output, and the data is the recommended solution to resolve the problem
"""
edict = {'multiply': ('NOTE: you might(?) need to clean the `tmp/` folder!'),
'already defined': ('NOTE: you probably (might?) need to clean the `tmp/` folder!'),
'unresolved externals': ('NOTE: consider recompiling the linked libraries to'
'have the correct name mangling for cl.exe:'
'ifort: /names:lowercase /assume:underscore '),
"KeyError: 'void'": ('There may be an issue with public/private function '
'definitions or a missing variable definition in the last '
'function listed above. For the first error consider using '
'the parameter `functiondict` or checking to ensure all '
'module functions are public... For the second error, check '
'that all of the parameters in the subroutine are defined'),
"No such file or directory": ('There may be a space in the path to one of the '
'source code or library folders'),
"LINK : fatal error LNK1104: cannot open file": ('The pyd is currently in use, '
'restart any kernels using it !')
}
# iterate through the keys in the error dictionary and see if the key is in the full output
extramessage = ''
for error_key in edict.keys():
if error_key in fulloutput:
extramessage = edict[error_key]
return extramessage
| 20,781
|
def create_pre_process_block(net, ref_layer_name, means, scales=None):
"""
Generates the pre-process block for the IR XML
Args:
net: root XML element
ref_layer_name: name of the layer where it is referenced to
means: tuple of values
scales: tuple of values
Returns:
pre-process XML element
"""
pre_process = SubElement(net, 'pre-process')
pre_process.set('reference-layer-name', ref_layer_name)
for idx in range(len(means)):
channel_xml = SubElement(pre_process, 'channel')
channel_xml.set('id', str(idx))
mean_xml = SubElement(channel_xml, 'mean')
mean_xml.set('value', str(means[idx]))
if scales:
scale_xml = SubElement(channel_xml, 'scale')
scale_xml.set('value', str(scales[idx]))
return pre_process
| 20,782
|
def tabWidget_func(value, main_window):
"""Connect main tabWidget."""
main_window.scene.current_tab_idx = value
fill_listWidget_with_data(main_window.scene.project_data, main_window.listWidget, value)
set_selected_id_in_listWidget(main_window.scene, 0)
| 20,783
|
def GetSystemFaultsFromState(state, spot_wrapper):
"""Maps system fault data from robot state proto to ROS SystemFaultState message
Args:
state: Robot State proto
spot_wrapper: A SpotWrapper object
Returns:
SystemFaultState message
"""
system_fault_state_msg = SystemFaultState()
system_fault_state_msg.faults = getSystemFaults(state.system_fault_state.faults, spot_wrapper)
system_fault_state_msg.historical_faults = getSystemFaults(state.system_fault_state.historical_faults, spot_wrapper)
return system_fault_state_msg
| 20,784
|
def findAnEven(L):
"""
:Assumes L is a list of integers:
:Returns the first even number in L:
:Raises ValueError if L does not contain an even number:
"""
for num in L:
if num % 2 == 0:
return num
raise ValueError
| 20,785
|
def get_points(wire):
"""
get all points (including starting point), where the wire bends
>>> get_points(["R75","D30","R83","U83","L12","D49","R71","U7","L72"])
[((0, 0), (75, 0)), ((75, 0), (75, -30)), ((75, -30), (158, -30)), ((158, -30), (158, 53)), ((158, 53), (146, 53)), ((146, 53), (146, 4)), ((146, 4), (217, 4)), ((217, 4), (217, 11)), ((217, 11), (145, 11))]
>>> get_points(["U62","R66","U55","R34","D71","R55","D58","R83"])
[((0, 0), (0, 62)), ((0, 62), (66, 62)), ((66, 62), (66, 117)), ((66, 117), (100, 117)), ((100, 117), (100, 46)), ((100, 46), (155, 46)), ((155, 46), (155, -12)), ((155, -12), (238, -12))]
>>> get_points(["R98","U47","R26","D63","R33","U87","L62","D20","R33","U53","R51"])
[((0, 0), (98, 0)), ((98, 0), (98, 47)), ((98, 47), (124, 47)), ((124, 47), (124, -16)), ((124, -16), (157, -16)), ((157, -16), (157, 71)), ((157, 71), (95, 71)), ((95, 71), (95, 51)), ((95, 51), (128, 51)), ((128, 51), (128, 104)), ((128, 104), (179, 104))]
>>> get_points(["U98","R91","D20","R16","D67","R40","U7","R15","U6","R7"])
[((0, 0), (0, 98)), ((0, 98), (91, 98)), ((91, 98), (91, 78)), ((91, 78), (107, 78)), ((107, 78), (107, 11)), ((107, 11), (147, 11)), ((147, 11), (147, 18)), ((147, 18), (162, 18)), ((162, 18), (162, 24)), ((162, 24), (169, 24))]
"""
starting_point = (0, 0)
result = []
for part in wire:
end_point = get_end_point(starting_point, part)
result.append((starting_point, end_point))
starting_point = end_point
return result
| 20,786
|
def getcutscheckerboard(rho):
"""
:param rho:
:return: cell centers and values along horizontal, vertical, diag cut
"""
ny, nx = rho.shape
assert nx == ny
n = ny
horizontal = rho[6 * n // 7, :]
vertical = rho[:, n // 7]
if np.abs(horizontal[0]) < 1e-15:
horizontal = horizontal[2:-2]
if np.abs(vertical[0]) < 1e-15:
vertical = vertical[2:-2]
diag = [rho[i, i] for i in range(n)]
if np.abs(diag[0]) < 1e-15:
diag = diag[2:-2]
edges = np.linspace(0, 7, len(horizontal) + 1)
centers = (edges[1:] + edges[:-1]) / 2
return centers, horizontal, vertical, diag
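
# Illustrative call on a synthetic 7x7 field (arbitrary, strictly positive values,
# so the zero-boundary trimming branch is not triggered):
import numpy as np
rho = np.arange(49, dtype=float).reshape(7, 7) + 1.0
centers, horizontal, vertical, diag = getcutscheckerboard(rho)
print(centers)                 # 7 cell centres spread over [0, 7]
print(horizontal, vertical)    # row cut near y = 6/7, column cut near x = 1/7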
| 20,787
|
def errorString(node, error):
"""
Format error messages for node errors returned by checkLinkoStructure.
inputs:
node - the node for the error.
error - a (backset, foreset) tuple, where backset is the set of
missing backlinks and foreset is the set of missing forelinks.
returns: string
string - the error string message.
"""
back, fore = error[0], error[1]
if len(back) == 0:
back = 'None'
if len(fore) == 0:
fore = 'None'
return ('Node {0}: missing backlinks {1},'
' missing forelinks {2}').format(node, back, fore)
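
# Example of the resulting message (node name and link sets are made up):
print(errorString('v3', ({'v1'}, set())))
# -> Node v3: missing backlinks {'v1'}, missing forelinks None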
| 20,788
|
def test_2():
"""
    query: compare average sales of A and B in the date range 2000 to 2010.
    Here the oversight is not detected, as the two companies do not differ in
    experience time.
"""
table = pandas.DataFrame()
table['Company'] = pandas.Series(['A', 'B', 'A', 'B', 'A', 'B', 'A', 'B'])
table['year'] = pandas.Series(['2001', '2001', '2002', '2002', '2006', '2006', '2007', '2007'])
table['sales'] = pandas.Series([1, 34, 23, 42, 23, 1324, 34, 134])
print(table)
suggestion = calendar_vs_experience_time.calendar_vs_experience_time(table, 'sales',
['Company', 'year', 'sales'],
'Company', 'A', 'B',
SummaryOperators.MEAN,
date_column_name='year',
date_range=['2000-01-01', '2010-01-01'],
date_format='%Y')
print(suggestion)
expected_suggestion = 'None'
assert(str(suggestion) == expected_suggestion)
| 20,789
|
def deep_update(target, source):
"""
Deep merge two dicts
"""
if isinstance(source, dict):
for key, item in source.items():
if key in target:
target[key] = deep_update(target[key], item)
else:
target[key] = source[key]
return target
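
# Behaviour sketch with arbitrary values: existing entries in `base` win, while
# missing keys are filled in from `extra`.
base = {'a': 1, 'b': {'c': 2}}
extra = {'a': 99, 'b': {'c': 100, 'd': 3}, 'e': 4}
print(deep_update(base, extra))
# -> {'a': 1, 'b': {'c': 2, 'd': 3}, 'e': 4}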
| 20,790
|
def verify_iou_value(issued_currency_value: str) -> None:
"""
Validates the format of an issued currency amount value.
Raises if value is invalid.
Args:
issued_currency_value: A string representing the "value"
field of an issued currency amount.
Returns:
None, but raises if issued_currency_value is not valid.
Raises:
XRPLBinaryCodecException: If issued_currency_value is invalid.
"""
decimal_value = Decimal(issued_currency_value)
if decimal_value.is_zero():
return
exponent = decimal_value.as_tuple().exponent
if (
(_calculate_precision(issued_currency_value) > _MAX_IOU_PRECISION)
or (exponent > _MAX_IOU_EXPONENT)
or (exponent < _MIN_IOU_EXPONENT)
):
raise XRPLBinaryCodecException(
"Decimal precision out of range for issued currency value."
)
_verify_no_decimal(decimal_value)
| 20,791
|
def md5_hash_file(path):
"""
    Return an MD5 hex digest for a file, or None if the path could not be read.
"""
hasher = hashlib.md5()
try:
with open(path, 'rb') as afile:
buf = afile.read()
hasher.update(buf)
return hasher.hexdigest()
except IOError:
# This may happen if path has been deleted
return None
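
# Quick smoke test (paths are placeholders): a readable file yields a
# 32-character hex digest, an unreadable path yields None.
print(md5_hash_file(__file__))         # -> 32-character hex string
print(md5_hash_file('/no/such/file'))  # -> None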
| 20,792
|
def Constant(value):
"""
Produce an object suitable for use as a source in the 'connect' function that
evaluates to the given 'value'
:param value: Constant value to provide to a connected target
:return: Output instance port of an instance of a Block that produces the given constant when evaluated
"""
global _constantCounter
blockName = "Constant" + str(_constantCounter)
constBlock = defineBlock(blockName)
defineOutputs(constBlock, "out")
defineBlockOutputBehaviour(constBlock.out, lambda: value)
setMetaData(constBlock.out, "Sensation-Producing", False)
inst = createInstance(blockName, "constant" + str(_constantCounter))
_constantCounter += 1
return inst.out
| 20,793
|
def _Backward3a_T_Ps(P, s):
"""Backward equation for region 3a, T=f(P,s)
Parameters
----------
P : float
Pressure [MPa]
s : float
Specific entropy [kJ/kgK]
Returns
-------
T : float
Temperature [K]
References
----------
IAPWS, Revised Supplementary Release on Backward Equations for the
Functions T(p,h), v(p,h) and T(p,s), v(p,s) for Region 3 of the IAPWS
Industrial Formulation 1997 for the Thermodynamic Properties of Water and
Steam, http://www.iapws.org/relguide/Supp-Tv%28ph,ps%293-2014.pdf, Eq 6
Examples
--------
>>> _Backward3a_T_Ps(20,3.8)
628.2959869
>>> _Backward3a_T_Ps(100,4)
705.6880237
"""
I = [-12, -12, -10, -10, -10, -10, -8, -8, -8, -8, -6, -6, -6, -5, -5, -5,
-4, -4, -4, -2, -2, -1, -1, 0, 0, 0, 1, 2, 2, 3, 8, 8, 10]
J = [28, 32, 4, 10, 12, 14, 5, 7, 8, 28, 2, 6, 32, 0, 14, 32, 6, 10, 36, 1,
4, 1, 6, 0, 1, 4, 0, 0, 3, 2, 0, 1, 2]
n = [0.150042008263875e10, -0.159397258480424e12, 0.502181140217975e-3,
-0.672057767855466e2, 0.145058545404456e4, -0.823889534888890e4,
-0.154852214233853, 0.112305046746695e2, -0.297000213482822e2,
0.438565132635495e11, 0.137837838635464e-2, -0.297478527157462e1,
0.971777947349413e13, -0.571527767052398e-4, 0.288307949778420e5,
-0.744428289262703e14, 0.128017324848921e2, -0.368275545889071e3,
0.664768904779177e16, 0.449359251958880e-1, -0.422897836099655e1,
-0.240614376434179, -0.474341365254924e1, 0.724093999126110,
0.923874349695897, 0.399043655281015e1, 0.384066651868009e-1,
-0.359344365571848e-2, -0.735196448821653, 0.188367048396131,
0.141064266818704e-3, -0.257418501496337e-2, 0.123220024851555e-2]
Pr = P/100
sigma = s/4.4
suma = 0
for i, j, ni in zip(I, J, n):
suma += ni * (Pr+0.240)**i * (sigma-0.703)**j
return 760*suma
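
# The correlation implemented above, written out as read off the code (IAPWS
# Supp-Tv(ph,ps)3, Eq. 6): with reduced variables \pi = P / (100 MPa) and
# \sigma = s / (4.4 kJ kg^{-1} K^{-1}),
#
#     T = 760 K \cdot \sum_i n_i (\pi + 0.240)^{I_i} (\sigma - 0.703)^{J_i}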
| 20,794
|
def calculate_width_and_height(url_parts, options):
"""Appends width and height information to url"""
width = options.get("width", 0)
has_width = width
height = options.get("height", 0)
has_height = height
flip = options.get("flip", False)
flop = options.get("flop", False)
if flip:
width = width * -1
if flop:
height = height * -1
if not has_width and not has_height:
if flip:
width = "-0"
if flop:
height = "-0"
if width or height:
url_parts.append("%sx%s" % (width, height))
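
# Illustrative calls with made-up options (thumbor-style URL assembly is assumed
# here, not confirmed by the source): flipping negates a dimension, and a flip
# with no explicit size produces "-0".
parts = []
calculate_width_and_height(parts, {"width": 300, "height": 200, "flip": True})
print(parts)   # -> ['-300x200']

parts = []
calculate_width_and_height(parts, {"flip": True})
print(parts)   # -> ['-0x0']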
| 20,795
|
def db_handler(args):
"""db_handler."""
if args.type == 'create':
if args.db is None:
db.init_db()
return
if not _setup_db(args.db):
return
if args.type == 'status':
current_rev = db_revision.current_db_revision()
print('current_rev', current_rev)
if args.type == 'upgrade':
db.upgrade()
if args.type == 'revision':
db_revision.new_revision()
if args.type == 'drop':
if args.db is not None:
db.downgrade()
db.remove_db()
| 20,796
|
def expandvars(s):
"""Expand environment variables of form %var%.
Unknown variables are left unchanged.
"""
global _env_rx
if '%' not in s:
return s
if _env_rx is None:
import re
_env_rx = re.compile(r'%([^|<>=^%]+)%')
return _env_rx.sub(_substenv, s)
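
# Behaviour sketch, assuming _substenv (not shown here) looks names up in
# os.environ and leaves unknown variables untouched, as the docstring states.
import os
os.environ['DEMOVAR'] = 'hello'
print(expandvars('%DEMOVAR% world, %MISSING% stays put'))
# -> 'hello world, %MISSING% stays put'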
| 20,797
|
def main():
"""
    Parse command-line parameters and dispatch the selected sub-command.
"""
parser = argparse.ArgumentParser(add_help=False,
description="Don't worry loves, cavalry's here!")
subparsers = parser.add_subparsers(dest='command')
parser_config = subparsers.add_parser('config',
description='Configure general authentication.')
parser_config.add_argument('-cs', '--clientsecret',
help='path to Google Drive client secret json file')
parser_config.add_argument('-ac', '--authcode',
help='Google Drive OAuth2 authorization code')
parser_config.add_argument('-a', '--appname',
help='name of Google Drive app to register (custom)')
parser_config.add_argument('-t', '--temp',
default=tempfile.gettempdir(),
help='save temporary files to this path')
parser_init = subparsers.add_parser('init',
description='Initializes a directory to be pushed to remote.')
parser_init.add_argument('-g', '--gnupg',
help='GnuPG key rings directory path')
parser_init.add_argument('-i', '--keyid',
help='keypair ID to use to encrypt directory files')
parser_init.add_argument('-n', '--enable-names',
help='do not encrypt file names',
action='store_true', default=False)
parser_push = subparsers.add_parser('push',
description='Pushes all unchanged files in the current directory.')
parser_push.add_argument('-f', '--force', action='store_true',
help='skips file change verification')
parser_pull = subparsers.add_parser('pull',
description='Pulls all unchanged files in the current directory')
parser_pull.add_argument('-f', '--force', action='store_true',
help='skips file change verification')
args = parser.parse_args()
command = args.command
if (command not in COMMANDS):
parser.print_help()
sys.exit(-1)
if (command == 'config'):
cmd_config(args)
elif (command == 'init'):
cmd_init(args)
elif (command == 'push'):
cmd_push(args)
elif (command == 'pull'):
cmd_pull(args)
| 20,798
|
def run_queries(q, file):
"""Run Twitter username queires against Twitter API.
Args:
q (tuple): A tuple of query strings.
file (str): A str filepath to a save results.
"""
data = csv(cd(file)) # modified to point to Data dir.
seen = set(col(0, data))
    for query in reversed(q):
        for t in twitter(query):
if t.id not in seen:
data.append((
t.id,
t.author,
t.language,
t.text,
t.date,
t.likes,
))
seen.add(t.id)
data.save()
| 20,799
|