content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def read_loss_file(path):
    """Parse a loss CSV file into per-subnetwork series for plotting.

    Args:
        path (string): Path of the CSV file to read.

    Returns:
        A list of lists, one list per subnetwork, each holding that
        subnetwork's loss values over time.
    """
    series = []
    with open(path, 'r') as handle:
        for row in csv.reader(handle):
            # The first column is the epoch number; allocate one series
            # per remaining column when the first data row is seen.
            if not series:
                series = [[] for _ in row[1:]]
            for column, cell in enumerate(row[1:]):
                series[column].append(float(cell))
    return series
def number2human(n: Union[int, float]) -> str:
    """
    Format large number into readable string for a human
    Examples:
        >>> number2human(1000)
        '1.0K'
        >>> number2human(1200000)
        '1.2M'
    """
    # Based on http://code.activestate.com/recipes/578019
    # Pair each suffix with its power-of-1000 threshold, largest first.
    scales = [((10 ** 3) ** (index + 1), suffix)
              for index, suffix in enumerate(('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'))]
    for divisor, suffix in reversed(scales):
        if n >= divisor:
            return '%.1f%s' % (float(n) / divisor, suffix)
    # Below 1000: plain fixed-point rendering.
    return "%.2f" % n
def binomial_confidence_interval(successes, trials, error_rate):
    """Computes a confidence interval on the true p of a binomial.

    Assumes the given `successes` count outcomes of an iid Bernoulli
    trial with unknown probability p that was repeated `trials` times.
    The probability (over the randomness of drawing the given sample)
    that the true p lies outside the returned interval is no more than
    the given `error_rate`.

    Args:
        successes: Python or numpy `int` number of successes.
        trials: Python or numpy `int` number of trials.
        error_rate: Python `float` admissible rate of mistakes.

    Returns:
        low_p: Lower bound of confidence interval.
        high_p: Upper bound of confidence interval.
    """
    log_half_error = np.log(error_rate / 2.)
    point_estimate = successes / float(trials)

    def upper_excess(p):
        # Positive iff p is smaller than the desired upper bound.
        return stats.binom.logcdf(successes, trials, p) - log_half_error

    def lower_excess(p):
        # Positive iff p is larger than the desired lower bound.  The
        # survival function excludes its argument, so shift by one to
        # include it.
        return stats.binom.logsf(successes - 1, trials, p) - log_half_error

    high_p = 1.
    if successes < trials:
        high_p = optimize.brentq(
            upper_excess, point_estimate, 1., rtol=1e-9)
    low_p = 0.
    if successes > 0:
        low_p = optimize.brentq(
            lower_excess, 0., point_estimate, rtol=1e-9)
    return low_p, high_p
def try_parse_func_decl(start, end):
    """Parse a function declarator between start and end.

    Expects that tokens[end-1] is a close parenthesis.  If a function
    declarator is successfully parsed, returns the decl_nodes.Function
    object.  Otherwise (parameter-list parse error, or the list does not
    stop exactly at the closing parenthesis), returns None.
    """
    open_paren = find_pair_backward(end - 1)
    try:
        params, stop = parse_parameter_list(open_paren + 1)
    except ParserError as err:
        log_error(err)
        return None
    if stop != end - 1:
        return None
    return decl_nodes.Function(params, parse_declarator(start, open_paren))
def eth_getBlockTransactionCountByNumber(block_number: int) -> int:
    """ See EthereumAPI#get_block_transaction_count_by_number. """
    # closing() guarantees the API connection is released even if the
    # lookup raises.
    api = EthereumAPI()
    with contextlib.closing(api):
        return api.get_block_transaction_count_by_number(block_number)
def day(db: Database, site: str = 'test', tag: str = '', search_body: str = '') -> List[Any]:
    """Count posts per day for a site, optionally filtered by tag and body text.

    Returns a list of named tuples (date, count), newest first.
    # xxx: typing this as List[DayCount] would require a
    # class DayCount(NamedTuple) declaration; kept loose for pypy...
    """
    tag_where = ''
    body_where = ''
    param = [site]  # type: List[Union[str, int]]
    if tag != '':
        # Tags appear space-delimited in the column; match both a bare
        # "tag" token and a "tag:subtag" prefix.
        tag_where = "AND (tags like ? or tags like ?)"
        param.extend([f"% {tag} %", f"% {tag}:%"])
    if search_body != '':
        body_where = "AND body LIKE ?"
        param.append(f"%{search_body}%")
    if db.dbms == 'postgresql':
        # PostgreSQL returns DATE() as a date object; render it as text
        # so both backends yield 'YYYY-MM-DD' strings.
        date = 'to_char(DATE("datetime"),\'YYYY-MM-DD\')'
    else:
        date = 'DATE("datetime")'
    sql = f"""
        SELECT
            {date} as "date" ,
            COUNT(*) as "count"
        FROM basedata
        WHERE site = ?
        {tag_where}
        {body_where}
        GROUP BY DATE("datetime")
        ORDER BY DATE("datetime") DESC
        LIMIT ?
    """
    limit = 1000  # PENDING: add paging?
    param.append(limit)
    day_count = NamedTuple('day_count', (('date', str), ('count', int)))
    logger.log(5, "日付投稿数SQL: %s", sql)
    logger.log(5, "プレースホルダパラメータ: %s", param)
    return db.execute_fetchall(sql, param, namedtuple=day_count)
def init_data(my_data, rp):
    """Initialize the sod (shock tube) problem on a CellCenterData2d patch.

    Args:
        my_data: patch.CellCenterData2d holding density, x/y-momentum
            and energy variables to be filled in place.
        rp: runtime-parameter object providing the sod.*, mesh.* and
            eos.gamma parameters.
    """
    msg.bold("initializing the sod problem...")
    # make sure that we are passed a valid patch object
    if not isinstance(my_data, patch.CellCenterData2d):
        print("ERROR: patch invalid in sod.py")
        print(my_data.__class__)
        sys.exit()
    # get the sod parameters (left/right states of the Riemann problem)
    dens_left = rp.get_param("sod.dens_left")
    dens_right = rp.get_param("sod.dens_right")
    u_left = rp.get_param("sod.u_left")
    u_right = rp.get_param("sod.u_right")
    p_left = rp.get_param("sod.p_left")
    p_right = rp.get_param("sod.p_right")
    # get the density, momenta, and energy as separate variables
    dens = my_data.get_var("density")
    xmom = my_data.get_var("x-momentum")
    ymom = my_data.get_var("y-momentum")
    ener = my_data.get_var("energy")
    # initialize the components, remember, that ener here is rho*eint
    # + 0.5*rho*v**2, where eint is the specific internal energy
    # (erg/g)
    xmin = rp.get_param("mesh.xmin")
    xmax = rp.get_param("mesh.xmax")
    ymin = rp.get_param("mesh.ymin")
    ymax = rp.get_param("mesh.ymax")
    gamma = rp.get_param("eos.gamma")
    direction = rp.get_param("sod.direction")
    # the interface sits at the domain midpoint
    xctr = 0.5*(xmin + xmax)
    yctr = 0.5*(ymin + ymax)
    myg = my_data.grid
    p = np.ones_like(dens) * p_left
    dens[:, :] = dens_left
    if direction == "x":
        # left state
        idxl = myg.x2d <= xctr
        dens[idxl] = dens_left
        xmom[idxl] = u_left  # holds velocity here; scaled to momentum below
        ymom[idxl] = 0.0
        # ener[idxl] = p_left/(gamma - 1.0) + 0.5*xmom[idxl]*u_left
        p[idxl] = p_left
        # right state
        idxr = myg.x2d > xctr
        dens[idxr] = dens_right
        xmom[idxr] = u_right
        ymom[idxr] = 0.0
        # ener[idxr] = p_right/(gamma - 1.0) + 0.5*xmom[idxr]*u_right
        p[idxr] = p_right
    else:
        # bottom state
        idxb = myg.y2d <= yctr
        dens[idxb] = dens_left
        xmom[idxb] = 0.0
        ymom[idxb] = u_left
        # ener[idxb] = p_left/(gamma - 1.0) + 0.5*ymom[idxb]*u_left
        p[idxb] = p_left
        # top state
        idxt = myg.y2d > yctr
        dens[idxt] = dens_right
        xmom[idxt] = 0.0
        ymom[idxt] = u_right
        # ener[idxt] = p_right/(gamma - 1.0) + 0.5*ymom[idxt]*u_right
        p[idxt] = p_right
    # relativistic conversion: at this point xmom/ymom hold velocities,
    # dens holds rest-mass density.  Compute the Lorentz factor W and
    # promote to conserved variables (NOTE(review): assumes |v| < 1,
    # i.e. velocities in units of c -- confirm).
    rhoh = eos.rhoh_from_rho_p(gamma, dens, p)
    W = 1./np.sqrt(1-xmom**2-ymom**2)
    dens[:, :] *= W
    xmom[:, :] *= rhoh*W**2
    ymom[:, :] *= rhoh*W**2
    ener[:, :] = rhoh*W**2 - p - dens
def xml_files_list(path):
    """
    Return a generator over the names of the XML files found in `path`.
    """
    # Listing is done eagerly (so a bad path raises here), filtering lazily.
    entries = os.listdir(path)
    return (name for name in entries if name.endswith(".xml"))
def helloLoop(name_list):
    """Print a greeting for every name in ``name_list`` (a list of strings)."""
    for current_name in name_list:
        greeting = "Hello!, " + current_name
        print(greeting)
def ppv2(
    aim_stars=None, speed_stars=None, max_combo=None,
    nsliders=None, ncircles=None, nobjects=None, base_ar=5.0,
    base_od=5.0, mode=MODE_STD, mods=MODS_NOMOD, combo=None,
    n300=None, n100=0, n50=0, nmiss=0, score_version=1, bmap=None
):
    """
    calculates ppv2
    returns (pp, aim_pp, speed_pp, acc_pp, acc_percent)
    if bmap is provided, mode, base_ar, base_od, max_combo,
    nsliders, ncircles and nobjects are taken from it. otherwise
    they must be provided.
    if combo is None, max_combo - nmiss is used.
    if n300 is None, nobjects - n100 - n50 - nmiss is used.
    """
    if mode != MODE_STD:
        info(
            "ppv2 is only implemented for osu!std at the moment\n"
        )
        raise NotImplementedError
    # beatmap overrides any individually supplied stats
    if bmap != None:
        mode = bmap.mode
        base_ar = bmap.ar
        base_od = bmap.od
        max_combo = bmap.max_combo()
        nsliders = bmap.nsliders
        ncircles = bmap.ncircles
        nobjects = len(bmap.hitobjects)
    else:
        # without a beatmap every stat must be given explicitly
        if aim_stars == None:
            raise ValueError("missing aim_stars or bmap")
        if speed_stars == None:
            raise ValueError("missing speed_stars")
        if max_combo == None:
            raise ValueError("missing max_combo or bmap")
        if nsliders == None:
            raise ValueError("missing nsliders or bmap")
        if ncircles == None:
            raise ValueError("missing ncircles or bmap")
        if nobjects == None:
            raise ValueError("missing nobjects or bmap")
    if max_combo <= 0:
        info("W: max_combo <= 0, changing to 1\n")
        max_combo = 1
    if combo == None:
        combo = max_combo - nmiss
    if n300 == None:
        n300 = nobjects - n100 - n50 - nmiss
    # accuracy ----------------------------------------------------
    accuracy = acc_calc(n300, n100, n50, nmiss)
    real_acc = accuracy
    if score_version == 1:
        # scorev1 ignores sliders since they are free 300s
        # for whatever reason it also ignores spinners
        nspinners = nobjects - nsliders - ncircles
        real_acc = acc_calc(
            n300 - nsliders - nspinners, n100, n50, nmiss
        )
        # can go negative if we miss everything
        real_acc = max(0.0, real_acc)
    elif score_version == 2:
        # scorev2 judges every object, so treat them all as circles
        ncircles = nobjects
    else:
        info("unsupported scorev%d\n" % (score_version))
        raise NotImplementedError
    # global values -----------------------------------------------
    def low_objects(stars):
        # length bonus: short maps are worth less.
        # NOTE(review): this blends two length-bonus formulas; verify
        # against the oppai/osu! reference implementation.
        multiplier = min(0.5, 0.59 + (-0.59 * math.exp(-0.0038 * nobjects)))
        multiplier = min(0.95 + min(0.1, nobjects / 5000),
                         0.55 + multiplier + max(0, 0.4 - pp_base(stars) / 12.5))
        def bonus(n):
            # piecewise bonus over object count; recursion only evaluates
            # the fixed breakpoints (500, 2000).
            if n <= 500:
                return multiplier
            elif n <= 2000:
                return bonus(500) + 0.3 * min(1, (n-500) / 1500)
            elif n > 2000:
                return bonus(2000) + 0.5 * math.log10(n / 2000)
        return bonus(nobjects)
    miss_penality = pow(0.97, nmiss)
    combo_break = pow(combo, 0.8) / pow(max_combo, 0.8)
    # calculate stats with mods
    speed_mul, ar, od, _, _ = (
        mods_apply(mods, ar=base_ar, od=base_od)
    )
    # ar bonus ----------------------------------------------------
    ar_bonus = 1.0
    if ar > 10.33:
        ar_bonus += 0.45 * (ar - 10.33)
    elif ar < 8.0:
        low_ar_bonus = 0.01 * (8.0 - ar)
        if mods & MODS_HD != 0:
            # hidden makes low AR harder to read
            low_ar_bonus *= 2.0
        ar_bonus += low_ar_bonus
    # aim pp ------------------------------------------------------
    aim = pp_base(aim_stars)
    aim *= low_objects(aim_stars)
    aim *= miss_penality
    aim *= combo_break
    aim *= ar_bonus
    if mods & MODS_HD != 0:
        aim *= 1.02 + (11 - ar) / 50
    if mods & MODS_FL != 0:
        aim *= max(1, 1.45 * low_objects(aim_stars))
    acc_bonus = 0.5 + accuracy / 2.0
    od_bonus = 0.98 + (od * od) / 2500.0
    aim *= acc_bonus
    aim *= od_bonus
    # speed pp ----------------------------------------------------
    speed = pp_base(speed_stars)
    speed *= low_objects(speed_stars)
    speed *= miss_penality
    speed *= combo_break
    speed *= acc_bonus
    speed *= od_bonus
    if mods & MODS_HD != 0:
        speed *= 1.18
    # acc pp ------------------------------------------------------
    acc = pow(1.52163, od) * pow(real_acc, 24.0) * 2.83
    # length bonus (not the same as speed/aim length bonus)
    acc *= min(1.15, pow(ncircles / 1000.0, 0.3))
    if mods & MODS_HD != 0:
        acc *= 1.02
    if mods & MODS_FL != 0:
        acc *= 1.02
    # total pp ----------------------------------------------------
    final_multiplier = 1.12
    if mods & MODS_NF != 0:
        final_multiplier *= 0.90
    if mods & MODS_SO != 0:
        final_multiplier *= 0.95
    # power-mean style combination of the three components
    total = (
        pow(
            pow(aim, 1.1) + pow(speed, 1.1) + pow(acc, 1.1),
            1.0 / 1.1
        ) * final_multiplier
    )
    return (total, aim, speed, acc, accuracy * 100.0)
def frame_shows_car(base_dir, frame, data_dir):
    """Return True if frame shows car. """
    seg_path = os.path.join(
        base_dir,
        "semantic_segmentation/semantic_segmentation" + str(frame) + ".png")
    # -1 = cv2.IMREAD_UNCHANGED, keeps the raw class-id values
    sem_seg = cv2.imread(seg_path, -1)
    class_id_dict = pre_processing.get_dict_from_file(data_dir, "class_id_legend.txt")
    car_id = int(class_id_dict['car'])
    return car_id in np.unique(sem_seg)
def p_portail_home(request):
    """CRUDY home portal view.

    Builds the portal Crudy state for this request and renders the
    portal template.
    """
    crudy = Crudy(request, "portail")
    title = crudy.application["title"]
    crudy.folder_id = None
    crudy.layout = "portail"
    # Pass an explicit context instead of locals(): same names the
    # template could previously use, without leaking every local.
    context = {"request": request, "crudy": crudy, "title": title}
    return render(request, 'p_portail_home.html', context)
def test_standardReceive():
    """Test StandardReceive round-trips its fields into the raw hex message."""
    addr_bytes = bytearray([0x11, 0x22, 0x33])
    target_bytes = bytearray([0x44, 0x55, 0x66])
    msg = StandardReceive(addr_bytes, target_bytes,
                          {'cmd1': 0x88, 'cmd2': 0x99}, flags=0x77)
    expected = hexmsg(0x02, 0x50, Address(addr_bytes), Address(target_bytes),
                      0x77, 0x88, 0x99)
    assert msg.hex == expected
    # hex string is 2 chars per byte, so length/2 is the byte count
    assert len(msg.hex) / 2 == msg.sendSize
    assert len(msg.hex) / 2 == msg.receivedSize
def check_eyr(eyr):
    """eyr (Expiration Year) - four digits; at least 2020 and at most 2030.

    Raises:
        ValueError: if ``eyr`` is not a year in [2020, 2030].
    """
    year = int(eyr)
    if year < 2020 or 2030 < year:
        # The original bare ``raise`` outside an except block produced a
        # RuntimeError("No active exception to re-raise"); raise an
        # explicit, descriptive exception instead.
        raise ValueError("eyr must be between 2020 and 2030: %r" % eyr)
def generate_image_list(dir_path, max_dataset_size=float("inf")):
    """
    Traverse the directory to generate a list of images path.
    Args:
        dir_path (str): image directory.
        max_dataset_size (int): Maximum number of return image paths.
    Returns:
        Image path list.
    """
    assert os.path.isdir(dir_path), '%s is not a valid directory' % dir_path
    # Walk deterministically and keep every file the is_image filter accepts.
    images = [os.path.join(root, fname)
              for root, _, fnames in sorted(os.walk(dir_path))
              for fname in fnames
              if is_image(fname)]
    print("len(images):", len(images))
    return images[:min(max_dataset_size, len(images))]
def A_norm(freqs, eta):
    """Calculates the constant scaling factor A_0

    Parameters
    ----------
    freqs : array
        The frequencies in Natural units (Mf, G=c=1) of the waveform
    eta : float
        The reduced mass ratio
    """
    # leading-order amplitude prefactor, then the f^(-7/6) falloff
    prefactor = np.sqrt(2*eta/3/np.pi**(1/3))
    return prefactor * freqs**-(7/6)
def task1(input_io: IO) -> int:
    """
    Solve task 1.

    Parameters
    ----------
    input_io: IO
        Day10 stream of adapters joltage.

    Return
    ------
    int
        number of differences of 1 times number of differences of 3.
    """
    # Chain starts at the 0-jolt outlet; sort to get the adapter order.
    adapters = sorted(list(read_numbers(input_io)) + [0])
    gaps = Counter(high - low for low, high in zip(adapters, adapters[1:]))
    # +1 accounts for the final 3-jolt jump to the device itself.
    return gaps[1] * (gaps[3] + 1)
def get_logger(name, log_dir, config_dir):
    """
    Creates a logger object
    Parameters
    ----------
    name: Name of the logger file
    log_dir: Directory where logger file needs to be stored
    config_dir: Directory from where log_config.json needs to be read
    Returns
    -------
    A logger object which writes to both file and stdout
    """
    # Use a context manager so the config file handle is closed (the
    # original json.load(open(...)) leaked it), and os.path.join so a
    # missing trailing separator in config_dir still works.
    with open(os.path.join(config_dir, 'log_config.json')) as config_file:
        config_dict = json.load(config_file)
    # Slashes in the logger name would create subdirectories; flatten them.
    config_dict['handlers']['file_handler']['filename'] = os.path.join(
        log_dir, name.replace('/', '-'))
    logging.config.dictConfig(config_dict)
    logger = logging.getLogger(name)
    # Mirror records on stdout in addition to the configured file handler.
    std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'
    consoleHandler = logging.StreamHandler(sys.stdout)
    consoleHandler.setFormatter(logging.Formatter(std_out_format))
    logger.addHandler(consoleHandler)
    return logger
def include(*sources: Union[FileSource, str], swim):
    """
    include a source with its preprocessor directives
    :param sources: the source objects or paths to cpp header files
    :param swim: output builder receiving the generated lines
    """
    lines = []
    for source in sources:
        if isinstance(source, str):
            # A raw '<...>' or '"..."' spec is emitted verbatim.
            if source.startswith('<') or source.startswith('"'):
                lines.append('#include ' + source)
                continue
            source = FileSource(source)
        # Wrap the include with its #define/#undef directives, undoing
        # them in reverse order.
        for directive in source.directives:
            lines.append('#define ' + directive)
        lines.append('#include "' + source.source_path + '"')
        for directive in reversed(source.directives):
            lines.append('#undef ' + directive)
    swim.add_begin(lines)
    swim.add_nl()
def test_varint__underflow():
    """Crash if VLQ gets a negative number."""
    # VLQ has no sign bit, so a negative value cannot be represented and
    # serialization must be rejected up front.
    field = numeric.VariableLengthInteger(vli_format=varints.VarIntEncoding.VLQ)
    with pytest.raises(errors.UnserializableValueError):
        field.to_bytes(-1)
def get_total_balance(view_currency='BTC') -> float:
    """
    Shows total balance for account in chosen currency
    :param view_currency: currency for total balance
    :return: total balance amount for account
    """
    balances = pay.get_balance().get('balance')
    total = 0
    for holdings in balances.values():
        # Each holding exposes its value converted into view_currency.
        converted = holdings.get(view_currency)
        total += converted.get('total') + converted.get('reserved')
    return total
def target_install():
    """Use the setup.py script to install."""
    log.info("target: install")
    # NOTE(review): `setup.py install` is deprecated in modern setuptools;
    # consider `pip install .` when this build target is next touched.
    _run("python setup.py install")
def bin_hex(binary):
    """
    Convert bytes32 to its lowercase hexadecimal string.

    Parameters
    ----------
    binary : bytes object

    Returns
    -------
    str
    """
    # bytes.hex() yields exactly what hexlify(...).decode('utf-8') did.
    return bytes(binary).hex()
def get_component_observers(component: Dict[str, Any],
                            observer_type: str = 'qp',
                            **observer_kwargs):
    """Get component-based Observers.

    Placeholder: always raises NotImplementedError carrying the requested
    ``observer_type``; the other arguments exist only to satisfy the
    expected call signature.
    """
    raise NotImplementedError(observer_type)
def build_format(name: str, pattern: str, label: bool) -> str:
    """Create snippet format.

    :param name: Instruction name
    :param pattern: Instruction regex pattern
    :param label: if False, the generic "number" placeholder is replaced
        with the literal example value "100"
    :return: the formatted snippet string
    """
    snip: str = f"{name:7s}" + pattern.format(**SNIPPET_REPLACEMENTS)
    # Strip regex grouping syntax and normalise the register escapes.
    snip = snip.replace("(", "")
    snip = snip.replace(")", "")
    snip = snip.replace("number?\\\\$reg\\", "number(\\$reg)")
    snip = snip.replace("\\$", "")
    # Substitute each generic "reg" occurrence with a concrete register
    # name, one at a time, in order.  (The dead `replace_ct` counter of
    # the original was removed.)
    reg_ct = snip.count("reg")
    for i in range(0, reg_ct):
        snip = snip.replace("reg", f"${REG_ARGS[i]}", 1)
    if not label:
        snip = snip.replace("number", "100")
    return snip
def make_observation_mapper(claims):
"""Make a dictionary of observation.
Parameters
----------
claims: pd.DataFrame
Returns
-------
observation_mapper: dict
an dictionary that map rv to their observed value
"""
observation_mapper = dict()
for c in claims.index:
s = claims.iloc[c]['source_id']
observation_mapper[f'b_{s}_{c}'] = torch.tensor(
claims.iloc[c]['value'])
return observation_mapper | 26,025 |
def toCSV(
    dataset,  # type: BasicDataset
    showHeaders=True,  # type: Optional[bool]
    forExport=False,  # type: Optional[bool]
    localized=False,  # type: Optional[bool]
):
    # type: (...) -> String
    """Formats the contents of a dataset as CSV (comma separated
    values), returning the resulting CSV as a string.
    If the "forExport" flag is set, then the format will be appropriate
    for parsing using the `fromCSV` function.
    Args:
        dataset: The dataset to export to CSV.
        showHeaders: If set to True, a header row will be present in
            the CSV. Default is True. Optional.
        forExport: If set to True, extra header information will be
            present in the CSV data which is necessary for the CSV to be
            compatible with the fromCSV method. Overrides showHeaders.
            Default is False. Optional.
        localized: If set to True, the string representations of the
            values in the CSV data will be localized. Default is
            False. Optional.
    Returns:
        The CSV data as a string.
    """
    # Stub/mock implementation: echoes the arguments and returns an
    # empty CSV string; the real conversion happens in the host system.
    print(dataset, showHeaders, forExport, localized)
    return ""
def test_driver_add_container_record(index_driver, database_conn):
    """
    Tests creation of a record.
    """
    index_driver.add('container')
    # Exactly one row should have been inserted.
    count = database_conn.execute("""
        SELECT COUNT(*) FROM index_record
    """).fetchone()[0]
    assert count == 1, 'driver did not create record'
    # Inspect the inserted row column by column
    # (id, baseid, rev, form, size).
    record = database_conn.execute("""
        SELECT * FROM index_record
    """).fetchone()
    assert record[0], 'record id not populated'
    assert record[1], 'record baseid not populated'
    assert record[2], 'record rev not populated'
    assert record[3] == 'container', 'record form is not container'
    assert record[4] is None, 'record size non-null'
def add_record(session, data):
    """Insert a Skeleton row built from ``data`` and return its id.

    session -- database session (presumably SQLAlchemy-style -- confirm)
    data -- dictionary, e.g. {"site": "Warsaw", ...}; must contain the
        keys site, location, skeleton, observer and obs_date.
    """
    record = Skeleton()
    for attr in ("site", "location", "skeleton", "observer", "obs_date"):
        setattr(record, attr, data[attr])
    session.add(record)
    session.commit()
    return record.skeleton_id
def pytest_tavern_beta_before_every_test_run(test_dict, variables):
    """Tavern hook, called:
    - directly after fixtures are loaded for a test
    - directly before verifying the schema of the file
    - before formatting is done on values
    - after global configuration has been loaded
    - after plugins have been loaded
    Modify the test in-place if you want to do something to it.
    Args:
        test_dict (dict): Test to run
        variables (dict): Available variables
    """
    # Intentionally a no-op: present so projects can override it.
def insert_player_in_db(player):
    """Serialize a player object and add it to the database table."""
    record = encode_class_to_dict(player)
    db.player_table.insert(record)
def hue_angle(C):
    """
    Returns the *hue* angle :math:`h` in degrees from given colour difference
    signals :math:`C`.

    Parameters
    ----------
    C : array_like
        Colour difference signals :math:`C`.

    Returns
    -------
    numeric or ndarray
        *Hue* angle :math:`h` in degrees.

    Examples
    --------
    >>> C = np.array([
    ...     -5.365865581996587e-05,
    ...     -0.000571699383647,
    ...     0.000625358039467
    ... ])
    >>> hue_angle(C)  # doctest: +ELLIPSIS
    269.2737594...
    """
    C_1, C_2, C_3 = tsplit(C)
    # atan2 arguments from the opponent-signal differences.
    numerator = 0.5 * (C_2 - C_3) / 4.5
    denominator = C_1 - (C_2 / 11)
    # radians -> degrees, wrapped into [0, 360).
    return (180 * np.arctan2(numerator, denominator) / np.pi) % 360
def get_submodel_list_copasi(model_name: str,
                             model_info: pd.DataFrame):
    """
    Load the list of Copasi model files belonging to one benchmark model.

    Given the short id of a benchmark model, looks up all accepted
    submodels in the info table, loads each one via get_submodel_copasi,
    and also returns the matching SBML files, which are needed if any
    postprocessing of the Copasi results is necessary.
    """
    # restrict the info table to this benchmark model's accepted rows
    rows = model_info.loc[model_info['short_id'] == model_name]
    rows = rows[rows['accepted']]
    copasi_file_list = []
    sbml_model_list = []
    for submodel_path in list(rows['copasi_path_final']):
        copasi_file, sbml_model = get_submodel_copasi(submodel_path, model_info)
        if copasi_file is not None:
            copasi_file_list.append(copasi_file)
            sbml_model_list.append(sbml_model)
    return copasi_file_list, sbml_model_list
def GetNextBmask(enum_id, value):
    """
    Get next bitmask in the enum (bitfield)
    @param enum_id: id of enum
    @param value: value of the current bitmask
    @return: value of a bitmask with value higher than the specified
             value. -1 if no such bitmasks exist.
             All bitmasks are sorted by their values
             as unsigned longs.
    """
    # Thin compatibility wrapper over the IDA SDK call.
    return idaapi.get_next_bmask(enum_id, value)
def nbshell(context):
    """Launch an interactive nbshell session."""
    run_command(context, "nautobot-server nbshell")
def post_measurement(database) -> None:
    """Put the measurement in the database.

    Reads the measurement from the request body; if it is identical to
    the latest stored measurement for the metric, only the latest
    measurement's end timestamp is extended, otherwise a new measurement
    is inserted.
    """
    measurement = dict(bottle.request.json)
    latest = latest_measurement(measurement["metric_uuid"], database)
    if latest:
        # Sources are compared pairwise against the previous measurement.
        for latest_source, new_source in zip(latest["sources"], measurement["sources"]):
            if "ignored_units" in latest_source:
                # Copy the keys of ignored units that still exist in the new measurement
                new_unit_keys = set(unit["key"] for unit in new_source.get("units", []))
                new_source["ignored_units"] = [key for key in latest_source["ignored_units"] if key in new_unit_keys]
        if latest["sources"] == measurement["sources"]:
            # If the new measurement is equal to the previous one, merge them together
            database.measurements.update_one(filter={"_id": latest["_id"]}, update={"$set": {"end": iso_timestamp()}})
            return
    insert_new_measurement(measurement["metric_uuid"], measurement, database)
def annotate_link(domain):
    """Render the annotation appended by the url tag.  Override to
    disable or change the behaviour.

    domain -- Domain parsed from the url
    """
    escaped = _escape(domain)
    return u" [%s]" % escaped
def kmeans(data, k, num_iterations, num_inits=10, verbose=False):
    """Execute the k-means algorithm for
    determining the best k clusters of data
    points in a dataset.

    Parameters
    ----------
    data : ndarray, (n,d)
        n data points in R^d.
    k : int
        The number of clusters to separate
        the data into.
    num_iterations : int
        The number of iterations of the k-means
        algorithm to execute.  Must be >= 1: with 0 iterations the
        per-init total distance is never computed (NameError).
    num_inits : int, optional
        Number of random initializations to try.
        Returns the best result.
    verbose : bool, optional
        Specifies whether to print info about
        the execution of the algorithm.

    Return
    ------
    (clusters, data_point_assignment, centroids)
        clusters is a list of k lists of data-point indices,
        data_point_assignment is an (n,) int array mapping point index
        to cluster index, and centroids is a (d,k) array of cluster
        centers (note: centroids are stored column-wise).
    """
    # Number of data points
    num_data_points = int(data.shape[0])
    # Spatial dimension d
    d = int(data.shape[1])
    best_results = None
    best_total_distance = np.inf
    for init in range(num_inits):
        # Map from data point index to cluster index.
        data_point_assignment = np.zeros(num_data_points, dtype=int)
        # list of data points in clusters
        # NOTE(review): [[]] * k aliases one list k times; harmless here
        # only because `clusters` is rebuilt before first use below.
        clusters = [[]] * k
        # Initialize the centroids using randomly sampled points.
        # NOTE(review): each centroid is the MEAN of k random points,
        # not a single random point -- confirm this is intended (it
        # pulls all initial centroids toward the data mean).
        centroids = np.zeros((d,k))
        for ind_cluster in range(k):
            inds_data = np.random.choice(num_data_points, k)
            centroid = np.mean(data[inds_data, :], axis=0)
            centroids[:, ind_cluster] = centroid
        for iteration in range(num_iterations):
            if verbose:
                print('==== Iteration {}/{} ===='.format(iteration+1, num_iterations))
                print('centroids = {}'.format(centroids))
            clusters = []
            for ind_c in range(k):
                clusters.append([])
            # Assignment step:
            # Assign each data point to the
            # cluster with nearest centroid.
            total_distance = 0.0
            for ind_point in range(num_data_points):
                distances = np.array([nla.norm(data[ind_point, :] - centroids[:, ind_c]) for ind_c in range(k)])
                ind_cluster = np.argmin(distances)
                total_distance += distances[ind_cluster]
                data_point_assignment[ind_point] = ind_cluster
                clusters[ind_cluster].append(ind_point)
            # Update step:
            # Update the centroids of the
            # new clusters.
            # NOTE(review): an empty cluster makes np.mean return NaN
            # and poisons that centroid for later iterations -- confirm.
            for ind_cluster in range(k):
                cluster = clusters[ind_cluster]
                cluster_data = np.array([data[ind_point, :] for ind_point in cluster])
                centroid = np.mean(cluster_data, axis=0)
                centroids[:, ind_cluster] = centroid
        # Keep the initialization with the smallest final total distance.
        if total_distance < best_total_distance:
            best_total_distance = total_distance
            best_results = (clusters, data_point_assignment, centroids)
    return best_results
def main(es_host, es_index_1, es_index_2, es_type):
    """
    Compare the record ids of the same type under two Elasticsearch
    index prefixes and print the ids present in only one of them.

    :param es_host: elastic search host server
    :param es_index_1: the index prefix 1
    :param es_index_2: the index prefix 2
    :param es_type: the type of records to be compared; must be one of
        constants.TYPES
    :return: None (results are printed)
    """
    # Validate all arguments before exiting so every problem is reported.
    error_flag = False
    if not es_index_1:
        print("mandatory parameter es_index_1 is not provided")
        error_flag = True
    if not es_index_2:
        print("mandatory parameter es_index_2 is not provided")
        error_flag = True
    if not es_type:
        print("mandatory parameter es_type is not provided")
        error_flag = True
    else:
        if es_type not in constants.TYPES:
            print("Unrecognized type which must be one of {}".format(",".join(constants.TYPES)))
            error_flag = True
    if error_flag:
        exit()
    # Normalise prefixes so "<prefix>_<type>" is built consistently.
    es_index_1 = remove_underscore_from_end_prefix(es_index_1)
    es_index_2 = remove_underscore_from_end_prefix(es_index_2)
    resp1 = get_ids(es_host, es_index_1, es_type)
    resp2 = get_ids(es_host, es_index_2, es_type)
    # Remove common ids from resp2 while reporting ids unique to index 1;
    # whatever remains in resp2 afterwards is unique to index 2.
    for record_id in sorted(resp1):
        if record_id in resp2:
            resp2.remove(record_id)
        else:
            print(f"Only in {es_index_1}_{es_type}: {record_id}")
    if resp2:
        for record_id in sorted(resp2):
            print(f"Only in {es_index_2}_{es_type}: {record_id}")
def add_figure(bbox=None, slide_no=None, keep_aspect=True, tight=True,
               delete_placeholders=True, replace=False, **kwargs):
    """ Add current figure to the active slide (or a slide with a given number).
    Parameters:
        bbox    - Bounding box for the image in the format:
                  - None - the first empty image placeholder will be used, if
                    no such placeholders are found, then the 'Center'
                    value will be used.
                  - list of coordinates [x, y, width, height]
                  - string: 'Center', 'Left', 'Right', 'TopLeft', 'TopRight',
                    'BottomLeft', 'BottomRight', 'CenterL', 'CenterXL', 'Full'
                    based on the presets, that could be modified.
                    Preset name is case-insensitive.
        slide_no - number of the slide (stating from 1), where to add image.
                  If not specified (None), active slide will be used.
        keep_aspect - if True, then the aspect ratio of the image will be
                  preserved, otherwise the image will shrink to fit bbox.
        tight   - if True, then tight_layout() will be used
        delete_placeholders - if True, then all placeholders will be deleted.
                  Else: all empty placeholders will be preserved.
                  Default: delete_placeholders=True
        replace - if True, before adding picture it will first check if
                  there're any other pictures on the slide that overlap with
                  the target bbox. Then the picture, that overlap the most
                  will be replaced by the new one, keeping its position (i.e.
                  method will act like replace_figure() and target bbox will
                  be ignored). If no such pictures found - method will add
                  figure as usual.
        **kwargs - to be passed to plt.savefig()
    There're two options of how to treat empty placeholders:
        - delete them all (delete_placeholders=True). In this case everything,
          which does not have text or figures will be deleted. So if you want
          to keep them - you should add some text there before add_figure()
        - keep the all (delete_placeholders=False). In this case, all of them
          will be preserved even if they are completely hidden by the added
          figure.
        The only exception is when bbox is not provided (bbox=None). In this
        case the figure will be added to the first available empty placeholder
        (if found) and keep all other placeholders in place even if
        delete_placeholders is set to True.
    """
    # Small hack: undocumented pass-through for the stacking order,
    # popped so it is not forwarded to plt.savefig().
    target_z_order = kwargs.pop('target_z_order', None)
    # Save the figure as an image in a temporary location.
    fname = _temp_fname()
    if tight:
        # Usually is an overkill, but is needed sometimes...
        plt.tight_layout()
        plt.savefig(fname, bbox_inches='tight', **kwargs)
    else:
        plt.savefig(fname, **kwargs)
    # Delegate the PowerPoint work to the private helper; delete=True
    # removes the temporary image afterwards.
    _add_figure(fname, bbox=bbox, slide_no=slide_no, keep_aspect=keep_aspect,
                replace=replace, delete_placeholders=delete_placeholders,
                target_z_order=target_z_order,
                delete=True)
def com_google_fonts_check_metadata_match_weight_postscript(font_metadata):
    """METADATA.pb weight matches postScriptName for static fonts."""
    # Each numeric weight maps to exactly two style names (upright +
    # italic), so a valid weight always yields a two-element `pair`.
    WEIGHTS = {
        "Thin": 100,
        "ThinItalic": 100,
        "ExtraLight": 200,
        "ExtraLightItalic": 200,
        "Light": 300,
        "LightItalic": 300,
        "Regular": 400,
        "Italic": 400,
        "Medium": 500,
        "MediumItalic": 500,
        "SemiBold": 600,
        "SemiBoldItalic": 600,
        "Bold": 700,
        "BoldItalic": 700,
        "ExtraBold": 800,
        "ExtraBoldItalic": 800,
        "Black": 900,
        "BlackItalic": 900
    }
    pair = [(style, value) for style, value in WEIGHTS.items()
            if value == font_metadata.weight]
    if not pair:
        yield FAIL, ("METADATA.pb: Font weight value ({})"
                     " is invalid.").format(font_metadata.weight)
    elif not (font_metadata.post_script_name.endswith('-' + pair[0][0]) or
              font_metadata.post_script_name.endswith('-' + pair[1][0])):
        yield FAIL, ("METADATA.pb: Mismatch between postScriptName (\"{}\")"
                     " and weight value ({}). The name must be"
                     " ended with \"{}\" or \"{}\"."
                     "").format(font_metadata.post_script_name,
                                pair[0][1],
                                pair[0][0],
                                pair[1][0])
    else:
        yield PASS, "Weight value matches postScriptName."
def point_on_bezier_curve(cpw, n, u):
    """
    Compute point on Bezier curve.

    :param ndarray cpw: Control points.
    :param int n: Degree.
    :param u: Parametric point (0 <= u <= 1).

    :return: Point on Bezier curve.
    :rtype: ndarray

    *Reference:* Algorithm A1.4 from "The NURBS Book".
    """
    # Weighted sum of the n+1 control points by their Bernstein
    # coefficients at u.
    point = zeros(4, dtype=float64)
    for coefficient, control_point in zip(all_bernstein(n, u), cpw):
        point += coefficient * control_point
    return point
def list_with_one_dict(sort_type, url_param=None):
    """
    Search by parameter that returns a list with one dictionary.
    Used for full country name (sort_type == 2) and capital city
    (sort_type == 6).  Prompts interactively when url_param is None,
    prints the result and saves it to a file.
    """
    extra_param = ""
    if sort_type == 2:
        url_endpoint = "/name/"
        user_msg = "full country name"
        # restrict matching to the exact full name
        extra_param = "?fullText=true"
        desc = "\nSearch by full country name. Example: United States of America"
    elif sort_type == 6:
        url_endpoint = "/capital/"
        user_msg = "capital city"
        desc = "\nSearch by capital city. Example: Washington"
    if url_param is None:
        print(desc)
        url_param = input("\nEnter " + user_msg + ": ")
    res = requests.get(URL + url_endpoint +
                       url_param.strip().lower() + extra_param)
    # NOTE(review): bare except around raise_for_status also hides
    # SystemExit/KeyboardInterrupt; narrowing it to
    # requests.HTTPError would be safer -- confirm.
    try:
        res.raise_for_status()
    except:
        return "\nError! Could not find information for the given input."
    res_json = res.json()
    # The API returns a list with a single country dictionary.
    country_name = res_json[0]["name"]
    info = get_info(res_json, country_name)
    print(info)
    save_to_file(info, country_name)
def wait_then_open_async(url):
    """
    Spawns a daemon thread that waits for a bit then opens a URL.
    """
    # args must be a tuple: the original passed a set literal ({url}),
    # which only worked by accident for a single hashable argument and
    # would fail for unhashable values.
    t = threading.Thread(target=wait_then_open, args=(url,))
    t.daemon = True
    t.start()
def get_form(case, action_filter=lambda a: True, form_filter=lambda f: True, reverse=False):
    """
    Returns the first form that passes through both filter functions,
    or None when no form matches.
    """
    gf = get_forms(case, action_filter=action_filter, form_filter=form_filter, reverse=reverse)
    try:
        # next(gf) instead of gf.next(): the .next() method is
        # Python 2 only and raises AttributeError on Python 3.
        return next(gf)
    except StopIteration:
        return None
def csv_to_json_generator(df, field_map: dict, id_column: str, category_column: str):
    """
    Create a dictionary/json structure for a `single id dataframe` extracting content using the
    `extract_features_by_category` function.

    :param df: Source dataframe containing all ids.
    :param field_map: Maps each category name to its related features.
    :param id_column: Column holding the unit identifier.
    :param category_column: Column holding the category values.
    :return: List of per-id dicts with 'id', '@timestamp' and one key per category.
    """
    id_list = find_ids(df=df, id_column=id_column)
    logger.info('Found {} units on \'{}\' to process'.format(len(id_list), id_column))
    out = []
    for f_id in id_list:
        f_info = {'id': str(f_id), '@timestamp': datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")}
        f_df = single_id_df(df=df, id_column=id_column, id_value=f_id)
        for key in field_map.keys():
            try:
                data = extract_features_by_category(single_id_df=f_df, category=key, category_column=category_column,
                                                    related_features=field_map[key])[key.lower()]
                f_info[key.lower()] = data
            except Exception:
                # The bare `except:` also swallowed SystemExit and
                # KeyboardInterrupt; keep the best-effort behaviour but
                # record the traceback for debugging.
                logger.exception('id: {} key: \'{}\''.format(f_id, key))
        out.append(f_info)
    logger.info('Generated: {}. Delta: {}'.format(len(out), len(out)-len(id_list)))
    return out
def write_to_csv(filename, emb_paths, emb_array, exp_time, register_order_file=None):
    """Write to csv file in the format of: [name, features, threshold, path]

    Args:
        filename: Tag used to build the output csv file name.
        emb_paths: The image paths of the embeddings.
        emb_array: The embeddings generated from FaceNet,
            same order with emb_paths.
        exp_time: Version string appended to the output file name.
        register_order_file: The order of the images registered.
            If None, the order of emb_paths is used; if 'auto-gen',
            the order is produced by generate_register_order; otherwise
            it is read as a newline-separated file.
    """
    # Construct path -> embedding dict, keyed by "person_dir/file_name"
    keys = [v.split('/')[-2]+'/'+v.split('/')[-1] for v in emb_paths]
    emb_dict = dict(zip(keys, emb_array))
    # Read register order
    if register_order_file is None:
        register_order = keys
    elif register_order_file == 'auto-gen':
        register_order = generate_register_order(keys, filename, exp_time)
    else:
        with open(register_order_file, 'r') as textfile:
            register_order = textfile.read().split('\n')
    filename = 'data/features_' + filename + '_v' + exp_time + '.csv'
    # The context manager closes the file; the explicit close() the
    # original called inside the `with` block was redundant.
    with open(filename, "w") as csv_file:
        csv_writer = csv.writer(csv_file)
        for path in register_order:
            name = path.split('/')[0]
            features = emb_dict[path]
            threshold = 0  # Redundant column, kept for format compatibility
            csv_writer.writerow([name, features, threshold, path])
    green_print("Finish Write the CSV file: %s " %(filename))
def mark_battle_reported(database_key):
    """
    Marks a battle from the reporting queue as reported, given a database_key retrieved from get_next_battle_to_report.
    If this method isn't called, get_next_battle_to_report will start returning already-reported battles once it has
    returned each battle once.
    :param database_key: The database_key returned from get_next_battle_to_report corresponding to the battle
    successfully reported.
    """
    # LREM with count=-1 removes a single matching entry, scanning from
    # the tail of the list towards the head.
    redis_conn.lrem(REPORTING_QUEUE, -1, database_key)
def eval_args(egroup, show_supsup_task_inference=False):
    """This is a helper function of the function :func:`parse_cmd_arguments` to
    add arguments to the evaluation argument group.
    Args:
        egroup: The argument group returned by function
            :func:`utils.cli_args.eval_args`.
        show_supsup_task_inference (bool): Whether the option
            `supsup_task_inference` should be provided.
    """
    # Default of -1 disables interval-based full testing; per the help
    # text, full testing then still runs after training the last task.
    egroup.add_argument('--full_test_interval', type=int, metavar='N',
                        default=-1,
                        help='Full testing (on all tasks trained so far) is ' +
                             'always invoked after training on each task. ' +
                             'To reduce this demanding computation (and only ' +
                             'always test on the task just trained), one can ' +
                             'specify an interval that determines after how ' +
                             'many trained tasks the full testing is ' +
                             'performed. Note, full testing is always ' +
                             'performed after training the last task. ' +
                             'Default: %(default)s.')
    # The SupSup-related options are only exposed when requested by the
    # caller, so unrelated experiments keep a clean --help output.
    if show_supsup_task_inference:
        egroup.add_argument('--supsup_task_inference', action='store_true',
                            help='If activated, gradient-based task ' +
                                 'inference as in the SupSup method will be ' +
                                 'computed alongside all other methods to ' +
                                 'perform task-inference.')
        egroup.add_argument('--supsup_grad_steps', type=int, default=1,
                            help='Number of entropy gradient steps to be used ' +
                                 'for performing SupSup-like task inference. ' +
                                 'Default: %(default)s')
        egroup.add_argument('--supsup_lr', type=float, default=1e-3,
                            help='The scaling for the update of the alpha ' +
                                 'coefficients when doing SupSup task-inference. ' +
                                 'Only relevant if the number of gradient steps ' +
                                 'is larger than 1. Default: %(default)s')
def write_flag_drawing(img, filename_out):
    """Write an image to a file in the flag_drawings directory.

    :param img: Image object accepted by save_img.
    :param filename_out: Output file name within 'flag_drawings'.
    """
    save_img(img, 'flag_drawings', filename_out)
def parse_args():
    """Get command line arguments.

    Returns:
        argparse.Namespace with `input`, `output`, `weights`, `metrics`
        and `procs` attributes.
    """
    parser = argparse.ArgumentParser(prog='metrics',
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     description=desc_str)
    parser.add_argument('-v', '--version', action='version', version='%(prog)s v0.1')
    parser.add_argument('-i', '--input', type=str, default=os.path.join('.', 'input'),
                        help=r'directory with input binarized and ground-truth images (default: "%(default)s")')
    parser.add_argument('-o', '--output', type=str, default=os.path.join('.', 'output'),
                        help=r'directory with output metrics files (default: "%(default)s")')
    parser.add_argument('-w', '--weights', type=str, default=os.path.join('weights', 'weights.exe'),
                        help=r'path to weights evaluation tool (default: %(default)s)')
    parser.add_argument('-m', '--metrics', type=str, default=os.path.join('metrics', 'metrics.exe'),
                        help=r'path to metrics evaluation tool (default: %(default)s)')
    # Default to one worker per CPU core.
    parser.add_argument('-p', '--procs', type=int, default=cpu_count(),
                        help=r'number of processes (default: %(default)s)')
    return parser.parse_args()
def sunset_hour_angle(sinLat, cosLat, sinDec, cosDec):
    """
    Return the local sunset hour angle (radians).

    Implements the standard arccos(-tan(dec) * tan(lat)) formula from the
    provided sines and cosines; the argument is clipped to [-1, 1] so that
    polar day/night yields 0 or pi instead of NaN.

    :param sinLat: sine of latitude
    :param cosLat: cosine of latitude
    :param sinDec: sine of solar declination
    :param cosDec: cosine of solar declination
    """
    cos_h0 = -sinDec / cosDec * sinLat / cosLat
    return np.arccos(np.clip(cos_h0, -1, 1))
def handle_enable(options):
    """Enable a Sopel plugin.
    :param options: parsed arguments
    :type options: :class:`argparse.Namespace`
    :return: 0 if everything went fine;
             1 if the plugin doesn't exist
    """
    plugin_names = options.names
    allow_only = options.allow_only
    settings = utils.load_settings(options)
    usable_plugins = plugins.get_usable_plugins(settings)
    # plugin does not exist
    unknown_plugins = [
        name
        for name in plugin_names
        if name not in usable_plugins
    ]
    if unknown_plugins:
        display_unknown_plugins(unknown_plugins)
        return 1  # do nothing and return an error code
    actually_enabled = tuple(
        name
        for name in plugin_names
        if _handle_enable_plugin(settings, usable_plugins, name, allow_only)
    )
    # save if required
    if actually_enabled:
        settings.save()
    else:
        return 0  # nothing to enable or save, but not an error case
    # display plugins actually enabled by the command
    print(utils.get_many_text(
        actually_enabled,
        one='Plugin {item} enabled.',
        two='Plugins {first} and {second} enabled.',
        many='Plugins {left}, and {last} enabled.'
    ))
    return 0
def compare_img_hist(img_path_1, img_path_2):
    """
    Get the comparison result of the similarity by the histogram of the
    two images. This is suitable for checking whether the image is close
    in color. Conversely, it is not suitable for checking whether shapes
    are similar.

    Parameters
    ----------
    img_path_1 : str
        The path of the first image for comparison.
    img_path_2 : str
        The path of the second image for comparison.

    Returns
    -------
    similarity : float
        Similarity between two images. The maximum is set to 1.0, and the
        closer to 1.0, the higher the similarity. It is set by the mean
        value of the histogram of RGB channels.
    """
    assert_img_exists(img_path=img_path_1)
    assert_img_exists(img_path=img_path_2)
    first_img = cv2.imread(img_path_1)
    second_img = cv2.imread(img_path_2)
    correlations = []
    # Correlate per-channel 256-bin histograms and average the scores.
    for channel_idx in range(3):
        hist_kwargs = dict(mask=None, histSize=[256], ranges=[0, 256])
        first_hist = cv2.calcHist(
            images=[first_img], channels=[channel_idx], **hist_kwargs)
        second_hist = cv2.calcHist(
            images=[second_img], channels=[channel_idx], **hist_kwargs)
        correlations.append(cv2.compareHist(
            H1=first_hist, H2=second_hist, method=cv2.HISTCMP_CORREL))
    return np.mean(correlations)
def test_get_queryset_duplicates(
    api_rf, km_user_accessor_factory, km_user_factory, user_factory
):
    """
    If the user has managed to create an accessor granting access to
    their own account, there should not be a duplicate entry in the user
    list.
    Regression test for #352.
    """
    user = user_factory()
    api_rf.user = user
    km_user = km_user_factory(user=user)
    # Self-referential accessor: the user grants access to themselves.
    km_user_accessor_factory(
        is_accepted=True, km_user=km_user, user_with_access=user
    )
    # Have to create another accessor for the bug to be present
    km_user_accessor_factory(km_user=km_user)
    view = views.KMUserListView()
    view.request = api_rf.get("/")
    # km_user must appear exactly once despite the self-accessor.
    assert list(view.get_queryset()) == [km_user]
def recombinant_example(resource_name, doc_type, indent=2, lang='json'):
    """
    Return example data formatted for use in API documentation

    :param resource_name: Recombinant resource whose chromo may supply examples.
    :param doc_type: One of the chromo 'examples' keys, or 'sort',
        'filters', 'filter_one'; anything else yields a full record example.
    :param indent: Number of spaces of left indentation for the output.
    :param lang: 'pythonargs' renders keyword arguments; otherwise JSON.
    """
    chromo = recombinant_get_chromo(resource_name)
    if chromo and doc_type in chromo.get('examples', {}):
        data = chromo['examples'][doc_type]
    elif doc_type == 'sort':
        data = "request_date desc, file_number asc"
    elif doc_type == 'filters':
        data = {"resource": "doc", "priority": "high"}
    elif doc_type == 'filter_one':
        data = {"file_number": "86086"}
    else:
        data = {
            "request_date": "2016-01-01",
            "file_number": "42042",
            "resource": "doc",
            # fixed typo: was "prioroty", inconsistent with the
            # "priority" key used in the 'filters' example above
            "priority": "low",
        }
    if not isinstance(data, (list, dict)):
        return json.dumps(data)
    left = ' ' * indent
    if lang == 'pythonargs':
        return ',\n'.join(
            "%s%s=%s" % (left, k, json.dumps(data[k]))
            for k in sorted(data))
    # Drop the outer braces and re-indent the body by `indent - 2` spaces.
    out = json.dumps(data, indent=2, sort_keys=True, ensure_ascii=False)
    return left[2:] + ('\n' + left[2:]).join(out.split('\n')[1:-1])
def _integral_diff(x, pdf, a, q):
"""Return difference between q and the integral of the function `pdf`
between a and x. This is used for solving for the ppf."""
return integrate.quad(pdf, a, x)[0] - q | 26,056 |
def loadBar(self):
    """Load historical K-line (candlestick) data into the chart.

    Builds a DataFrame indexed by 'datetime' from self.bars, renders it in
    the K-line widget, then plots all configured main/sub chart signals.
    """
    pdData = pd.DataFrame(self.bars).set_index('datetime')
    self.uiKLine.loadData(pdData)
    # Draw the signals overlaid on the main chart.
    for s in self.mainSigs:
        self.plotMain(s)
    # Draw the signals shown in the sub chart.
    for s in self.subSigs:
        self.plotSub(s)
    self.uiKLine.updateSig(self.sigs)
    # Keep a reference to the strategy's current bar.
    self.bar = self.strategy.bar
# ----------------------------------------------------------------------
def update_df_slab_ids():
    """Reconcile df_slab with the on-disk slab-id mapping.

    Verifies that every (bulk_id, facet) pair already present in
    df_slab_ids agrees with df_slab, appends the pairs that are missing
    from the mapping, and rewrites the slab_id_mapping.csv files in both
    out_data and in_data.
    """
    #| - update_df_slab_ids
    # #####################################################
    # Read Data
    from methods import get_df_slab_ids
    df_slab_ids = get_df_slab_ids()
    from methods import get_df_slab
    df_slab = get_df_slab()
    # #####################################################
    # Checking that df_slab_ids has only unique bulk_id+facet pairs
    num_entries = len(df_slab_ids.index.tolist())
    num_unique_entries = len(list(set(df_slab_ids.index.tolist())))
    if num_entries != num_unique_entries:
        print("Woh what's going on here")
    mess_i = "Woops not good"
    assert num_entries == num_unique_entries, mess_i
    # #########################################################
    # Looping through df_slab rows and checking if the slab_id is present in df_slab_ids
    # #########################################################
    data_dict_list = []
    # #########################################################
    for slab_id_i, row_i in df_slab.iterrows():
        # #####################################################
        data_dict_i = dict()
        # #####################################################
        bulk_id_i = row_i.bulk_id
        facet_i = row_i.facet
        # #####################################################
        # df_slab_ids.loc[bulk_id_i, facet_i]
        # "NaN" marks pairs that are absent from df_slab_ids.
        slab_ids_match_i = "NaN"
        index_in_df_slab_ids_i = (bulk_id_i, facet_i, ) in df_slab_ids.index
        if index_in_df_slab_ids_i:
            # #################################################
            row_id_i = df_slab_ids.loc[(bulk_id_i, facet_i)]
            # #################################################
            slab_id__from_fle = row_id_i.slab_id
            # #################################################
            slab_ids_match_i = slab_id_i == slab_id__from_fle
        # #####################################################
        data_dict_i["bulk_id"] = bulk_id_i
        data_dict_i["facet"] = facet_i
        data_dict_i["slab_id"] = slab_id_i
        data_dict_i["index_in_df_slab_ids"] = index_in_df_slab_ids_i
        data_dict_i["slab_ids_match"] = slab_ids_match_i
        # #####################################################
        data_dict_list.append(data_dict_i)
        # #####################################################
    # #########################################################
    df = pd.DataFrame(data_dict_list)
    # #########################################################
    # #####################################################
    # Checking that data objects are consistent with each other
    # (every pair present in df_slab_ids must have a matching slab_id)
    df_i = df[df.index_in_df_slab_ids == True]
    df_slab__df_slab_ids__consistent = False
    unique_vals = list(set(df_i.slab_ids_match.tolist()))
    if (len(unique_vals) == 1) and unique_vals[0] == True:
        df_slab__df_slab_ids__consistent = True
    mess_i = "df_slab and df_slab_ids are not consistent"
    assert df_slab__df_slab_ids__consistent, mess_i
    # #####################################################
    # Getting all the entries in df_slab that aren't present in df_slab_ids
    df_1 = df[df.index_in_df_slab_ids == False]
    df_1 = df_1.set_index(["bulk_id", "facet", ], drop=False)
    df_1 = df_1.sort_index()
    # #####################################################
    # Combining the old and new df_slab_ids and saving
    df_slab_ids_new = pd.concat(
        [
            df_1[["bulk_id", "facet", "slab_id", ]],
            df_slab_ids,
        ],
        axis=0,
        )
    # #####################################################
    # Writing data to file (both copies of the mapping csv)
    pre_dir = os.path.join(
        os.environ["PROJ_irox_oer"],
        "workflow/creating_slabs")
    df_slab_ids_new.to_csv(
        os.path.join(pre_dir, "out_data/slab_id_mapping.csv"),
        index=False)
    df_slab_ids_new.to_csv(
        os.path.join(pre_dir, "in_data/slab_id_mapping.csv"),
        index=False)
    # "in_data/slab_id_mapping.csv",
    #__|
def Fcomplete(t,y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n):
    """Right-hand side of the ODE system y'(t) = f(t, y, ...).

    Parameters arrive as f_args, as given by param_array (see param.py).
    The state vector ``y`` holds [G, R, M] and, when present, the two GEF
    perturbation components [Gp, Gpvis]; the returned derivative vector
    always has five entries.
    """
    # Recover absolute rate constants from the ratio parameters
    # (kmi = ki / Kri or ki / Kmi).
    k0 = k0m * Kr0
    k2 = k2m * Kr2
    k5 = k5m * Km5
    k6 = k6m * Km6
    G = y[0]
    R = y[1]
    M = y[2]
    if len(y) > 3:
        Gp = y[3]     # GEF perturbation (what's given in the data)
        Gpvis = y[4]  # GEF perturbation (what's given in the data)
    else:
        Gp = 0.
        Gpvis = 0
    dG = k3*R*(Gt-G) - k4*M*G
    dR = (k0*G+Gpt*Gp)*(Rt-R)/(Kr0+(Rt-R)) + k1*(Rt-R)/(Kr1+(Rt-R)) - k2*R/(Kr2+R)
    dM = k5*R*(Mt-M)**n/(Km5**n+(Mt-M)**n) - k6*M/(Km6+M) + k7*(Mt-M)/(Km7+(Mt-M))
    dGp = k_Gp-k_Gp*Gp-k4*Gp*M
    dGpvis = k_Gp-k_Gp*Gpvis
    return np.array( [ dG, dR, dM, dGp, dGpvis] )
def lu_solve(l: np.ndarray, u: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Solve a linear system previously factored as L*U.

    The right-hand-side vector must already have been multiplied by the
    permutation matrix of the factorization.

    :param l: lower triangular factor
    :param u: upper triangular factor
    :param b: right-hand-side vector
    :return: solution vector of the system
    """
    size = l.shape[0]
    # Forward substitution: L z = b.
    z = np.zeros_like(b)
    z[0] = b[0]
    for row in range(1, size):
        z[row] = b[row] - sum(l[row, col] * z[col] for col in range(row))
    # Backward substitution: U x = z.
    x = np.zeros_like(b)
    x[-1] = z[-1] / u[-1, -1]
    for row in range(size - 2, -1, -1):
        acc = sum(u[row, col] * x[col] for col in range(row + 1, size))
        x[row] = (z[row] - acc) / u[row, row]
    return x
def test_finding_logdate(dispatcher, ntbk_dir, mocker):
    """Test using --find-dir flag outputs path to specified date"""
    # Patch builtins.print so the dispatcher's output can be asserted on.
    mocker.patch('builtins.print')
    expected_path = ntbk_dir / 'log/2021/01-january/2021-01-01'
    dispatcher.run(['today', '--find-dir'])
    print.assert_called_once_with(expected_path)
def test_sum_with_incompatible_types(table):
    """
    Validation must reject `+` on a string operand with:
    Invalid UpdateExpression: Incorrect operand type for operator or function; operator or function: +, operand type: S'
    """
    try:
        update_expression = "SET ri = :val + :val2"
        update_expression_ast = UpdateExpressionParser.make(update_expression)
        item = Item(
            hash_key=DynamoType({"S": "id"}),
            hash_key_type="TYPE",
            range_key=None,
            range_key_type=None,
            attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}},
        )
        UpdateExpressionValidator(
            update_expression_ast,
            expression_attribute_names=None,
            expression_attribute_values={":val": {"S": "N"}, ":val2": {"N": "3"}},
            item=item,
            table=table,
        ).validate()
        # The original try/except silently passed when validate() raised
        # nothing at all; fail explicitly in that case.  (AssertionError is
        # not caught by the IncorrectOperandType handler below.)
        raise AssertionError("IncorrectOperandType was not raised")
    except IncorrectOperandType as e:
        assert e.operand_type == "S"
        assert e.operator_or_function == "+"
def test_order_view_permissions(client, user):
    """A user should not be able to access order data if it does not belong to them"""
    random_user = UserFactory.create(is_staff=False, is_superuser=False)
    order = OrderFactory.create(user=user)
    client.force_login(random_user)
    # Accessing another user's order must be forbidden.
    resp = client.get(reverse("order-api", kwargs={"pk": order.id}))
    assert resp.status_code == statuses.HTTP_403_FORBIDDEN
    # Transfer ownership; the same request must now succeed.
    order.user = random_user
    order.save()
    resp = client.get(reverse("order-api", kwargs={"pk": order.id}))
    assert resp.status_code == statuses.HTTP_200_OK
def datetime_to_httpdate(input_date):
    # type: (datetime) -> Optional[str]
    """Format a datetime as an HTTP date string.

    Returns None when ``input_date`` is None or cannot be formatted with
    HTTP_DATE_FORMAT (the failure is logged at debug level).
    """
    if input_date is not None:
        try:
            return input_date.strftime(HTTP_DATE_FORMAT)
        except (ValueError, TypeError) as exc:
            logger.debug(exc)
    return None
def test_evolution_trigger_list(client: TestClient):
    """Test case for evolution_trigger_list

    Smoke-tests the paginated list endpoint; the status assertion is left
    commented out, so this only checks that the request does not raise.
    """
    params = [("limit", 56), ("offset", 56)]
    headers = {
    }
    response = client.request(
        "GET",
        "/api/v2/evolution-trigger/",
        headers=headers,
        params=params,
    )
    # uncomment below to assert the status code of the HTTP response
    #assert response.status_code == 200
def scanner(url, scan_time=None):
    """
    Scan files in the MDSS tape store

    :param url: ``mdss://project/path`` URL (string or ParseResult).
    :param scan_time: Passed through to parse_mdss.
    :yields: entries parsed from the recursive `mdss dmls` listing.
    """
    if isinstance(url, str):
        url = urlparse(url)
    project = url.netloc
    path = url.path
    if project == '':
        raise Exception('No MDSS project specified')
    cmd = ['/opt/bin/mdss', '-P', project, 'dmls', '-aniR', '--', path]
    with subprocess.Popen(cmd,
                          bufsize=1,
                          text=True,
                          stdout=subprocess.PIPE) as p:
        yield from parse_mdss(p.stdout, scan_time=scan_time)
    # Check the exit status only after the context manager has exited:
    # Popen.__exit__ waits for the process, so returncode is set here,
    # whereas inside the `with` block it could still be None.
    if p.returncode != 0:
        logging.getLogger(__name__).warning(
            f'Command "{" ".join(p.args)}" failed with code {p.returncode}')
def textToSheet(directory, filename):
    """Convert text files to columns in an excel worksheet.

    Args:
        directory (str): folder containing text files
        filename (str): name of excel file

    Returns:
        None
    """
    wb = openpyxl.Workbook()
    wb.create_sheet(index=0, title='result')
    sheet = wb.active
    colIndex = 1
    # write text files as columns in worksheet
    for fname in os.listdir(directory):  # renamed from `file` (builtin shadow)
        if fname.endswith('.txt'):
            rowIndex = 1
            # os.path.join works whether or not `directory` carries a
            # trailing separator; the original `directory + file`
            # concatenation broke without one.
            with open(os.path.join(directory, fname)) as f:
                for line in f:
                    sheet.cell(row=rowIndex, column=colIndex).value = line
                    rowIndex += 1
            colIndex += 1
    wb.save(filename)
def zr_bfr_tj():
    """
    Real Name: b'Zr bfr Tj'
    Original Eqn: b'Zr aftr Dam-Wr sup aftr Zr Dam+(Wr sup aftr Zr Dam*0.2)'
    Units: b''
    Limits: (None, None)
    Type: component

    Auto-generated (PySD-style) model component: evaluates the Vensim
    equation above by calling the sibling component functions.
    """
    # Net of 0.8 * wr_sup_aftr_zr_dam subtracted from zr_aftr_dam,
    # written to mirror the original Vensim equation term-for-term.
    return zr_aftr_dam() - wr_sup_aftr_zr_dam() + (wr_sup_aftr_zr_dam() * 0.2)
def save_db():
    """Save the local db variable to the database"""
    # `global` is not strictly required for a read-only access, but makes
    # the module-level dependency explicit.
    global db
    database.set(bot, db, 'tell')
def simplify(n):
    """Round *n* to the nearest whole number and return it as an int.

    Uses Python's built-in round(), i.e. banker's rounding for .5 ties.
    """
    rounded = round(n)
    return int(rounded)
def resnet_qc_18(**kwargs):
    """Constructs a ResNet-18 model.

    :param kwargs: Forwarded to the ResNetQC constructor.
    :return: A ResNetQC instance built with BasicBlock and the standard
        ResNet-18 layout of two blocks per stage.
    """
    model = ResNetQC(BasicBlock, [2, 2, 2, 2], **kwargs)
    return model
def test_check_conda_pkg_dir():
    """
    Test that the check_conda_pkg_dir correctly replaces an installed ggd .tar.bz2 if it has been removed from the conda pkg dir
    """
    ## Test prefix not set:
    ### Temp conda environment
    temp_env = os.path.join(utils.conda_root(), "envs", "check_pkg_info_dir")
    ### Remove temp env if it already exists
    sp.check_output(["conda", "env", "remove", "--name", "check_pkg_info_dir"])
    try:
        shutil.rmtree(temp_env)
    except Exception:
        # Environment directory may not exist yet; that's fine.
        pass
    ### Create conda environmnet
    sp.check_output(["conda", "create", "--name", "check_pkg_info_dir"])
    ## Test for a info dir and pkg dir that do not exists
    try:
        utils.check_conda_pkg_dir(temp_env)
    except OSError as e:
        assert "No such file or directory" in str(e)
    ### Install ggd recipe using conda into temp_env
    ggd_package = "hg19-pfam-domains-ucsc-v1"
    install_args = Namespace(channel='genomics', command='install', debug=False, name=[ggd_package], file=[], prefix = temp_env, id = None)
    assert install.install((), install_args) == True
    ## Check that there is no errors
    assert utils.check_conda_pkg_dir(temp_env) == True
    ## Check a tar.bz2 file removed from conda pkg dir
    conda_pkg_dir = os.path.join(temp_env,"pkgs")
    installed_ggd_pkgs = utils.get_conda_package_list(temp_env)
    installed_pkg = ggd_package + "-" + installed_ggd_pkgs[ggd_package]["version"] + "-"+ installed_ggd_pkgs[ggd_package]["build"] + ".tar.bz2"
    ## Remove the pkg from the conda pkg dir
    if os.path.exists(os.path.join(conda_pkg_dir,installed_pkg)):
        os.remove(os.path.join(conda_pkg_dir,installed_pkg))
    ## Test that when a pkg tar file does not exists in the conda pkg dir, ggd will replace it
    assert os.path.exists(os.path.join(conda_pkg_dir,installed_pkg)) == False
    assert utils.check_conda_pkg_dir(temp_env) == True
    assert os.path.exists(os.path.join(conda_pkg_dir,installed_pkg)) == True
    ### Remove temp env
    sp.check_output(["conda", "env", "remove", "--name", "check_pkg_info_dir"])
    try:
        shutil.rmtree(temp_env)
    except Exception:
        # Already removed by `conda env remove`; ignore.
        pass
    assert os.path.exists(temp_env) == False
def sparse_from_npz(file, **_kw):
    """
    Load a scipy sparse matrix from an ``.npz`` file.

    Possible dispatch function for ``from_path_impl``'s ``from_npz``;
    extra keyword arguments are accepted and ignored.
    """
    import scipy.sparse as sparse_mod
    return sparse_mod.load_npz(file)
def get_config():
    """
    Read the configuration
    :returns: a deep copy of the current configuration, so callers cannot
        mutate the shared module-level state
    """
    global config
    return copy.deepcopy(config)
def resnext20_2x64d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (2x64d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # 2x64d: cardinality 2, bottleneck width 64; depth fixed at 20 blocks.
    return get_resnext_cifar(classes=classes, blocks=20, cardinality=2, bottleneck_width=64,
                             model_name="resnext20_2x64d_cifar100", **kwargs)
def test_target(target):
    """Returns the label for the corresponding target in the test tree."""

    label = to_label(target)

    # Map only the first occurrence of src/main/ to src/test/.
    test_package = label.package.replace("src/main/", "src/test/", 1)

    # Rebuild a fully qualified label in the same workspace.
    return Label("@{workspace}//{package}:{target_name}".format(
        workspace = label.workspace_name,
        package = test_package,
        target_name = label.name,
    ))
def save_gif(
    data: Union[np.ndarray, torch.Tensor],
    path: str,
    duration: float = 2.0,
    loop: int = 0,
):
    """Save a GIF from a tensor.

    Each leading-dimension slice is min-max normalized independently and
    written as one frame.

    Args:
        data: Tensor of shape (N, H, W).
        path: Path to save the gif to.
        duration: Per-frame display time; Pillow interprets this in
            milliseconds for GIF output.
        loop: Number of loops. 0 means infinite loops.
    """
    if not isinstance(data, np.ndarray):
        data = data.cpu().numpy()
    images = []
    for idx in range(data.shape[0]):
        frame = data[idx]
        lo, hi = frame.min(), frame.max()
        if hi > lo:
            frame = (frame - lo) / (hi - lo)
        else:
            # Constant frame: the original divided by zero here (NaN
            # output); render it as black instead.
            frame = np.zeros_like(frame, dtype=float)
        images.append(Image.fromarray((frame * 255).astype(np.uint8)))
    images[0].save(
        path, save_all=True, append_images=images[1:], duration=duration, loop=loop
    )
def pred_fwd_rc(model, input_npy, output_fwd, output_rc, replicates=1, batch_size=512):
    """Predict pathogenic potentials from a preprocessed numpy array and its reverse-complement.

    :param model: Trained model passed through to predict_npy.
    :param input_npy: Path to the preprocessed numpy input.
    :param output_fwd: Output destination for forward-strand predictions.
    :param output_rc: Output destination for reverse-complement predictions.
    :param replicates: Number of prediction replicates.
    :param batch_size: Prediction batch size.
    :return: Tuple (y_fwd, y_rc) of prediction arrays.
    """
    y_fwd, _ = predict_npy(model, input_npy, output_fwd, rc=False, replicates=replicates, batch_size=batch_size)
    y_rc, _ = predict_npy(model, input_npy, output_rc, rc=True, replicates=replicates, batch_size=batch_size)
    return y_fwd, y_rc
def get_ids(viva_path, dataset):
    """Get image identifiers for the given dataset.

    Parameters
    ----------
    viva_path : str
        Path to VIVA directory.
    dataset : str
        Name of the dataset subdirectory.

    Returns
    -------
    ids : list of str
        Image identifiers: the file names in the dataset's 'pos'
        directory with any '.png' extension removed.
    """
    dataset_path = os.path.join(viva_path, dataset, 'pos')
    # The original used `dir` as the loop variable, shadowing the builtin.
    return [entry.replace('.png', '') for entry in os.listdir(dataset_path)]
def get_api_version(version_string):
    """Returns checked APIVersion object

    :param version_string: version identifier; converted to str before parsing
    :return: the parsed APIVersion after its major version has been checked
    """
    version_string = str(version_string)
    api_version = APIVersion(version_string)
    # Raises from check_major_version propagate for unsupported majors.
    check_major_version(api_version)
    return api_version
def test_show_versions():
    """should show all versions if no value is given"""
    results = yvs.get_result_list('version')
    # The exact version count varies; only require a healthy minimum.
    nose.assert_greater(len(results), 10)
def evaluate_g9( tau7, tau8, tau9, tau10, tau11, s9 ):
    """
    Evaluate the ninth constraint equation and its Jacobian.

    :param float tau7: The seventh tau parameter
    :param float tau8: The eighth tau parameter
    :param float tau9: The ninth tau parameter
    :param float tau10: The tenth tau parameter
    :param float tau11: The eleventh tau parameter
    :param float s9: The value of the constraint
    :return: tuple of (residual, jacobian dict keyed by parameter name)
    """
    residual = tau7 + 2 * tau8 - abs( tau9 + tau10 + tau11 ) - s9**2
    # d|x|/dtau_i = sign(tau9 + tau10 + tau11); the original code uses the
    # sign of each individual tau, which is reproduced here unchanged.
    jacobian = {
        'tau7': 1.,
        'tau8': 2.,
        'tau9': float( -np.sign( tau9 ) ),
        'tau10': float( -np.sign( tau10 ) ),
        'tau11': ( -np.sign( tau11 ) ),
        's9': -2 * s9,
    }
    return residual, jacobian
def qmul(*q, qaxis=-1):
    """ Quaternion multiplication.
    Parameters
    ----------
    q: iterable of array_like
        Arrays containing quaternions to multiply. Their dtype can be
        quaternion, otherwise `qaxis` specifies the axis representing
        the quaternions.
    qaxis: int, default -1
        If `q` are not quaternion dtype, axis of the quaternion arrays
        representing the coordinates of the quaternions.
    Returns
    -------
    qm: ndarray
        A new array containing the multiplied quaternions.
    """
    # TODO xarray support
    if len(q) < 2:
        raise ValueError("Please provide at least 2 quaternions to multiply")
    if all(qq.dtype != quaternion for qq in q):
        # Float arrays: move the quaternion axis last, reinterpret as
        # quaternion dtype, multiply left-to-right, then convert back.
        q = (as_quat_array(np.swapaxes(qq, qaxis, -1)) for qq in q)
        qm = reduce(operator.mul, q, 1)
        return np.swapaxes(as_float_array(qm), -1, qaxis)
    elif all(qq.dtype == quaternion for qq in q):
        # Already quaternion dtype: multiply in order, left to right.
        return reduce(operator.mul, q, 1)
    else:
        # Mixed dtypes are ambiguous (which axis is the quaternion one?).
        raise ValueError(
            "Either all or none of the provided quaternions must be "
            "quaternion dtype"
        )
def test_example_4p14():
    """Test example 4.14 in "RF and Microwave Engineering" - Gustrau """
    # Waveguide cavity dimensions
    a, b, d = 24*sc.milli, 10*sc.milli, 40*sc.milli
    # They use c = 3e8 m/s (I mean come on...)
    # The correction factor rescales our exact-c results to the book's
    # rounded speed of light so the published values can be matched.
    corr = 3e8 / sc.c
    # Test values
    abs_tol = 0.001e9
    assert wg.resonant_frequency(a, b, d, m=1, n=0, l=1) * corr == approx( 7.289e9, abs=abs_tol)
    assert wg.resonant_frequency(a, b, d, m=0, n=1, l=1) * corr == approx(15.462e9, abs=abs_tol)
    assert wg.resonant_frequency(a, b, d, m=1, n=1, l=0) * corr == approx(16.250e9, abs=abs_tol)
    assert wg.resonant_frequency(a, b, d, m=1, n=1, l=1) * corr == approx(16.677e9, abs=abs_tol)
    assert wg.resonant_frequency(a, b, d, m=2, n=0, l=1) * corr == approx(13.050e9, abs=abs_tol)
    assert wg.resonant_frequency(a, b, d, m=1, n=0, l=2) * corr == approx( 9.763e9, abs=abs_tol)
    assert wg.resonant_frequency(a, b, d, m=0, n=1, l=2) * corr == approx(16.771e9, abs=abs_tol)
    assert wg.resonant_frequency(a, b, d, m=0, n=2, l=1) * corr == approx(30.233e9, abs=abs_tol)
    assert wg.resonant_frequency(a, b, d, m=1, n=2, l=0) * corr == approx(30.644e9, abs=abs_tol)
    assert wg.resonant_frequency(a, b, d, m=2, n=1, l=0) * corr == approx(19.526e9, abs=abs_tol)
    assert wg.resonant_frequency(a, b, d, m=2, n=1, l=1) * corr == approx(19.882e9, abs=abs_tol)
    assert wg.resonant_frequency(a, b, d, m=1, n=2, l=1) * corr == approx(30.873e9, abs=abs_tol)
    assert wg.resonant_frequency(a, b, d, m=1, n=1, l=2) * corr == approx(17.897e9, abs=abs_tol)
def iterable(value,
             allow_empty = False,
             forbid_literals = (str, bytes),
             minimum_length = None,
             maximum_length = None,
             **kwargs):
    """Validate that ``value`` is a valid iterable.
    .. hint::
      This validator checks to ensure that ``value`` supports iteration using
      any of Python's three iteration protocols: the ``__getitem__`` protocol,
      the ``__iter__`` / ``next()`` protocol, or the inheritance from Python's
      `Iterable` abstract base class.
      If ``value`` supports any of these three iteration protocols, it will be
      validated. However, if iteration across ``value`` raises an unsupported
      exception, this function will raise an
      :exc:`IterationFailedError <validator_collection.errors.IterationFailedError>`
    :param value: The value to validate.
    :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value``
      is empty. If ``False``, raises a
      :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if
      ``value`` is empty. Defaults to ``False``.
    :type allow_empty: :class:`bool <python:bool>`
    :param forbid_literals: A collection of literals that will be considered invalid
      even if they are (actually) iterable. Defaults to :class:`str <python:str>` and
      :class:`bytes <python:bytes>`.
    :type forbid_literals: iterable
    :param minimum_length: If supplied, indicates the minimum number of members
      needed to be valid.
    :type minimum_length: :class:`int <python:int>`
    :param maximum_length: If supplied, indicates the minimum number of members
      needed to be valid.
    :type maximum_length: :class:`int <python:int>`
    :returns: ``value`` / :obj:`None <python:None>`
    :rtype: iterable / :obj:`None <python:None>`
    :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
    :raises NotAnIterableError: if ``value`` is not a valid iterable or
      :obj:`None <python:None>`
    :raises IterationFailedError: if ``value`` is a valid iterable, but iteration
      fails for some unexpected exception
    :raises MinimumLengthError: if ``minimum_length`` is supplied and the length of
      ``value`` is less than ``minimum_length`` and ``whitespace_padding`` is
      ``False``
    :raises MaximumLengthError: if ``maximum_length`` is supplied and the length of
      ``value`` is more than the ``maximum_length``
    """
    # NOTE(review): **kwargs is accepted but not consumed here — presumably
    # options (e.g. force_run) handled by the validator decorators; confirm.
    if not value and not allow_empty:
        raise errors.EmptyValueError('value (%s) was empty' % value)
    elif value is None:
        return None
    minimum_length = integer(minimum_length, allow_empty = True, force_run = True)        # pylint: disable=E1123
    maximum_length = integer(maximum_length, allow_empty = True, force_run = True)        # pylint: disable=E1123
    # Forbidden literals (str/bytes by default) are iterable but rarely
    # what the caller means by "an iterable of items".
    if isinstance(value, forbid_literals):
        raise errors.NotAnIterableError('value type (%s) not iterable' % type(value))
    try:
        iter(value)
    except TypeError:
        raise errors.NotAnIterableError('value type (%s) not iterable' % type(value))
    except Exception as error:
        # iter() raised something other than TypeError: iterable in
        # principle, but iteration itself is broken.
        raise errors.IterationFailedError('iterating across value raised an unexpected Exception: "%s"' % error)
    if value and minimum_length is not None and len(value) < minimum_length:
        raise errors.MinimumLengthError(
            'value has fewer items than the minimum length %s' % minimum_length
        )
    if value and maximum_length is not None and len(value) > maximum_length:
        raise errors.MaximumLengthError(
            'value has more items than the maximum length %s' % maximum_length
        )
    return value
def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
                          new_confd_hmac_key, new_cds,
                          rapi_cert_pem=None, spice_cert_pem=None,
                          spice_cacert_pem=None, cds=None,
                          nodecert_file=pathutils.NODED_CERT_FILE,
                          rapicert_file=pathutils.RAPI_CERT_FILE,
                          spicecert_file=pathutils.SPICE_CERT_FILE,
                          spicecacert_file=pathutils.SPICE_CACERT_FILE,
                          hmackey_file=pathutils.CONFD_HMAC_KEY,
                          cds_file=pathutils.CLUSTER_DOMAIN_SECRET_FILE):
    """Updates the cluster certificates, keys and secrets.

    @type new_cluster_cert: bool
    @param new_cluster_cert: Whether to generate a new cluster certificate
    @type new_rapi_cert: bool
    @param new_rapi_cert: Whether to generate a new RAPI certificate
    @type new_spice_cert: bool
    @param new_spice_cert: Whether to generate a new SPICE certificate
    @type new_confd_hmac_key: bool
    @param new_confd_hmac_key: Whether to generate a new HMAC key
    @type new_cds: bool
    @param new_cds: Whether to generate a new cluster domain secret
    @type rapi_cert_pem: string
    @param rapi_cert_pem: New RAPI certificate in PEM format
    @type spice_cert_pem: string
    @param spice_cert_pem: New SPICE certificate in PEM format
    @type spice_cacert_pem: string
    @param spice_cacert_pem: Certificate of the CA that signed the SPICE
                             certificate, in PEM format
    @type cds: string
    @param cds: New cluster domain secret
    @type nodecert_file: string
    @param nodecert_file: optional override of the node cert file path
    @type rapicert_file: string
    @param rapicert_file: optional override of the rapi cert file path
    @type spicecert_file: string
    @param spicecert_file: optional override of the spice cert file path
    @type spicecacert_file: string
    @param spicecacert_file: optional override of the spice CA cert file path
    @type hmackey_file: string
    @param hmackey_file: optional override of the hmac key file path
    """
    # pylint: disable=R0913
    # noded SSL certificate
    utils.GenerateNewSslCert(
        new_cluster_cert, nodecert_file, 1,
        "Generating new cluster certificate at %s" % nodecert_file)

    # confd HMAC key
    if new_confd_hmac_key or not os.path.exists(hmackey_file):
        logging.debug("Writing new confd HMAC key to %s", hmackey_file)
        GenerateHmacKey(hmackey_file)

    if rapi_cert_pem:
        # Assume rapi_cert_pem contains a valid PEM-formatted certificate
        # and key
        logging.debug("Writing RAPI certificate at %s", rapicert_file)
        utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)
    else:
        utils.GenerateNewSslCert(
            new_rapi_cert, rapicert_file, 1,
            "Generating new RAPI certificate at %s" % rapicert_file)

    # SPICE
    spice_cert_exists = os.path.exists(spicecert_file)
    spice_cacert_exists = os.path.exists(spicecacert_file)
    if spice_cert_pem:
        # spice_cert_pem implies also spice_cacert_pem
        logging.debug("Writing SPICE certificate at %s", spicecert_file)
        utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
        logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
        utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
    elif new_spice_cert or not spice_cert_exists:
        if spice_cert_exists:
            utils.CreateBackup(spicecert_file)
        if spice_cacert_exists:
            utils.CreateBackup(spicecacert_file)

        logging.debug("Generating new self-signed SPICE certificate at %s",
                      spicecert_file)
        (_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file, 1)

        # Self-signed certificate -> the public certificate is also the CA
        # public certificate
        logging.debug("Writing the public certificate to %s",
                      spicecert_file)
        # BUGFIX: "0400" is a Python-2-only octal literal (a SyntaxError on
        # Python 3); "0o400" has the same value and is accepted by both.
        utils.io.WriteFile(spicecacert_file, mode=0o400, data=cert_pem)

    # Cluster domain secret
    if cds:
        logging.debug("Writing cluster domain secret to %s", cds_file)
        utils.WriteFile(cds_file, data=cds, backup=True)
    elif new_cds or not os.path.exists(cds_file):
        logging.debug("Generating new cluster domain secret at %s", cds_file)
        GenerateHmacKey(cds_file)
def settings_notification(color: bool, messages: List[ExitMessage]) -> Form:
    """Build the warning notification form for settings errors.

    :param color: Whether color output is enabled for the message bodies
    :param messages: The exit messages to render in the notification
    :returns: A notification form wrapping the formatted messages
    """
    # A leading warning message is promoted to the form title; the title
    # itself is rendered without color.
    if messages[0].prefix is ExitPrefix.WARNING:
        first = messages.pop(0)
        title = first.to_lines(color=False, width=console_width(), with_prefix=True)[0]
    else:
        title = "Warning"

    rendered = ExitMessages(messages).to_strings(color=color, width=console_width())
    information = CursesLines(tuple(ansi_to_curses(line) for line in rendered))

    return Form(
        FormType.NOTIFICATION,
        title=title,
        title_color=Color.YELLOW,
        fields=[
            FieldCursesInformation(
                name="settings_warning",
                information=information,
            ),
        ],
    )
def message_type(ctx: 'Context', *types):
    """Check whether the update's message type is one of the given types.

    Only the message, edited_message, channel_post and edited_channel_post
    update types carry a message; any other update type fails the check.

    :param ctx: Handler context carrying the current update
    :param types: Accepted message types
    :return: True or False
    """
    # Map each supported update type to the attribute holding its message.
    message_attrs = {
        UpdateType.message: 'message',
        UpdateType.edited_message: 'edited_message',
        UpdateType.channel_post: 'channel_post',
        UpdateType.edited_channel_post: 'edited_channel_post',
    }
    attr = message_attrs.get(ctx.update.update_type)
    if attr is None:
        return False
    message = getattr(ctx.update, attr)
    return message.message_type in types
def VSphere(R):
    """
    Return the volume of a sphere of radius R, i.e. (4/3) * pi * R**3.
    """
    radius_cubed = R * R * R
    return 4. * math.pi * radius_cubed / 3.
def test_user_level_override_base_level_with_same_name():
    """Test that a user level overriding a base level of the same name finds flags."""
    rsc_dir = os.path.join(os.path.dirname(__file__), "rsc")
    config = Config(
        os.path.join(rsc_dir, "config.yaml"),
        os.path.join(rsc_dir, "user-level-same-name.yaml"),
    )
    # The user-level file wins for pylint and adds nothing for make.
    assert config.get_tool_config("pylint", "threshold", "flags") == "--user-override"
    assert config.get_tool_config("make", "threshold", "flags") is None
def resolve_variable(
    var_name: str,
    var_def: BlueprintVariableTypeDef,
    provided_variable: Optional[Variable],
    blueprint_name: str,
) -> Any:
    """Resolve a provided variable value against its definition.

    Args:
        var_name: The name of the defined variable on a blueprint.
        var_def: A dictionary representing the defined variables attributes.
        provided_variable: The variable value provided to the blueprint.
        blueprint_name: The name of the blueprint that the variable is being
            applied to.

    Returns:
        The resolved variable value, could be any python object.

    Raises:
        MissingVariable: Raised when a variable with no default is not
            provided a value.
        UnresolvedBlueprintVariable: Raised when the provided variable is
            not already resolved.
        ValueError: Raised when the value is not the right type and cannot be
            cast as the correct type. Raised by
            :func:`runway.cfngin.blueprints.base.validate_variable_type`
        ValidatorError: Raised when a validator raises an exception. Wraps the
            original exception.

    """
    if "type" not in var_def:
        raise VariableTypeRequired(blueprint_name, var_name)
    var_type = var_def["type"]

    if provided_variable:
        if not provided_variable.resolved:
            raise UnresolvedBlueprintVariable(blueprint_name, provided_variable)
        value = provided_variable.value
    else:
        # No value was provided; fall back to the definition's default,
        # if one exists.
        if "default" not in var_def:
            raise MissingVariable(blueprint_name, var_name)
        value = var_def["default"]

    # Apply the validator when one is defined; otherwise pass through.
    validator = var_def.get("validator", lambda v: v)
    try:
        value = validator(value)
    except Exception as exc:
        raise ValidatorError(var_name, validator.__name__, value, exc) from exc

    # Ensure that the resulting value is the correct type
    value = validate_variable_type(var_name, var_type, value)

    allowed_values = var_def.get("allowed_values")
    if not validate_allowed_values(allowed_values, value):
        raise ValueError(
            f"Invalid value passed to {var_name} in Blueprint {blueprint_name}. "
            f"Got '{value}', expected one of {allowed_values}."
        )

    return value
def launch_servers_and_wait():
    """
    Run a prometheus and grafana server, then suspend the thread
    (ensuring prometheus remains up in case a task shuts it down).

    Blocks until Ctrl-C; both servers are shut down on exit via the
    ``finally`` block.
    """
    try:
        print("Servers launching...")
        if not launch_grafana_server():
            print("Issue launching grafana, see above")
            return
        if not launch_prometheus_server():
            print("Issue launching prometheus, see above")
            return
        # Give grafana a moment to bind before advertising the URL.
        # (Removed spurious f-prefixes on strings with no placeholders.)
        print("Waiting for grafana server to come up.")
        time.sleep(3)
        dash_url = get_dash_url()
        print(f"Dashboard is now running, you can access it at http://{dash_url}")
        print("===========================")
        print("| Default username: admin |")
        print("| Default password: admin |")
        print("===========================")
        while True:
            # Relaunch the server in case it's shut down by a
            # task thread; poll for the PID file every few seconds.
            time.sleep(5)
            if not os.path.exists(PROMETHEUS_PID_FILE):
                launch_prometheus_server()
    except KeyboardInterrupt:
        print("Caught Ctrl-C, shutting down servers")
    finally:
        shutdown_grafana_server()
        shutdown_prometheus_server()
def generate_navbar(structure, pathprefix):
    """
    Returns 2D list containing the nested navigational structure of the website

    Args:
        structure: Site structure dict with a 'sections' mapping. Each section
            has a 'title', an optional 'navtitle' override, and an optional
            'pages' mapping using the same title/navtitle convention.
        pathprefix: Path component inserted between the site root and each
            section/page URL.

    Returns:
        A list of (url, name, title, has_subpages, pages) tuples, where
        pages is a list of (url, name, title) tuples. Sections and pages
        whose effective title is empty are hidden from the navigation.
    """
    navbar = []
    for section, section_data in structure['sections'].items():
        section_url = os.path.join('/', pathprefix, section + '/')
        # 'navtitle' overrides 'title' for display in the navigation
        section_title = section_data.get('navtitle', section_data['title'])
        section_hassub = 'pages' in section_data
        navbar_section = []
        if section_hassub:
            # Reuse section_data/page_data instead of re-indexing the full
            # structure['sections'][section]['pages'][page] chain each time.
            for page, page_data in section_data['pages'].items():
                url = os.path.join('/', pathprefix, section, page)
                title = page_data.get('navtitle', page_data['title'])
                if title:
                    # Only add a page to the navigation if it has a title,
                    # otherwise it's hidden
                    navbar_section.append((url, page, title))
        if section_title:
            navbar.append((section_url, section, section_title, section_hassub, navbar_section))
    return navbar
def process_map(file_in, validate):
    """Iteratively process each XML element and write to csv(s)"""
    with codecs.open(NODES_PATH, 'w') as nodes_out, \
            codecs.open(NODE_TAGS_PATH, 'w') as node_tags_out, \
            codecs.open(WAYS_PATH, 'w') as ways_out, \
            codecs.open(WAY_NODES_PATH, 'w') as way_nodes_out, \
            codecs.open(WAY_TAGS_PATH, 'w') as way_tags_out:

        # One writer per output csv, each with its own field list.
        node_writer = UnicodeDictWriter(nodes_out, NODE_FIELDS)
        node_tag_writer = UnicodeDictWriter(node_tags_out, NODE_TAGS_FIELDS)
        way_writer = UnicodeDictWriter(ways_out, WAY_FIELDS)
        way_node_writer = UnicodeDictWriter(way_nodes_out, WAY_NODES_FIELDS)
        way_tag_writer = UnicodeDictWriter(way_tags_out, WAY_TAGS_FIELDS)
        for writer in (node_writer, node_tag_writer, way_writer,
                       way_node_writer, way_tag_writer):
            writer.writeheader()

        validator = cerberus.Validator()

        for element in get_element(file_in, tags=('node', 'way')):
            shaped = shape_element(element)
            if not shaped:
                continue
            if validate is True:
                validate_element(shaped, validator)
            if element.tag == 'node':
                node_writer.writerow(shaped['node'])
                node_tag_writer.writerows(shaped['node_tags'])
            elif element.tag == 'way':
                way_writer.writerow(shaped['way'])
                way_node_writer.writerows(shaped['way_nodes'])
                way_tag_writer.writerows(shaped['way_tags'])
# Note: Validation is ~ 10X slower. For the project consider using a small
# sample of the map when validating. | 26,094 |
def additional_args(**kwargs):
    """
    Additional command-line arguments.

    Provides additional command-line arguments that are unique to the
    extraction process.

    Parameters
    ----------
    **kwargs : dict
        Optional keyword arguments; 'module_name' selects which module's
        defaults to load (defaults to this module).

    Returns
    -------
    additional_args : dict
        Dictionary of tuples in the form (fixed,keyword) that can be passed
        to an argument parser to create a new command-line option
    """
    module_name = kwargs.get('module_name', __name__)
    base_defaults = get_defaults(module_name)

    additional_args = {}

    table_help = "The input metadata table to use."
    table_args = ['table']
    table_kwargs = {'help': table_help}
    additional_args['table'] = (table_args, table_kwargs)

    bkg_help = "Whether to subtract background before or after applying "
    bkg_help += "flatfield. Default is 'flat_first'. Available options are "
    bkg_help += "'flat_first', 'bkg_first' and 'bkg_only'."
    bkg_args = ['-b', '--bkg_flat_order']
    bkg_kwargs = {'dest': 'bkg_flat_order', 'default': base_defaults['bkg_flat_order'],
                  'help': bkg_help}
    additional_args['bkg_flat_order'] = (bkg_args, bkg_kwargs)

    plots_help = "Include result plots while running (default False)."
    plots_args = ["-p", "--plots"]
    # BUGFIX: the help entry previously referenced the undefined name
    # 'trace_help' (a copy/paste remnant), raising NameError at call time.
    plots_kwargs = {'dest': 'plots', 'action': 'store_true',
                    'default': base_defaults['plots'], 'help': plots_help}
    additional_args['plots'] = (plots_args, plots_kwargs)

    return additional_args
def extract_images_url(url, source):
    """
    Extract image url for a chapter

    Fetches the chapter page and selects image URLs with the xpath
    expression registered for the given source; returns None for an
    unrecognized source.
    """
    xpath_by_source = {
        'blogtruyen': '//*[@id="content"]/img/@src',
        'nettruyen': '//*[@class="reading-detail box_doc"]/div/img/@src',
        'image-container-manga': '//*[@class="image-container-manga"]/div/img/@src',
    }
    response = s.get(url)
    tree = html.fromstring(response.text)
    expression = xpath_by_source.get(source)
    if expression is not None:
        return tree.xpath(expression)
def _ntuple_paths(
    general_path: str,
    region: Dict[str, Any],
    sample: Dict[str, Any],
    systematic: Dict[str, Any],
    template: Optional[Literal["Up", "Down"]],
) -> List[pathlib.Path]:
    """Returns the paths to ntuples for a region-sample-systematic-template.

    Starts from the path given in the general options of the configuration
    file, which may contain ``{RegionPath}`` and ``{SamplePaths}``
    placeholders substituted with region- and sample-specific settings. For
    non-nominal templates, both settings can be overridden per template.
    Since ``SamplePaths`` may be a list, one path is returned per entry.

    Args:
        general_path (str): path specified in general settings, with sections
            that can be overridden by region / sample settings
        region (Dict[str, Any]): containing all region information
        sample (Dict[str, Any]): containing all sample information
        systematic (Dict[str, Any]): containing all systematic information
        template (Optional[Literal["Up", "Down"]]): template considered:
            "Up", "Down", or None for nominal

    Returns:
        List[pathlib.Path]: list of paths to ntuples
    """
    region_path = region.get("RegionPath", None)
    sample_paths = sample.get("SamplePaths", None)

    # Non-nominal templates may override the region / sample settings.
    if template is not None:
        region_override = _check_for_override(systematic, template, "RegionPath")
        if region_override is not None:
            region_path = region_override
        sample_override = _check_for_override(systematic, template, "SamplePaths")
        if sample_override is not None:
            sample_paths = sample_override

    has_region_placeholder = "{RegionPath}" in general_path
    if region_path is None:
        # A placeholder with nothing to fill it is a configuration error.
        if has_region_placeholder:
            raise ValueError(f"no path setting found for region {region['Name']}")
    else:
        if not has_region_placeholder:
            log.warning(
                "region override specified, but {RegionPath} not found in default path"
            )
        general_path = general_path.replace("{RegionPath}", region_path)

    has_sample_placeholder = "{SamplePaths}" in general_path
    if sample_paths is None:
        if has_sample_placeholder:
            raise ValueError(f"no path setting found for sample {sample['Name']}")
        # no SamplePaths present, a single path suffices
        path_list = [general_path]
    else:
        if not has_sample_placeholder:
            log.warning(
                "sample override specified, but {SamplePaths} not found in default path"
            )
        # SamplePaths can be a list, so construct one path per entry
        sample_paths = configuration._setting_to_list(sample_paths)
        path_list = [
            general_path.replace("{SamplePaths}", sample_path)
            for sample_path in sample_paths
        ]

    return [pathlib.Path(path) for path in path_list]
def svn_ra_do_diff2(*args):
    """
    svn_ra_do_diff2(svn_ra_session_t session, svn_revnum_t revision, char diff_target,
        svn_boolean_t recurse, svn_boolean_t ignore_ancestry,
        svn_boolean_t text_deltas,
        char versus_url, svn_delta_editor_t diff_editor,
        void diff_baton, apr_pool_t pool) -> svn_error_t

    SWIG-generated wrapper: forwards all positional arguments unchanged to
    the C implementation in the ``_ra`` extension module and returns its
    result (an ``svn_error_t``, per the generated signature above).
    """
    return _ra.svn_ra_do_diff2(*args)
def ntuple_dict_length(ntuple_dict):
    """Returns a dictionary from track types to the number of tracks of
    that type.

    Args:
        ntuple_dict: mapping from track type to a track properties dict.

    Returns:
        dict mapping each track type to the count computed by
        ``track_prop_dict_length``.

    Raises:
        Whatever ``track_prop_dict_length`` raises when the value lists
        within a track properties dict are different lengths.
    """
    # Dict comprehension over .items() replaces the previous
    # map(two-arg lambda, keys(), values()) construction, which relied on
    # keys() and values() staying aligned and built a dict from a map object.
    return {
        track_type: track_prop_dict_length(track_prop_dict)
        for track_type, track_prop_dict in ntuple_dict.items()
    }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.