content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def check_mca(config, should_be_mca, msg = None):
    """Verify the configured account's multi-client status matches expectations.

    Exits the program with status 1 (after printing ``msg`` or a default
    explanation) when the 'isMCA' flag in ``config`` disagrees with
    ``should_be_mca``.
    """
    account_is_mca = config['isMCA'] if 'isMCA' in config else False
    if should_be_mca == account_is_mca:
        return
    if msg is not None:
        print(msg)
    else:
        print('For this sample, you must%s use a multi-client account.' %
              (' not' if account_is_mca else ''))
    sys.exit(1)
def ban_user(request, user):
    """Bans a given user.

    :param request: current request; provides ``next_url``, ``user`` and ``flash``
    :param user: username of the account to ban
    :return: redirect to ``request.next_url`` (or the admin bans page)
    :raises NotFound: if no user with that username exists
    """
    user = User.query.filter_by(username=user).first()
    if user is None:
        raise NotFound()
    # renamed from `next` to avoid shadowing the builtin
    redirect_target = request.next_url or url_for('admin.bans')
    if user.is_banned:
        request.flash(_(u'The user is already banned.'))
        return redirect(redirect_target)
    if user == request.user:
        # guard against admins locking themselves out
        request.flash(_(u'You cannot ban yourself.'), error=True)
        return redirect(redirect_target)
    admin_utils.ban_user(user)
    request.flash(_(u'The user “%s” was successfully banned and notified.') %
                  user.username)
    return redirect(redirect_target)
def lastDate():
    """Return the total revenue of the most recent day.

    Reads the last row of ``totalDate()`` and returns its 'total' value
    rounded to two decimals.

    Returns:
        The rounded 'total' value of the latest date row.
    """
    # renamed local so it no longer shadows this function's own name
    latest_row = totalDate().tail(1)
    return latest_row.iloc[0]['total'].round(2)
def processlingrarow(col, rows, pixelWidth, pixelHeight, xO, yO, plot, netcdffile, rsdir, becsmosdir):
    """
    Launch processing of one full column of pixels for LingraRS
    :param col: column in Grassland raster file
    :param rows: total rows in Grassland raster file
    :param pixelWidth: Projected pixel width size
    :param pixelHeight: Projected pixel height size
    :param xO: Projected X origin
    :param yO: Projected Y Origin
    :param plot: Do you plot graphs of the run (False or True)
    :param netcdffile: ERA5 netcdf file to extract weather data from
    :param rsdir: RS data directory
    :param becsmosdir: RS data directory for BEC-SMOS 1km soil moisture
    :return: list of per-pixel rows [col, row, tiller*1000, yielD*1000,
        wlvg*1000, wlvd1*1000, wa*1000, grass*1000, tracu, evacu]
    """
    # Import local libraries
    from libmkMeteo import mkmeteo4lingrars
    # import main lingraRS library
    from liblingraRS import lingrars
    result = []
    for row in range(rows):
        # NOTE(review): longitude is loop-invariant (depends only on col) and
        # could be hoisted out of the loop.
        longitude = col * pixelWidth + xO
        latitude = yO - row * pixelHeight
        # print(col, row, longitude, latitude, data)
        # Create the Meteo and RS data parameterisation for lingraRS
        meteolist = mkmeteo4lingrars(netcdffile, rsdir, becsmosdir, longitude, latitude)
        # Run the model
        (tiller, yielD, wlvg, wlvd1, wa, grass, tracu, evacu) = lingrars(latitude, meteolist, plot)
        # exit() TODO plot the graphs and check if all ok
        # Let the pixels fit into each map (*1000 bc INT32 maps)
        # TODO check values out for print("parcu=", parcu)
        result.append([col, row, tiller * 1000, yielD * 1000, wlvg * 1000, wlvd1 * 1000, wa * 1000, grass * 1000, tracu, evacu])
    return result
def multitask_result(request):
    """Return the per-host log entries of a multi-host task as a JSON response."""
    task = models.Task.objects.get(id=request.GET.get('task_id'))
    fields = ('id', 'status',
              'host_user_bind__host__hostname',
              'host_user_bind__host__ip_addr',
              'result')
    log_entries = list(task.tasklog_set.values(*fields))
    return HttpResponse(json.dumps(log_entries))
async def ctrl_handler(client, user, operation, payload, db):
    """
    Handler for *Inspection Control* API
    :param client: client that sent payload
    :type client: websockets.WebSocketCommonProtocol
    :param user: user that sent payload
    :type user: ecusers.User
    :param operation: operation specified in payload
    :type operation: str
    :param payload: received payload
    :type payload: dict[str, Any] | None
    :param db: database object
    :type db: ecdatabase.Database
    """
    if operation == "get":
        # Requester asks for the current inspection data -> push to that client only
        await push_to_ctrl(db, client)
    elif operation == "save":
        # safe_extract validates the payload shape; returns None on bad input
        if (row := await ech.safe_extract(client, payload, {eclib.db.inspection.team_num: str, eclib.db.inspection.form_data: str, eclib.db.inspection.result: int})) is not None:
            await db.update(eclib.db.inspection.table_, [(eclib.db.inspection.team_num, "==", row[eclib.db.inspection.team_num])], row)
            # Broadcast the change to all control clients and the affected team
            await push_to_ctrl(db)
            await push_to_team(ecusers.User.find_user(row[eclib.db.inspection.team_num]), db)
    elif operation == "getInspect":
        await load_inspection_form(payload, client, db)
    elif operation == "invite":
        # Only open the inspection form if the queue invite succeeded
        if await ecmodules.queue.ctrl_invite(payload, client, user, db):
            await load_inspection_form(payload, client, db)
    elif operation == "remove":
        await ecmodules.queue.ctrl_remove(payload, client, user, db)
def process_xpath_list(node, property_manifest: Dict):
    """
    Run every XPath expression listed in the property manifest against
    ``node`` and return the processed values.
    :param node: Input node
    :param property_manifest: Manifest snippet of the property
    :return: List of values
    """
    def resolve(context, path):
        """Treat "." as the context node itself; evaluate anything else as XPath."""
        return [context] if path == "." else context.xpath(path)
    if not node:
        return []
    values = []
    for path in property_manifest["xpath"]:
        for child in resolve(node, path):
            values.append(process_property_value(child, property_manifest))
    return values
def collect_username_and_password(db: Session) -> UserCreate:
    """Prompt for a username and password (with confirmation), verify the
    two password entries match, and make sure the username is not already
    taken before returning the new user's data."""
    username = get_username("Enter your username: ")
    password = get_password("Enter your password: ")
    confirmation = get_password("Enter your password again: ")
    if password != confirmation:
        raise Exception("Passwords do not match.")
    candidate = UserCreate(username=username, password=password)
    existing = FidesopsUser.get_by(db, field="username", value=candidate.username)
    if existing:
        raise Exception(f"User with username '{username}' already exists.")
    return candidate
def replace_header(input_df):
    """Promote the first row of the dataframe to be its column header.

    Returns the dataframe without its first row, with columns renamed to
    the values that row contained.
    """
    header_row = input_df.iloc[0]
    body = input_df[1:]
    body.columns = header_row
    return body
def verify():
    """
    Manual verification of PSDs

    Reads the outlier threshold from the UI, shells out to ``cli.py verify``,
    and reports success/failure in the UI's error browser.
    """
    #update threshold
    threshold= ui.threshEdit.text()
    msg=subprocess.run(["python", os.path.join(script_dir,r"cli.py"), "verify", "--outlier_threshold", threshold])
    # Non-zero return code: surface the failure in the UI and bail out
    if msg.returncode != 0:
        ui.errorBrowser.setText(_translate("SAKEDSP",'ERROR: Unable to verify... \nCheck terminal for errors'))
        return
    ui.errorBrowser.setText(_translate("SAKEDSP",'Verified!'))
    updateImage(os.path.join(script_dir,r"images\bomb4.png"))
def cartToRadiusSq(cartX, cartY):
    """Return the squared radius (x*x + y*y) of a Cartesian point."""
    return cartX * cartX + cartY * cartY
def unique_entity_id(entity):
    """Build an HTML-safe token unique per model type and primary key.

    :param entity: django model
    :return: string of the form "<ClassName>-<id>"
    """
    class_name = type(entity).__name__
    return str(class_name) + "-" + str(entity.id)
def normalize_tuple(value, n, name):
    """Transforms a single int or iterable of ints into an int tuple.
    # Arguments
    value: The value to validate and convert. Could be an int, or any iterable
    of ints.
    n: The size of the tuple to be returned.
    name: The name of the argument being validated, e.g. `strides` or
    `kernel_size`. This is only used to format error messages.
    # Returns
    A tuple of n integers.
    # Raises
    ValueError: If something else than an int/long or iterable thereof was
    passed.
    """
    if isinstance(value, int):
        return (value,) * n
    try:
        value_tuple = tuple(value)
    except TypeError:
        raise ValueError('The `{}` argument must be a tuple of {} '
                         'integers. Received: {}'.format(name, n, value))
    if len(value_tuple) != n:
        raise ValueError('The `{}` argument must be a tuple of {} '
                         'integers. Received: {}'.format(name, n, value))
    for single_value in value_tuple:
        try:
            int(single_value)
        # Also catch TypeError: int(None) raises TypeError, not ValueError,
        # and previously leaked out instead of the documented ValueError.
        except (ValueError, TypeError):
            raise ValueError('The `{}` argument must be a tuple of {} '
                             'integers. Received: {} including element {} '
                             'of type {}'.format(name, n, value, single_value,
                                                 type(single_value)))
    return value_tuple
def load_user(user_id):
    """User loader callback for ``@login_manager.user_loader``.

    Given the id stored in the session, look up and return the matching
    User from the database (None if it no longer exists).
    """
    uid = int(user_id)
    return User.query.get(uid)
def triage(sc, BOT_ID, kcache):
    """
    We read and triage all messages. Messages directed to the bot will
    be triaged to the command handler. Otherwise, the output is parsed for
    special events
    :param sc: Slack client providing rtm_read()/rtm_send_message()
    :param BOT_ID: Slack user id of the bot (without the <@...> wrapping)
    :param kcache: karma cache, passed through to handle_word()
    """
    # special events - Karma up or down, or @bot; add
    AT_BOT = '<@' + BOT_ID + '>'
    question = re.compile('.+\?{1,1}$')  # ends with a '?' ({1,1} is equivalent to a plain \?)
    weblink = re.compile('^<http.+>$') # slack doesn't handle <link>
    for slack_message in sc.rtm_read():
        text = slack_message.get('text')
        user = slack_message.get('user')
        channel = slack_message.get('channel')
        if not text or not user:
            continue
        if user == 'USLACKBOT':
            logger.debug(
                'USLACKBOT sent message {} which is ignored'.format(text))
            continue
        # Need to add users to ignore here - if user in "ignore list"....
        text_list = text.split()
        if text_list[0] == AT_BOT and len(text_list) > 1:
            logger.debug('Message directed at bot: {}'.format(text))
            handle_command(sc, text_list, channel)
            continue
        elif question.search(text_list[0]) and len(text_list) == 1:
            # Single-token question like "foo?" -> factoid lookup
            word = text_list[0].strip('?')
            if dbopts.also_ask(word):
                # NOTE(review): also_ask() is called twice for the same word;
                # presumably cheap, but could be collapsed into one call -- confirm.
                also = dbopts.also_ask(word)
                if weblink.search(also):
                    logger.debug('trimming web link {}'.format(also))
                    also = also.strip('<>').split('|')[0]
                sc.rtm_send_message(
                    channel, 'I remember hearing that {} is also {}'.format(
                        word, also))
                continue
        else: # karma and shame here
            # Deduplicate tokens before handing them to the karma handler
            for word in list(set(text_list)):
                if handle_word(sc, word, kcache, user, channel):
                    continue
def replaceall(table, a, b):
    """
    Convenience function to replace all instances of `a` with `b` under all
    fields. See also :func:`convertall`.
    .. versionadded:: 0.5
    """
    replacement = {a: b}
    return convertall(table, replacement)
def test_factory():
    """
    test ontology factory using wikidata as source and using PTSD.

    Network-dependent integration test: it queries live wikidata, so the
    results (and the final label assertion) may drift over time.
    """
    f = OntologyFactory()
    ont = f.create('wdq:Q544006')
    for n in ont.nodes():
        print('{} "{}"'.format(n,ont.label(n)))
    qids = ont.search('anxiety%')
    assert len(qids) > 0
    print(qids)
    # Walk both ancestors and descendants of every hit
    nodes = ont.traverse_nodes(qids, up=True, down=True)
    print(nodes)
    assert len(nodes) > 0
    labels = [ont.label(n) for n in nodes]
    print(labels)
    # Note: it's possible wd may change rendering this false
    assert 'fear of frogs' in labels
    from ontobio.io.ontol_renderers import GraphRenderer
    w = GraphRenderer.create('tree')
    w.write(ont, query_ids=qids)
def get_job_metadata(ibs, jobid):
    """
    Web call that returns the metadata of a job
    CommandLine:
        # Run Everything together
        python -m wbia.web.job_engine --exec-get_job_metadata
        # Start job queue in its own process
        python -m wbia.web.job_engine job_engine_tester --bg
        # Start web server in its own process
        ./main.py --web --fg
        pass
        # Run foreground process
        python -m wbia.web.job_engine --exec-get_job_metadata:0 --fg
    Example:
        >>> # xdoctest: +REQUIRES(--web-tests)
        >>> # xdoctest: +REQUIRES(--slow)
        >>> # xdoctest: +REQUIRES(--job-engine-tests)
        >>> # xdoctest: +REQUIRES(--web-tests)
        >>> from wbia.web.job_engine import * # NOQA
        >>> import wbia
        >>> with wbia.opendb_bg_web('testdb1', managed=True) as web_ibs: # , domain='http://52.33.105.88')
        ...     # Test get metadata of a job id that does not exist
        ...     response = web_ibs.send_wbia_request('/api/engine/job/metadata/', jobid='badjob')
    """
    # Delegate to the job manager's job interface; return its metadata as-is
    status = ibs.job_manager.jobiface.get_job_metadata(jobid)
    return status
def create_learning_rate_scheduler(max_learn_rate, end_learn_rate, warmup_proportion, n_epochs):
    """Build a Keras learning-rate scheduler: linear warm-up, then an
    exponential decay converging to ``end_learn_rate``.
    Args:
        max_learn_rate: Float. Maximum learning rate.
        end_learn_rate: Float. Scheduler converges to this value.
        warmup_proportion: Float. Fraction of epochs spent warming up linearly.
        n_epochs: Float. Maximum number of epochs training will run.
    Returns:
        Keras learning rate scheduler
    """
    def lr_scheduler(epoch):
        warmup_epochs = int(warmup_proportion * n_epochs)
        if epoch < warmup_epochs:
            # Linear ramp up to max_learn_rate over the warm-up epochs
            return float((max_learn_rate / warmup_epochs) * (epoch + 1))
        decay_rate = math.log(end_learn_rate / max_learn_rate)
        progress = (epoch - warmup_epochs + 1) / (n_epochs - warmup_epochs + 1)
        return float(max_learn_rate * math.exp(decay_rate * progress))
    return tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)
def test_wrong_drm_controller_address(accelize_drm, conf_json, cred_json, async_handler):
    """Test when a wrong DRM Controller offset is given"""
    async_cb = async_handler.create()
    async_cb.reset()
    driver = accelize_drm.pytest_fpga_driver[0]
    # Deliberately corrupt the controller base address; restored in `finally`
    ctrl_base_addr_backup = driver._drm_ctrl_base_addr
    driver._drm_ctrl_base_addr += 0x10000
    try:
        with pytest.raises(accelize_drm.exceptions.DRMCtlrError) as excinfo:
            drm_manager = accelize_drm.DrmManager(
                conf_json.path, cred_json.path,
                driver.read_register_callback,
                driver.write_register_callback,
                async_cb.callback
            )
        assert 'Unable to find DRM Controller registers.' in str(excinfo.value)
        assert 'Please verify' in str(excinfo.value)
    finally:
        # Always restore the valid address so later tests are unaffected
        driver._drm_ctrl_base_addr = ctrl_base_addr_backup
def matchyness(section, option):
    """Assign numerical 'matchyness' value between target and value
    Parameters:
    section -- target value
    option -- proposed match
    """
    # NOTE(review): branches below only run when section == option, so the
    # token types presumably define __eq__ such that placeholders compare
    # equal to strings -- confirm against rt.flask_placeholder.
    if section != option:
        return _hc.NEQ
    if isinstance(section, rt.flask_placeholder):
        if isinstance(option, rt.flask_placeholder):
            return _hc.PP #Placeholder - placeholder
        else:
            return _hc.PS #Placeholder - string
    else:
        if option.value == section.value:
            return _hc.SS #String - string
        elif isinstance(option, rt.flask_placeholder):
            return _hc.SP #String - placeholder
        else:
            return _hc.NEQ
def log_sum_exp_elem(*a):
    """Numerically stable log-sum-exp of the given elements.

    :param a: elements
    :return: (a[0].exp() + a[1].exp() + ...).log()
    """
    # Shift by the (detached) max so the exponentials cannot overflow.
    pivot = max(a).detach()
    shifted_total = sum((elem - pivot).exp() for elem in a)
    return pivot + shifted_total.log()
def car_portrayal(agent):
    """Visualises the cars for the Mesa webserver
    :return: Dictionary containing the settings of an agent"""
    if agent is None:
        return
    portrayal = {}
    if isinstance(agent, CarAgent):
        # Cars spawned by a traffic light are drawn as squares, others as circles
        if agent.is_from_traffic_light:
            portrayal.update({"Shape": "rect", "w": .7, "h": .7})
        else:
            portrayal.update({"Shape": "circle", "r": .9})
        portrayal["Layer"] = 0
        portrayal["Filled"] = "true"
        # colour encodes the car's current velocity
        portrayal["Color"] = colour_spectrum[agent.velocity - 1]
    return portrayal
def _load_spc_format_type_a(filepath: str):
    """load A(w,k) in the spc format type a
    Args:
        filepath (str): path of the spc file to read
    Returns:
        np.ndarray, np.ndarray, np.ndarray, np.ndarray, OrderedDict:
        kcrt (0-based indices of high-symmetry k-points), Awk, kdist,
        energy, kpath (index -> label; labels are None for the short
        "(a1)" format, which carries no k-point names)
    """
    with open(filepath) as f:
        lines = [line.strip() for line in f.readlines()]
    lines_iter = iter(lines)
    line = next(lines_iter)
    s = line.split()
    internal_format_type = s[-1]
    if internal_format_type == "(a)":
        # Consume the "emin emax ne nhighsymkp" header line; the values were
        # only parsed inside a dead `if False:` block and are unused.
        line = next(lines_iter)
        line = next(lines_iter)
        kstr_list = re.split(" +", line)
        kpath = OrderedDict()
        kcrt = []
        for kstr in kstr_list[1:]:
            s = kstr.split()
            idx = int(s[0]) - 1  # index convert from fortran to Python
            name = " ".join(s[1:]).replace("'", "")
            kpath[idx] = name
            kcrt.append(idx)
        kcrt = np.array(kcrt)
    elif internal_format_type == "(a1)":  # short format without kpoints
        line = next(lines_iter)
        kstr_list = line.split()
        kpath = OrderedDict()
        kcrt = []
        for kstr in kstr_list[2:]:
            s = kstr.split()
            idx = int(s[0]) - 1  # index convert from fortran to Python
            kpath[idx] = None  # short format carries no k-point label
            kcrt.append(idx)
        kcrt = np.array(kcrt)
    # Skip the rest of the header
    while True:
        line = next(lines_iter)
        if line.startswith("### end of header"):
            break
    line = next(lines_iter)
    Awk, kdist, energy = _load_spc_format_type_a_Awk(lines_iter)
    return kcrt, Awk, kdist, energy, kpath
def worker(full_img_list, start_idx, end_idx, kwargs):
    """
    Worker process.

    Iterates over ``full_img_list[start_idx:end_idx]``. The actual
    RGB-to-grayscale conversion is currently stubbed out (the imageio
    read is commented away), so this only logs each file path.
    :param full_img_list: list of image file paths
    :param start_idx: first index (inclusive) handled by this worker
    :param end_idx: last index (exclusive) handled by this worker
    :param kwargs: extra options; currently unused
    """
    print(f'Processing {start_idx}->{end_idx}')
    # process each image between start_idx and end_idx
    for img_idx in range(start_idx, end_idx):
        img_fpath = full_img_list[img_idx]
        print(f'Converting {img_idx}"th RGB to grayscale object class img...')
        print(img_fpath)
        # img = imageio.imread(img_fpath)
def variable_to_json(var):
    """Serialise a Variable into a plain dict suitable for JSON encoding."""
    return {
        'x': var.x,
        'y': var.y,
        'name': var.name,
    }
def convNodeToProblems(graphList,vecGraphList,masterEncoder,genre=["C","V"],targetParams=CF.targetParams):
    """
    graphList: list of graphs (before vectorization)
    vecGraphList: list of vectorized graphs (of graphList)
    masterEncoder: masterEncoder
    genre: genre to make problems: C: compound , V: numeric value, O: others *** this mode may not work..?
    targetParams: target parametes to make problems
    return: list of problem-type vectorized graphs, list of answers, list of parameter names of the answer
    """
    probVecGraphList=[]
    targetList=[]
    neighborNodeNameList=[]
    print("converting nodes to problems")
    for graphID,vecGraph in tqdm(enumerate(vecGraphList)):
        for node in vecGraph.nodes:
            # Classify the node: compound ("C_" prefix), numeric value, or other
            nodeLabel=graphList[graphID].nodes[node]["label"]
            if str(nodeLabel).startswith("C_"):
                category="C"
            elif is_num(nodeLabel):
                category="V"
            else:
                category="O"
            #TODO: following codes are too complex and not clear....
            flg=False
            #find nodes of target parameters
            for neighborNode in graphList[graphID].neighbors(node):
                neighborNodeName=graphList[graphID].nodes[neighborNode]["label"]
                if CF.targetParamMode and neighborNodeName in targetParams:
                    flg=True
                    break
            #TODO: this func may not work with False..
            # NOTE(review): when targetParamMode is False and a node has no
            # neighbors, neighborNodeName is never assigned here but is
            # appended below -> potential NameError. Confirm node degree >= 1.
            if CF.targetParamMode==False:
                flg=True
            #TODO: genre mode may not work
            if category in genre and flg ==True:
                g,target=convGraphToProblem(vecGraph,node,masterEncoder)
                probVecGraphList.append(g)
                neighborNodeNameList.append(neighborNodeName)
                # "V"-only mode keeps just the final (numeric) component of the target
                if genre==["V"]:
                    targetList.append([target[-1]])
                else:
                    targetList.append(target[CF.CATEGORY_DIM:])
    return probVecGraphList,targetList,neighborNodeNameList
def merge_log_err(hass):
    """Patch _merge_log_error from packages.

    Pytest yield-fixture body: yields the mocked
    ``homeassistant.config._LOGGER.error`` so tests can assert on logged
    package-merge errors; the patch is undone on teardown.
    """
    with mock.patch("homeassistant.config._LOGGER.error") as logerr:
        yield logerr
def create_white_edge_cost_image(t1_file, t2_file, gm_proba_file, out_file):
    """
    Compute a white-matter edge cost image: the summed T1/T2 gradient
    magnitudes weighted by (1 - gray-matter probability).
    :param t1_file: path to the T1-weighted image
    :param t2_file: path to the T2-weighted image
    :param gm_proba_file: path to the gray-matter probability image
    :param out_file: output path for the cost image
    :return: absolute path of the written cost image
    """
    import SimpleITK as sitk
    import os
    gm_proba = sitk.ReadImage(gm_proba_file)
    negative_gm_proba = 1 - gm_proba
    t1 = sitk.ReadImage(t1_file)
    t2 = sitk.ReadImage(t2_file)
    t1_gradient = sitk.GradientMagnitude(t1)
    t2_gradient = sitk.GradientMagnitude(t2)
    # Cast so the multiplication below operates on matching pixel types
    multi_modal_gradient = sitk.Cast(
        (t1_gradient + t2_gradient), negative_gm_proba.GetPixelID()
    )
    cost_image = multi_modal_gradient * negative_gm_proba
    out_file = os.path.abspath(out_file)
    sitk.WriteImage(cost_image, out_file)
    return out_file
def moments_simps(n, x, y):
    """Print the moments of density ``n`` up to third order, computed with
    Simpson-rule integration over the ``x`` and ``y`` grids.

    :param n: 2-D density sampled on the (x, y) grid -- assumes axis order
        matches the nested simps(simps(...)) calls; confirm against callers
    :param x: 1-D grid for the inner integration axis
    :param y: 1-D grid for the outer integration axis
    """
    print("mu00 = ", simps(simps(n, x), y))
    print("mu10 = ", simps(simps(x*n, x), y), " mu01 = ", simps(simps(y*n, y), x))
    print("mu11 = ", simps(simps(x*n, x)*y, y))
    print("mu20 = ", simps(simps(x*x*n, x), y)," mu02 = ", simps(simps(y*y*n, y),x))
    print("mu21 = ", simps(simps(x*x*n, x)*y, y),
          " mu12 = ", simps(simps(y*y*n, y)*x, x))
    print("mu30 = ", simps(simps(x*x*x*n, x), y),
          " mu03 = ", simps(simps(y*y*y*n, y),x))
def check_and_train(opt, output_path):
    """Train the experiment described by ``opt`` unless it already completed.

    :param opt: Experiment instance. It contains the output path for the
        experiment under study
    :param output_path: base output path used for the dataset and for the
        flag files that mark completed experiments
    """
    if opt.train_completed:
        # Nothing to do: this experiment already has results on disk
        print("Object: ", opt)
        print("Experiment already trained in " + opt.output_path)
        return
    if not os.path.exists(opt.output_path):
        os.makedirs(opt.output_path)
    train_network(opt, output_path + opt.dataset.dataset_name)
    # Drop an empty marker file so future runs can skip this experiment
    flag_completed_dir = join(output_path, 'flag_completed')
    os.makedirs(flag_completed_dir, exist_ok=True)
    open(join(flag_completed_dir, "complete_%s.txt" % str(opt.id)), "w").close()
def getDewPoint(temp, humidity):
    """
    A utility function to get the temperature to which an amount of air must be
    cooled in order for water vapor to condense into water. This is only valid
    for: 1) temperatures between 0C and 60C, 2) relative humidity between 1%
    and 100%, and 3) dew points between 0C and 50C.
    @param temp: temperature in degrees Celsius
    @param humidity: percentage relative humidity
    """
    A = 17.271
    B = 237.7
    if not 0 < temp < 60:
        raise InvalidDewPoint("Temperature out of range.")
    if not 1 < humidity < 100:
        raise InvalidDewPoint("Humidity is out of range.")
    # Magnus-style approximation; compute the shared term once
    g = (A * temp) / (B + temp) + math.log(humidity / 100.0)
    dew_point = (B * g) / (A - g)
    if dew_point < 0:
        raise InvalidDewPoint("Computed dew point is too low.")
    if dew_point > 50:
        raise InvalidDewPoint("Computed dew point is too high.")
    return dew_point
def get_result_handler(rc_value, sa_file=None):
    """Parse a result-handler argument into its config dict.

    Accepts either a JSON object string or the shorthand
    "project.table" form, which maps to a BigQuery handler.
    rc_value (str): Result config argument specified.
    sa_file (str): SA path argument specified.
    """
    try:
        handler = json.loads(rc_value)
    except json.decoder.JSONDecodeError:
        # Not JSON: fall back to the "project.table" shorthand
        parts = rc_value.split(".", 1)
        if len(parts) != 2:
            raise ValueError(f"Unable to parse result handler config: `{rc_value}`")
        handler = {
            "type": "BigQuery",
            "project_id": parts[0],
            "table_id": parts[1],
        }
    if sa_file:
        handler["google_service_account_key_path"] = sa_file
    return handler
def onPacketFound(packet):
    """
    Called by the scan function for each beacon packets found.

    Decodes Eddystone (URL/UID/TLM) and UriBeacon advertisements from the
    raw hex packet string and dispatches decoded URLs to onUrlFound().
    """
    data = bytearray.fromhex(packet)
    if args.one:
        # Deduplicate packets; [:-3] presumably strips the trailing RSSI
        # field from the hex string -- confirm against the scan format.
        tmp = packet[:-3]
        if tmp in foundPackets:
            return
        foundPackets.add(tmp)
    # Eddystone
    if len(data) >= 20 and data[19] == 0xaa and data[20] == 0xfe:
        # byte offsets assume a fixed advertising-report layout -- TODO confirm
        serviceDataLength = data[21]
        frameType = data[25]
        # Eddystone-URL
        if frameType == 0x10:
            verboseOutput("Eddystone-URL")
            onUrlFound(decodeUrl(data[27:22 + serviceDataLength]))
        elif frameType == 0x00:
            verboseOutput("Eddystone-UID")
        elif frameType == 0x20:
            verboseOutput("Eddystone-TLM")
        else:
            verboseOutput("Unknown Eddystone frame type: {}".format(frameType))
    # UriBeacon
    elif len(data) >= 20 and data[19] == 0xd8 and data[20] == 0xfe:
        serviceDataLength = data[21]
        verboseOutput("UriBeacon")
        onUrlFound(decodeUrl(data[27:22 + serviceDataLength]))
    else:
        verboseOutput("Unknown beacon type")
        verboseOutput(packet)
        verboseOutput()
def speedup_experiment_ts(args, model_iter_fn, model, example_inputs):
    """
    Measure baseline performance (without using TorchDynamo) of TorchScript and optimize_for_inference.
    Writes to ./baseline_ts.csv
    :param args: parsed benchmark arguments, forwarded to baselines()
    :param model_iter_fn: callable that runs one iteration of the model
    :param model: eager-mode model under test
    :param example_inputs: inputs used for scripting and timing
    :return: result of baselines() over the (eager, ts, ofi) variants
    """
    return baselines(
        [
            ("eager", model),
            ("ts", try_script(model, example_inputs)),
            (
                "ofi",
                backends.ofi(try_script(model, example_inputs), example_inputs),
            ),
            # ("nnc", backends.nnc(try_script(model, example_inputs), example_inputs)),
            # ("nvfuser", backends.nvfuser(try_script(model, example_inputs), example_inputs)),
        ],
        model_iter_fn,
        example_inputs,
        args,
    )
def my_polyhedron_to_label(
    rays: Rays_Base, dists: ArrayLike, points: ArrayLike, shape: Tuple[int, ...]
) -> npt.NDArray[np.int_]:
    """Convenience function to pass 1-d arrays to polyhedron_to_label.

    Adds a leading batch axis to ``dists`` (clipped to >= 1e-3) and
    ``points`` before delegating.
    """
    return polyhedron_to_label(  # type: ignore [no-any-return]
        np.expand_dims(  # type: ignore [no-untyped-call]
            np.clip(dists, 1e-3, None), axis=0
        ),
        np.expand_dims(points, axis=0),  # type: ignore [no-untyped-call]
        rays,
        shape,
        verbose=False,
    )
def order_files_by_ranges(root_path: str, dest_path: str, date_ranges: list, *, save_unsorted: bool = True) -> list:
    """Copies all files (including subdirectories)
    from given path to destination path
    without any loss of data
    and groups them into given subdirectories.

    :param root_path: directory tree to scan for images
    :param dest_path: destination root under which group folders are created
    :param date_ranges: list of (start "d.m.y", end "d.m.y", folder_name) entries
    :param save_unsorted: if True, images matching no range go to "Unsorted"
    :return: list of file paths that raised an error while being processed
    """
    t = tqdm(range(get_file_number(root_path)),unit=' img',desc='Progress',file=stdout)
    error_file_list = []
    # value per dir: [next photo id, zfill width] ('Unsorted' keeps a bare counter)
    if save_unsorted:
        size_checked_dirs = {'Unsorted': 1}
    else:
        size_checked_dirs = {}
    for dirpath, dirnames, filenames in walk(root_path):
        # Get files only with jpg extension
        for filename in filenames:
            try:
                check_filename = filename.lower()
                if not check_filename.endswith(permitted_ext):
                    continue
                t.update(1)
                t.refresh()
                # Get EXIF file data
                tmp_path = path.join(dirpath, filename)
                img = Image.open(tmp_path)
                exif_data = img._getexif()
                # Get year, month and day from EXIF dictionary
                # (tag 36867 is DateTimeOriginal, "YYYY:MM:DD HH:MM:SS")
                year, month, day = exif_data[36867][:10].split(':')
                # Check if date is in range
                # Else check if image was copied (why not for/else - user can select ranges that overlap each other)
                copied = False
                for n in date_ranges:
                    try:
                        d1 = n[0].split('.')
                        d1 = date(int(d1[2]),int(d1[1]),int(d1[0]))
                        d2 = n[1].split('.')
                        d2 = date(int(d2[2]),int(d2[1]),int(d2[0]))
                        if d1 <= date(int(year), int(month), int(day)) <= d2:
                            dir_path = path.join(dest_path, n[2])
                            # Get size of directory to estimate zfill value
                            if dir_path not in size_checked_dirs.keys():
                                size_checked_dirs[dir_path] = [1, len(str(len(listdir(dirpath))))]
                            # Create folder if doesn't exists
                            if not path.isdir(dir_path):
                                mkdir(dir_path)
                            photo_id = str(size_checked_dirs[dir_path][0]).zfill(size_checked_dirs[dir_path][1])
                            copy2(tmp_path, path.join(dir_path, f'{year}-{month}-{day} - {photo_id}.jpg'))
                            size_checked_dirs[dir_path][0] += 1
                            copied = True
                            break
                    except:
                        # NOTE(review): bare except silently skips malformed
                        # range entries; deliberate best-effort, but narrows
                        # poorly -- consider (ValueError, IndexError, OSError).
                        continue
                if save_unsorted and not copied:
                    dir_path = path.join(dest_path, 'Unsorted')
                    if not path.isdir(dir_path):
                        mkdir(dir_path)
                    photo_id = str(size_checked_dirs['Unsorted']).zfill(5)
                    copy2(tmp_path, path.join(dir_path, f'{year}-{month}-{day} - {photo_id}.jpg'))
                    size_checked_dirs['Unsorted'] += 1
            except:
                # Any failure (unreadable image, missing EXIF, copy error) is
                # recorded and the file is skipped.
                error_file_list.append(tmp_path)
                continue
    t.close()
    return error_file_list
def scoreCard():
    """Score Card.

    Prints the scorecard of the first match in ``matches`` as indented JSON;
    the unconditional ``break`` limits output to that single match.
    """
    for match in matches:
        print(json.dumps(c.scorecard(match['id']),indent=4))
        break
def get_incident_comment(incident_comment_id: Optional[str] = None,
                         incident_id: Optional[str] = None,
                         operational_insights_resource_provider: Optional[str] = None,
                         resource_group_name: Optional[str] = None,
                         workspace_name: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIncidentCommentResult:
    """
    Represents an incident comment
    API Version: 2019-01-01-preview.
    :param str incident_comment_id: Incident comment ID
    :param str incident_id: Incident ID
    :param str operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
    :param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
    :param str workspace_name: The name of the workspace.
    :param opts: options controlling how the invoke is performed; defaults are filled in below.
    """
    # Build the raw argument dict expected by the provider invoke
    __args__ = dict()
    __args__['incidentCommentId'] = incident_comment_id
    __args__['incidentId'] = incident_id
    __args__['operationalInsightsResourceProvider'] = operational_insights_resource_provider
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    # Fall back to default invoke options / SDK version when not supplied
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:securityinsights:getIncidentComment', __args__, opts=opts, typ=GetIncidentCommentResult).value
    return AwaitableGetIncidentCommentResult(
        author=__ret__.author,
        created_time_utc=__ret__.created_time_utc,
        etag=__ret__.etag,
        id=__ret__.id,
        last_modified_time_utc=__ret__.last_modified_time_utc,
        message=__ret__.message,
        name=__ret__.name,
        type=__ret__.type)
def getOrElseUpdate(dictionary, key, opr):
    """If given key is already in the dictionary, returns associated value.
    Otherwise compute the value with opr, update the dictionary and return it.
    None dictionary are ignored.
    >>> d = dict()
    >>> getOrElseUpdate(d, 1, lambda _: _ + 1)
    2
    >>> print(d)
    {1: 2}
    @type dictionary: dictionary of A => B
    @param dictionary: the dictionary
    @type key: A
    @param key: the key
    @type opr: function of A => B
    @param opr: the function to compute new value from keys
    @rtype: B
    @return: the value associated with the key
    """
    if dictionary is None:
        return opr(key)
    # Membership test (rather than try/except) so defaultdict-style
    # __missing__ hooks behave exactly as before.
    if key not in dictionary:
        dictionary[key] = opr(key)
    return dictionary[key]
def gs_norm(f, g, q):
    """
    Compute the squared Gram-Schmidt norm of the NTRU matrix generated by f, g.
    This matrix is [[g, - f], [G, - F]].
    This algorithm is equivalent to line 9 of algorithm 5 (NTRUGen).
    :param f: polynomial f (first secret component)
    :param g: polynomial g (second secret component)
    :param q: NTRU modulus
    :return: max of the squared norms of the (f, g) row and the projected (F, G) row
    """
    sqnorm_fg = sqnorm([f, g])
    # f*adj(f) + g*adj(g): denominator of the orthogonal projection
    ffgg = add(mul(f, adj(f)), mul(g, adj(g)))
    Ft = div(adj(g), ffgg)
    Gt = div(adj(f), ffgg)
    # the second row's norm scales with q^2
    sqnorm_FG = (q ** 2) * sqnorm([Ft, Gt])
    return max(sqnorm_fg, sqnorm_FG)
def find_anomalous_scatterers(*args, **kwds):
    """
    Wrapper for corresponding method in phaser.substructure, if phaser is
    available and configured.

    Returns None (optionally noting the absence in kwds["log"]) when the
    phaser module is not present in the environment.
    """
    if (not libtbx.env.has_module("phaser")):
        if "log" in kwds:
            print("Phaser not available", file=kwds["log"])
        return None
    # Import lazily so this module loads even without phaser installed
    from phaser import substructure
    return substructure.find_anomalous_scatterers(*args, **kwds)
def OpChr(ea, n):
    """
    Convert the operand at the given address to a character constant
    (thin wrapper over ``idaapi.op_chr``).
    @param ea: linear address
    @param n: number of operand
        - 0 - the first operand
        - 1 - the second, third and all other operands
        - -1 - all operands
    @return: result of idaapi.op_chr(ea, n)
    """
    return idaapi.op_chr(ea, n)
def main(envs, cmd):
    """Run script and verify it exits 0.

    :param envs: iterable of "KEY=VALUE" strings exported into os.environ
        (echoed to stderr)
    :param cmd: argument list passed to check(); must be non-empty
    :raises ValueError: if cmd is empty
    """
    for env in envs:
        key, val = env.split('=', 1)
        # Python 3: the py2 `print >>sys.stderr, ...` statement is a syntax
        # error; use the print function instead.
        print('%s=%s' % (key, val), file=sys.stderr)
        os.environ[key] = val
    if not cmd:
        raise ValueError(cmd)
    check(*cmd)
def check_access(func):
    """
    Check whether user is in policy owners group

    Decorator: when the wrapped view receives a ``policy_id`` kwarg, the
    caller (``kwargs['user']``) must belong to the
    ``policy-<id>-owners`` Keycloak group; otherwise a 403 problem
    response is returned. Calls without ``policy_id`` pass straight through.
    """
    from functools import wraps  # local import keeps the decorator self-contained

    @wraps(func)  # preserve the wrapped view's name/docstring for introspection
    def inner(*args, **kwargs):
        keycloak = get_keycloak()
        if 'policy_id' in kwargs:
            current_user = kwargs['user']
            group_name = f'policy-{kwargs["policy_id"]}-owners'
            group_list = keycloak.user_group_list(current_user)
            # only the names matter for the membership check
            group_names = {group['name'] for group in group_list}
            if group_name in group_names:
                # User has access to delete/edit policy
                return func(*args, **kwargs)
            else:
                # User does not have access to delete/edit policy
                return problem(403, 'Forbidden', 'You do not own this policy')
        else:
            return func(*args, **kwargs)
    return inner
def score_ranking(score_dict):
    """
    Rank datasets by score (ascending) using pandas grouped ranking.
    :param score_dict: dict {'591_sum_test_0601': 13.1, '591_b_tpg7': 13.1, '591_tdw_ltpg6': 14.14}
    :return: DataFrame
    pd.DataFrame([['591_sum_test_0601', 13.10, 2.0, 0.6667],
                  ['591_b_tpg7', 13.10, 2.0, 0.6667],
                  ['591_tdw_ltpg6', 14.14, 3.0, 1.0]],
                 columns=['dataset_id', 'score', 'ranking', 'ranking_perct'])
    """
    ordered = sorted(score_dict.items(), key=lambda kv: kv[1])
    df = pd.DataFrame(ordered, columns=["dataset_id", "score"])
    # "max" ranking: ties share the highest rank of their group
    df["ranking"] = df["score"].rank(method="max")
    df["ranking_perct"] = df["ranking"] / len(df)
    return df
def dock_widget(widget, label="DockWindow", area="right", floating=False):
    """Dock the given widget properly for both M2016 and 2017+.

    :param widget: widget (Qt or Maya control) to dock; converted to QWidget if needed
    :param label: window title and base name for the created control
    :param area: docking area; "right" tabs next to the ChannelBox/AttributeEditor
    :param floating: if True, show as a floating window instead of docking
    :return: the (possibly converted) widget
    """
    # convert widget to Qt if needed
    if not issubclass(widget.__class__, QObject):
        widget = utils.to_qwidget(widget)
    # make sure our widget has a name
    name = widget.objectName()
    if not name:
        # generate a unique object name, suffixing a counter on collisions
        name, num = label + "_mainWindow", 1
        while cmds.control(name, exists=True):
            name = label + "_mainWindow" + str(num)
            num += 1
        widget.setObjectName(label + "_mainWindow")
    # if `floating` is True, return with `widget.show()`
    if floating is True:
        if not widget.windowTitle():
            widget.setWindowTitle(label)
        widget.show()
        return widget
    # make sure the workspaceControl doesn't exist yet
    control = name + "_WorkspaceControl"
    if cmds.control(control, exists=True):
        cmds.deleteUI(control)
    # create workspaceControl (only works with Maya 2017+)
    flags = {"dockToControl": ["ToolBox", "right"]}
    if area == "right":
        # If the ChannelBox is not visible, fallback on the AttributeEditor.
        _control = "ChannelBoxLayerEditor"
        if not cmds.workspaceControl(_control, query=True, visible=True):
            _control = "AttributeEditor"
        flags = {"tabToControl": [_control, -1]}
    control = cmds.workspaceControl(control)
    cmds.workspaceControl(control, edit=True, label=label, r=True, **flags)
    # Convert workspace to Qt and add the widget into its layout.
    workspace = utils.to_qwidget(control)
    layout = workspace.layout()
    layout.addWidget(widget)
    return widget
def compute_benjamin_feir_index(bandwidth, steepness, water_depth, peak_wavenumber):
    """Compute the Benjamin-Feir index (BFI) from bandwidth and steepness estimates.

    Reference:
        Serio, Marina, et al. “On the Computation of the Benjamin-Feir Index.”
        Nuovo Cimento Della Societa Italiana Di Fisica C, vol. 28, Nov. 2005, pp. 893–903.
        ResearchGate, doi:10.1393/ncc/i2005-10134-1.
    """
    relative_depth = peak_wavenumber * water_depth
    if relative_depth > 100:
        # Deep-water limit: sinh/cosh would overflow, so use the limiting
        # values of the depth-dependent coefficients.
        nu = alpha = beta = 1
    else:
        sinh_2kd = np.sinh(2 * relative_depth)
        nu = 1 + 2 * relative_depth / sinh_2kd
        alpha = (
            2
            - nu ** 2
            + 8 * relative_depth ** 2 * np.cosh(2 * relative_depth) / sinh_2kd ** 2
        )
        beta = (
            (np.cosh(4 * relative_depth) + 8 - 2 * np.tanh(relative_depth) ** 2)
            / (8 * np.sinh(relative_depth) ** 4)
            - (2 * np.cosh(relative_depth) ** 2 + 0.5 * nu) ** 2
            / (
                sinh_2kd ** 2
                * (relative_depth / np.tanh(relative_depth) - (nu / 2) ** 2)
            )
        )
    return steepness / bandwidth * nu * np.sqrt(np.maximum(beta / alpha, 0))
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via raw_input() and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no" or None (meaning
    an answer is required of the user).

    The return value is a bool.  NOTE(review): the mapping is inverted —
    "yes" maps to False and "no" maps to True — confirm callers rely on
    this before changing it.

    :raises ValueError: if ``default`` is not "yes", "no" or None.
    """
    valid = {"yes": False, "y": False, "ye": False,
             "no": True, "n": True}
    # Fix: identity comparison with None (`is None`), not `== None`.
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompt)
        choice = raw_input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
def plaintext_property_map(name: str) -> Mapper:
    """Build a property :class:`Mapper` for a plain-text Notion property.

    Arguments
    ---------
    name : str
        Name of the property.

    Returns
    -------
    Mapper
        Property map wired with plain-text converters and markdown disabled.

    See Also
    --------
    property_map
    """
    converters = {
        "python_to_api": plaintext_to_notion,
        "api_to_python": notion_to_plaintext,
        "markdown": False,
    }
    return property_map(name, **converters)
def pooling_layer(net_input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1)):
    """Apply 2-D max pooling with VALID padding.

    :param net_input: Input tensor.
    :param ksize: Pooling kernel size (NHWC layout).
    :param strides: Pooling stride (NHWC layout).
    :return: The pooled tensor.
    """
    pooled = tf.nn.max_pool(net_input, ksize=ksize, strides=strides, padding='VALID')
    return pooled
def set_standard_attrs(da):
    """Attach CF-style standard metadata to the lat/lon/depth/time coordinates.

    The coordinate attribute dictionaries are replaced wholesale (as
    OrderedDicts, preserving key order) and the time encoding units are set.
    Returns the same object, mutated in place.
    """
    coordinate_metadata = {
        "lat": [
            ("standard_name", "latitude"),
            ("units", "degrees_north"),
            ("axis", "Y"),
            ("long_name", "latitude"),
            ("out_name", "lat"),
            ("stored_direction", "increasing"),
            ("type", "double"),
            ("valid_max", "90.0"),
            ("valid_min", "-90.0"),
        ],
        "lon": [
            ("standard_name", "longitude"),
            ("units", "degrees_east"),
            ("axis", "X"),
            ("long_name", "longitude"),
            ("out_name", "lon"),
            ("stored_direction", "increasing"),
            ("type", "double"),
            ("valid_max", "180.0"),
            ("valid_min", "-180.0"),
        ],
        "depth_coord": [
            ("standard_name", "depth"),
            ("units", "m"),
            ("axis", "Z"),
            ("long_name", "ocean depth coordinate"),
            ("out_name", "lev"),
            ("positive", "down"),
            ("stored_direction", "increasing"),
            ("valid_max", "12000.0"),
            ("valid_min", "0.0"),
        ],
        "time": [
            ("standard_name", "time"),
            ("axis", "T"),
            ("long_name", "time"),
            ("out_name", "time"),
            ("stored_direction", "increasing"),
        ],
    }
    for coord_name, attr_pairs in coordinate_metadata.items():
        da.coords[coord_name].attrs = OrderedDict(attr_pairs)
    da.coords["time"].encoding["units"] = "days since '1900-01-01'"
    return da
def find_git_repos(folder):
    """Recursively collect every git repository under the given folder.

    A directory counts as a repository when it directly contains a
    ``.git`` subdirectory.
    """
    repos = []
    for root, subfolders, files in os.walk(folder):
        if '.git' in subfolders:
            repos.append(root)
    return repos
def isnotebook():
    """Detect whether the code is running inside a Jupyter notebook.

    Useful e.g. to switch progress indicators.

    Returns
    -------
    isnotebook : bool
        True only when running under a notebook/qtconsole
        (ZMQ-based IPython shell); False in a terminal IPython session
        or a plain Python interpreter.
    """
    try:
        shell_name = get_ipython().__class__.__name__
    except NameError:
        # get_ipython is undefined outside IPython entirely.
        return False
    # Only the ZMQ shell corresponds to a notebook or qtconsole.
    return shell_name == "ZMQInteractiveShell"
def test_insert_b_ansible_pho_link(monkeypatch):
    """Test for insert_b_ansible_pho_link().

    With rest_insert stubbed to succeed, registration should return 0.
    """
    core = create_ita1core()
    configs = get_configs()
    comobj = ActionDriverCommonModules()
    prefix = 'aaaaa'
    table_name = 'C_OPERATION_LIST'
    ary_config = ''
    row_data = ''
    op_record = [0] * Cstobj.TBL_COL_MAX
    op_record[Cstobj.COL_FUNCTION_NAME] = '登録'
    op_record[Cstobj.COL_OPERATION_NAME] = prefix + core.trace_id + core.execution_order
    op_record[Cstobj.COL_OPERATION_DATE] = comobj.getStringNowDate()
    monkeypatch.setattr(core.restobj, 'rest_insert', method_dummy_true)
    result = core._insert_b_ansible_pho_link(table_name, row_data)
    assert result == 0
    # TODO: add coverage for the failure path.
def geometric_anomaly_detection_grid_search(
    model_dir: str,
    model_name: str,
    dataset_name: str,
    vocab_size: int,
    manifold_dimension: int,
    search_size: int,
    use_knn_annulus: bool,
    min_annulus_parameter: float,
    max_annulus_parameter: float,
    search_params_max_diff: float,
    use_ripser_plus_plus: bool,
    num_cpus: int,
    output_dir: str,
    output_filepath_suffix: str,
) -> None:
    """
    Performs grid search to find best set of annulus radii (inner and outer)
    for computing geometric data anomaly detection on word embeddings.

    Parameters
    ----------
    model_dir : str
        Directory of the model to load.
    model_name : str
        Name of the trained word2vec model.
    dataset_name : str
        Name of the dataset the model is trained on.
    vocab_size : int
        Size of the vocabulary to use.
    manifold_dimension : int
        Manifold dimension to be passed to geometric anomaly detection algorithm.
    search_size : int
        Number of radii parameters to use at most
        (all for outer radius and (all - 1) for inner radius).
    use_knn_annulus : bool
        Whether or not to use KNN version of the GAD algorithm.
    min_annulus_parameter : float
        Minimal annulus radius to search over.
    max_annulus_parameter : float
        Maximal annulus radius to search over.
    search_params_max_diff : float
        Maximal difference between outer and inner radii for annulus.
    use_ripser_plus_plus : bool
        Whether or not to use Ripser++ and GPUs for computing Rips complices.
    num_cpus : int
        Number of CPUs to use (defaults -1 = to all CPUs).
    output_dir : str
        Output directory to save data
    output_filepath_suffix : str
        Output filepath suffix
    """
    # Ensure output directory exists
    makedirs(output_dir, exist_ok=True)
    # Load output from training word2vec
    print("Loading word2vec model...")
    w2v_training_output = load_model_training_output(
        model_training_output_dir=model_dir,
        model_name=model_name,
        dataset_name=dataset_name,
        return_normalized_embeddings=True,
    )
    last_embedding_weights_normalized = w2v_training_output[
        "last_embedding_weights_normalized"
    ]
    model_id = f"{model_name}_{dataset_name}"
    print("Done!")
    # Compute pairwise distances for grid search using specified vocab size.
    # NOTE(review): only the first `vocab_size` embedding rows are used —
    # presumably rows are ordered by word frequency; confirm upstream.
    vocabulary_word_ints = np.arange(vocab_size)
    word_embeddings_pairwise_dists_grid_search = euclidean_distances(
        last_embedding_weights_normalized[vocabulary_word_ints]
    )
    # Do grid search over (inner, outer) annulus radius pairs.
    (
        best_gad_result_idx,
        P_man_counts,
        gad_results,
        annulus_radii_grid,
    ) = grid_search_gad_annulus_radii(
        data_points=last_embedding_weights_normalized,
        manifold_dimension=manifold_dimension,
        search_size=search_size,
        use_knn_annulus=use_knn_annulus,
        search_params_max_diff=search_params_max_diff,
        min_annulus_parameter=min_annulus_parameter,
        max_annulus_parameter=max_annulus_parameter,
        data_point_ints=vocabulary_word_ints,
        data_points_pairwise_distances=word_embeddings_pairwise_dists_grid_search,
        use_ripser_plus_plus=use_ripser_plus_plus,
        ripser_plus_plus_threshold=200,  # TODO confirm meaning/units of this cutoff
        return_annlus_persistence_diagrams=True,  # (sic) spelling matches the callee's keyword
        progressbar_enabled=True,
        n_jobs=num_cpus,
    )
    # Persist all grid-search artefacts into a single joblib archive.
    grid_search_result = {
        "best_gad_result_idx": best_gad_result_idx,
        "P_man_counts": P_man_counts,
        "gad_results": gad_results,
        "annulus_radii_grid": annulus_radii_grid,
    }
    grid_search_result_filepath = join(
        output_dir, f"{model_id}_grid_search_result_{output_filepath_suffix}.joblib"
    )
    joblib.dump(grid_search_result, grid_search_result_filepath)
def p_ExpressionNoIn(p):
    # PLY (yacc) production handler: the docstring below IS the grammar rule
    # parsed by PLY — do not edit its content.
    """ExpressionNoIn : AssignmentExpressionNoIn
                      | ExpressionNoIn ',' AssignmentExpressionNoIn"""
    # The first assignment is NOT dead code: it seeds slot 0 so that the
    # subsequent `list(p)` captures the rule name at index 0, producing
    # ["ExpressionNoIn", <child values...>].
    p[0] = "ExpressionNoIn"
    p[0] = list(p)
def get_poagraph(dagmaf: DAGMaf.DAGMaf,
                 fasta_provider: missings.FastaProvider,
                 metadata: Optional[msa.MetadataCSV]) -> \
        Tuple[List[graph.Node], Dict[msa.SequenceID, graph.Sequence]]:
    """Gets poagraph from given dagmaf using fasta_provider and metadata.

    Args:
        dagmaf: DagMaf that will be converted to Poagraph.
        fasta_provider: Provider of symbols missing in DagMaf.
        metadata: MetadataCSV.

    Returns:
        Tuple of poagraph elements: (nodes, sequences mapping).
    """
    sequences_in_dagmaf = _get_sequences_ids(dagmaf)
    build_state = _BuildState(initial_nodes=[],
                              initial_sequences=_init_sequences(sequences_in_dagmaf, metadata),
                              initial_edges=_init_free_edges(sequences_in_dagmaf),
                              seqs_info=_get_seqs_info(dagmaf, sequences_in_dagmaf),
                              initial_column_id=graph.ColumnID(-1),
                              fasta_provider=fasta_provider)
    _complement_starting_nodes(build_state)
    # Process the blocks in order; the index from the original enumerate()
    # was never used, so iterate directly.
    for mafnode in dagmaf.dagmaf_nodes:
        _process_block(build_state, mafnode)
    return build_state.nodes, build_state.sequences
def const_bool(value):
    """Build the expression-tree node for a boolean constant.

    Non-boolean inputs are coerced through standard truthiness, so for
    instance const_bool(1) is equivalent to const_bool(True).
    """
    digit = '1' if value else '0'
    return ['constant', 'bool', [digit]]
def long_to_bytes(n, blocksize=0):
    """Convert an integer to a big-endian byte string.

    In Python 3.2+ the native ``n.to_bytes(length, 'big')`` does the same
    for a known length.  If ``blocksize`` is greater than zero, the result
    is left-padded with NUL bytes so its length is a multiple of
    ``blocksize``; otherwise the minimal-length representation is returned
    (a single NUL byte for zero).
    """
    n = int(n)
    # Emit 32-bit big-endian chunks, most significant first.
    chunks = b''
    while n > 0:
        chunks = struct.pack('>I', n & 0xffffffff) + chunks
        n >>= 32
    # Remove leading NUL padding; zero (or empty) collapses to one NUL byte.
    result = chunks.lstrip(b'\x00') or b'\x00'
    # Left-pad with NULs up to a multiple of blocksize, if requested.
    if blocksize > 0 and len(result) % blocksize:
        result = (blocksize - len(result) % blocksize) * b'\x00' + result
    return result
def vector_matrix_mul(v, M):
    """Return the product of vector ``v`` and matrix ``M``.

    Uses bracket notation (``v[...]``, ``M[i, j]``) to access entries,
    which avoids some sparsity bugs.
    """
    assert M.D[0] == v.D
    accumulator = dict.fromkeys(M.D[1], 0)
    for row, col in M.f:
        accumulator[col] += M[row, col] * v[row]
    return Vec(M.D[1], accumulator)
def peek(library, session, address, width):
    """Read an 8, 16, 32 or 64-bit value from the specified address.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param address: Source address to read the value.
    :param width: Number of bits to read (8, 16, 32 or 64).
    :return: Data read from bus.
    :rtype: bytes
    :raises ValueError: if ``width`` is not one of 8, 16, 32, 64.
    """
    if width == 8:
        return peek_8(library, session, address)
    elif width == 16:
        return peek_16(library, session, address)
    elif width == 32:
        return peek_32(library, session, address)
    elif width == 64:
        return peek_64(library, session, address)
    # Unknown width: reject explicitly rather than guessing a size.
    raise ValueError('%s is not a valid size. Valid values are 8, 16, 32 or 64' % width)
def create_text_image(text, image_export=False, **kwargs):
    """
    Create a jpg with given text and return in bytes format.

    The text is rendered centred on a fixed 720x744 white canvas; font size
    and wrap width are chosen with nested binary searches so the text fills
    the canvas without overflowing vertically.

    :param text: text to render; may contain explicit newlines.
    :param image_export: when True, additionally save the rendered image to
        the postcard trace directory (for debugging).
    :param kwargs: accepted but unused.  # NOTE(review): confirm callers
    :return: JPEG-encoded image as bytes.
    """
    # Fixed canvas geometry and colours.
    text_canvas_w = 720
    text_canvas_h = 744
    text_canvas_bg = 'white'
    text_canvas_fg = 'black'
    text_canvas_font_name = 'open_sans_emoji.ttf'
    def load_font(size):
        # Load the bundled emoji-capable font at the requested size.
        return ImageFont.truetype(pkg_resources.resource_stream(__name__, text_canvas_font_name), size)
    def find_optimal_size(msg, min_size=20, max_size=400, min_line_w=1, max_line_w=80, padding=0):
        """
        Find optimal font size and line width for a given text
        """
        if min_line_w >= max_line_w:
            raise Exception("illegal arguments, min_line_w < max_line_w needed")
        def line_width(font_size, line_padding=70):
            # Binary search for the widest per-line character count that
            # still fits horizontally, measured with a string of '1's.
            # NOTE(review): font.getsize() was removed in Pillow 10 —
            # this code assumes an older Pillow; confirm pinned version.
            l = min_line_w
            r = max_line_w
            font = load_font(font_size)
            while l < r:
                n = floor((l + r) / 2)
                t = ''.join([char * n for char in '1'])
                font_w, font_h = font.getsize(t)
                font_w = font_w + (2 * line_padding)
                if font_w >= text_canvas_w:
                    r = n - 1
                    pass
                else:
                    l = n + 1
                    pass
            return n
        # Outer binary search over font size: grow until the wrapped text
        # no longer fits vertically (start_y collapsing to 0 means overflow).
        size_l = min_size
        size_r = max_size
        last_line_w = 0
        last_size = 0
        while size_l < size_r:
            size = floor((size_l + size_r) / 2.0)
            last_size = size
            line_w = line_width(size)
            last_line_w = line_w
            lines = []
            for line in msg.splitlines():
                cur_lines = textwrap.wrap(line, width=line_w)
                for cur_line in cur_lines:
                    lines.append(cur_line)
            font = load_font(size)
            total_w, line_h = font.getsize(msg)
            tot_height = len(lines) * line_h
            if tot_height + (2 * padding) < text_canvas_h:
                start_y = (text_canvas_h - tot_height) / 2
            else:
                start_y = 0
            if start_y == 0:
                size_r = size - 1
            else:
                # does fit
                size_l = size + 1
        return last_size, last_line_w
    def center_y(lines, font_h):
        # Vertical offset that centres the text block (0 if it overflows).
        tot_height = len(lines) * font_h
        if tot_height < text_canvas_h:
            return (text_canvas_h - tot_height) // 2
        else:
            return 0
    size, line_w = find_optimal_size(text, padding=50)
    logger.debug(f'using font with size: {size}, width: {line_w}')
    font = load_font(size)
    font_w, font_h = font.getsize(text)
    # Re-wrap the text with the chosen line width.
    lines = []
    for line in text.splitlines():
        cur_lines = textwrap.wrap(line, width=line_w)
        for cur_line in cur_lines:
            lines.append(cur_line)
    text_y_start = center_y(lines, font_h)
    canvas = Image.new('RGB', (text_canvas_w, text_canvas_h), text_canvas_bg)
    draw = ImageDraw.Draw(canvas)
    # Draw each line horizontally centred, advancing by the line height.
    for line in lines:
        width, height = font.getsize(line)
        draw.text(((text_canvas_w - width) // 2, text_y_start), line,
                  font=font,
                  fill=text_canvas_fg,
                  embedded_color=True)
        text_y_start += (height)
    if image_export:
        name = strftime("postcard_creator_export_%Y-%m-%d_%H-%M-%S_text.jpg", gmtime())
        path = os.path.join(_get_trace_postcard_sent_dir(), name)
        logger.info('exporting image to {} (image_export=True)'.format(path))
        canvas.save(path)
    img_byte_arr = io.BytesIO()
    canvas.save(img_byte_arr, format='jpeg')
    return img_byte_arr.getvalue()
async def send_message_to_exchange(rabbitmq_config: dict, queue_config: dict, message: dict) -> None:
    """
    Send Messages to rabbit mq exchange. Don't use directly.

    :param rabbitmq_config: dict config for rabbitmq instance config: {'host': '', 'port': '', 'username': '',
    'password': ''}
    :param queue_config: dict config of consumer queues: {'exchange': '', 'queues': [], 'routing_key': '',
    'exchange_type': '', 'error_messaging': dict}
    :param message: dict object to be sent
    :return: None
    """
    # Pre-declare so the finally-block can test what was actually opened.
    connection, channel = None, None
    try:
        # connect to the RabbitMQ broker
        connection = await asynqp.connect(rabbitmq_config['host'], rabbitmq_config['port'],
                                          rabbitmq_config['username'], rabbitmq_config['password'])
        # Open a communications channel
        channel = await connection.open_channel()
        # Create an exchange and QUEUE on the broker
        amqp_exchange = await channel.declare_exchange(queue_config['exchange'], queue_config['exchange_type'])
        # Bind every configured queue to the exchange with the same routing key.
        for queue in queue_config['queues']:
            amqp_queue = await channel.declare_queue(queue)
            await amqp_queue.bind(amqp_exchange, queue_config['routing_key'])
        # If you pass in a dict it will be automatically converted to JSON
        msg = asynqp.Message(message, content_type="application/json")
        # Note: publish is not awaited — it is a synchronous call in asynqp.
        amqp_exchange.publish(msg, queue_config['routing_key'])
        print("Published message: {msg} to {exchange}-{routing_key}".
              format(msg=str(message), exchange=queue_config['exchange'], routing_key=queue_config['routing_key']))
    except asynqp.AMQPError:
        # Broker-side failure: optionally forward the payload to a configured
        # error queue so it can be retried later.
        print("Unable to publish message to exchange: {exchange} and routing key: {routing_key}".
              format(exchange=queue_config['exchange'], routing_key=queue_config['routing_key']))
        if 'error_messaging' in queue_config.keys():
            error_message = dict(message=message, exchange=queue_config['exchange'], queues=queue_config['queues'],
                                 routing_key=queue_config['routing_key'], exchange_type=queue_config['exchange_type'])
            await send_request_to_error_queue(rabbitmq_config, queue_config['error_messaging'], error_message)
    except asyncio.CancelledError:
        print("Asyncio error : cancelled routine")
    finally:
        # Always release the channel and connection, whatever happened above.
        if channel is not None:
            await channel.close()
        if connection is not None:
            await connection.close()
        print("Queue channel and connection closed by producer for msg-{msg}".format(msg=message))
def spacesToTabs(filename, spacesPerTab):
    """
    Converts leading tabs to spaces in-place, given a filename and a number
    of spaces per tab.

    Note: despite the function's name, it replaces each *leading tab* with
    ``spacesPerTab`` spaces (matching the original docstring and behaviour).
    The name is kept for backward compatibility.

    :param filename: str
    :param spacesPerTab: int
    """
    with open(filename) as inFile:
        lines = inFile.readlines()
    with open(filename, 'w') as outFile:
        for line in lines:
            # Fix: count leading tabs via lstrip so lines consisting solely
            # of tabs are converted too (the old char-scan left them intact
            # when no non-tab character followed).
            body = line.lstrip('\t')
            numTabs = len(line) - len(body)
            outFile.write(' ' * int(numTabs * spacesPerTab) + body)
def mtf_from_psf(psf, dx=None):
    """Compute the MTF from a given PSF.

    Parameters
    ----------
    psf : `prysm.RichData` or `numpy.ndarray`
        object with data property having 2D data containing the psf,
        or the array itself
    dx : `float`
        sample spacing of the data

    Returns
    -------
    RichData
        container holding the MTF (normalized at DC), ready for plotting
        or slicing.
    """
    spectrum, df = transform_psf(psf, dx)
    # DC bin sits at ceil(n/2) along each axis.
    center_row = int(np.ceil(spectrum.shape[0] / 2))
    center_col = int(np.ceil(spectrum.shape[1] / 2))
    magnitude = np.abs(spectrum)
    magnitude = magnitude / magnitude[center_row, center_col]
    return RichData(data=magnitude, dx=df, wavelength=None)
def check_intersection(vertical_line: Line, other_line: Line) -> bool:
    """
    Check for intersection between two line segments.

    :param vertical_line: The first line segment. Guaranteed to be vertical.
    :param other_line: The second line segment.
    :return: Whether or not they intersect.
    """
    intersection = get_intersection_point(vertical_line, other_line)
    # bool(...) replaces the obscure double-negation (`not not`) idiom;
    # the truthiness semantics are identical.
    return bool(intersection)
def get_session_info(config):
    """Gather summary information about a benchmarking session (for --info).

    Warning: creates a fresh manifest file, overwriting any existing one.
    Kept separate from print_session_info for ease of testing.
    """
    from krun.scheduler import ManifestManager
    from krun.platform import detect_platform
    # `plat` rather than `platform` to avoid shadowing the stdlib module.
    plat = detect_platform(None, config)
    manifest = ManifestManager(config, plat, new_file=True)
    info = {
        "n_proc_execs": manifest.total_num_execs,
        "n_in_proc_iters": manifest.get_total_in_proc_iters(config),
        "skipped_keys": manifest.skipped_keys,
        "non_skipped_keys": manifest.non_skipped_keys,
    }
    return info
def rotate(x, y, a):
    """Rotate vector (x, y) by an angle a (radians).

    NOTE(review): with this sign convention a positive angle rotates
    clockwise in a standard right-handed frame — confirm with callers.
    """
    cos_a = np.cos(a)
    sin_a = np.sin(a)
    return x * cos_a + y * sin_a, y * cos_a - x * sin_a
def cli(command, config_file):
    """Dispatch a command-line sub-command.

    Parameters
    ----------
    command : string
        One of {'prep', 'train', 'eval', 'predict', 'finetune', 'learncurve'}
    config_file : str, Path
        path to a config.toml file

    Raises
    ------
    NotImplementedError
        for the 'finetune' command (not available yet).
    ValueError
        for any unrecognized command.
    """
    if command == 'prep':
        prep(toml_path=config_file)
        return
    if command == 'train':
        train(toml_path=config_file)
        return
    if command == 'eval':
        eval(toml_path=config_file)
        return
    if command == 'predict':
        predict(toml_path=config_file)
        return
    if command == 'learncurve':
        learning_curve(toml_path=config_file)
        return
    if command == 'finetune':
        raise NotImplementedError
    raise ValueError(
        f'command not recognized: {command}'
    )
def call_and_exit(command, shell=True):
    """Run a shell command; terminate the process with status 1 on failure."""
    if call(command, shell=shell) != 0:
        exit(1)
def test_disarm_wrong_mimetype(client):
    """A DELETE with a non-JSON content type must be rejected with 415."""
    request_headers = {
        "Content-Type": "application/x-www-form-urlencoded",
        "Authorization": "Bearer 127d9a48-927a-43f3-a3e3-842f3f2b7393",
    }
    response = client.simulate_delete(
        "/api/v0/alarms",
        headers=request_headers,
        body="code=1234567890",
    )
    assert response.status_code == 415
def main(
    name: str,
    vm_params_dict: dict,
    outdir: Path,
    logger: Logger = qclogging.get_basic_logger(),
):
    """
    vm_params_dict loaded from vm_params.yaml doesn't have all info plot_vm() needs.
    This function gathers and works out the necessary input (except SRF-relevant info) to run this file as a stand-alone script

    Note: mutates the caller's vm_params_dict in place.
    The default logger is created once at import time.

    Parameters
    ----------
    name : name of the fault/event
    vm_params_dict : Dictionary extracted from vm_params.yaml
    outdir : output directory (also hosts the temporary working directory)
    logger : logger to report progress to
    """
    # Local import — NOTE(review): presumably to avoid a circular import at
    # module load time; confirm.
    from rel2vm_params import (
        get_vm_land_proportion,
        corners2region,
        NZ_CENTRE_LINE,
        NZ_LAND_OUTLINE,
    )

    # vm_params_dict is the dictionary directly loaded from vm_params.yaml
    with TemporaryDirectory(prefix=f"_tmp_{name}_", dir=outdir) as ptemp:
        ptemp = Path(ptemp)
        vm_params_dict["name"] = name
        origin = (vm_params_dict["MODEL_LON"], vm_params_dict["MODEL_LAT"])
        xlen = vm_params_dict["extent_x"]
        ylen = vm_params_dict["extent_y"]
        # Corner points of the rotated model rectangle.
        c1, c2, c3, c4 = geo.build_corners(
            origin, vm_params_dict["MODEL_ROT"], xlen, ylen
        )
        # Tab-separated lon/lat pairs, one corner per line.
        vm_params_dict[
            "path_mod"
        ] = "{:.6f}\t{:.6f}\n{:.6f}\t{:.6f}\n{:.6f}\t{:.6f}\n{:.6f}\t{:.6f}\n".format(
            c1[0], c1[1], c2[0], c2[1], c3[0], c3[1], c4[0], c4[1]
        )
        vm_params_dict["path"] = vm_params_dict["path_mod"]
        vm_params_dict["adjusted"] = False
        vm_params_dict["xlen"] = xlen
        vm_params_dict["ylen"] = ylen
        vm_params_dict["land"] = get_vm_land_proportion(c1, c2, c3, c4)
        vm0_region = corners2region(c1, c2, c3, c4)
        # Pad the plotted region by one degree on every side.
        plot_region = (
            vm0_region[0] - 1,
            vm0_region[1] + 1,
            vm0_region[2] - 1,
            vm0_region[3] + 1,
        )
        vm_params_dict["plot_region"] = plot_region
        # plotting the domain of VM. No SRF
        plot_vm(
            vm_params_dict,
            [],
            NZ_LAND_OUTLINE,
            NZ_CENTRE_LINE,
            vm_params_dict["mag"],
            outdir,
            ptemp,
            logger=logger,
        )
def filt_all(list_, func):
    """Like :func:`filter` but with reversed arguments; returns a list."""
    return list(filter(func, list_))
def _log_logout_failure(payload):
    """Log a failed logout attempt with PII-hashed identifiers.

    The exact message text is kept identical to the original (it was
    copy-pasted in three places); callers distinguish the actual cause via
    the 'failure_reason' field of the returned dict.
    """
    LOGGER.error(
        "[%s] User logout request failed for "
        "session_token: %s, user_id: %s. "
        "Invalid user_id provided for "
        "corresponding session token." %
        (payload['reqid'],
         pii_hash(payload['session_token'],
                  payload['pii_salt']),
         pii_hash(payload['user_id'],
                  payload['pii_salt']))
    )


def auth_user_logout(payload,
                     override_authdb_path=None,
                     raiseonfail=False,
                     config=None):
    """Logs out a user.

    Deletes the session token from the session store. On the next request
    (redirect from POST /auth/logout to GET /), the frontend will issue a new
    one.

    The frontend MUST unset the cookie as well.

    Parameters
    ----------

    payload : dict
        The payload dict should contain the following keys:

        - session_token: str
        - user_id: int

        In addition to these items received from an authnzerver client, the
        payload must also include the following keys (usually added in by a
        wrapping function):

        - reqid: int or str
        - pii_salt: str

    override_authdb_path : str or None
        The SQLAlchemy database URL to use if not using the default auth DB.

    raiseonfail : bool
        If True, and something goes wrong, this will raise an Exception instead
        of returning normally with a failure condition.

    config : SimpleNamespace object or None
        An object containing systemwide config variables as attributes. This is
        useful when the wrapping function needs to pass in some settings
        directly from environment variables.

    Returns
    -------

    dict
        Returns a dict containing the result of the password verification check.

    """
    # The wrapper-supplied keys are mandatory; without them we cannot even
    # log the request safely.
    for key in ('reqid', 'pii_salt'):
        if key not in payload:
            LOGGER.error(
                "Missing %s in payload dict. Can't process this request." % key
            )
            return {
                'success': False,
                'failure_reason': (
                    "invalid request: missing '%s' in request" % key
                ),
                'user_id': None,
                'messages': ["Invalid user logout request."],
            }

    # The client-supplied keys identify the session to terminate.
    for key in ('session_token', 'user_id'):
        if key not in payload:
            LOGGER.error(
                '[%s] Invalid user logout request, missing %s.' %
                (payload['reqid'], key)
            )
            return {
                'success': False,
                'failure_reason': (
                    "invalid request: missing '%s' in request" % key
                ),
                'messages': ["Invalid user logout request. "
                             "No %s provided." % key],
            }

    # Check if the session token exists.
    session = auth_session_exists(
        {'session_token': payload['session_token'],
         'reqid': payload['reqid'],
         'pii_salt': payload['pii_salt']},
        override_authdb_path=override_authdb_path,
        raiseonfail=raiseonfail)

    if not session['success']:
        _log_logout_failure(payload)
        return {
            'success': False,
            'failure_reason': (
                "session does not exist"
            ),
            'user_id': payload['user_id'],
            'messages': ["Logout failed. Invalid "
                         "session_token for user_id."]
        }

    # The session must belong to the user asking for logout.
    if payload['user_id'] != session['session_info']['user_id']:
        _log_logout_failure(payload)
        return {
            'success': False,
            'failure_reason': (
                "user does not exist"
            ),
            'user_id': payload['user_id'],
            'messages': [
                "Logout failed. Invalid session_token for user_id."
            ]
        }

    # Delete the session from the session store.
    deleted = auth_session_delete(
        {'session_token': payload['session_token'],
         'reqid': payload['reqid'],
         'pii_salt': payload['pii_salt']},
        override_authdb_path=override_authdb_path,
        raiseonfail=raiseonfail
    )

    if deleted['success']:
        LOGGER.info(
            "[%s] User logout request successful for "
            "session_token: %s, user_id: %s. " %
            (payload['reqid'],
             pii_hash(payload['session_token'],
                      payload['pii_salt']),
             pii_hash(payload['user_id'],
                      payload['pii_salt']))
        )
        return {
            'success': True,
            'user_id': session['session_info']['user_id'],
            'messages': ["Logout successful."]
        }

    _log_logout_failure(payload)
    return {
        'success': False,
        'failure_reason': (
            "delete session failed"
        ),
        'user_id': payload['user_id'],
        'messages': ["Logout failed. Invalid "
                     "session_token for user_id."]
    }
def rx_observer(on_next: NextHandler, on_error: ErrorHandler = default_error, on_completed: CompleteHandler = default_on_completed) -> Observer:
    """Create an :class:`Observer` from the three callback handlers.

    The underlying implementation uses a named tuple.

    Args:
        on_next (NextHandler): handler which processes emitted items
        on_error (ErrorHandler): error handler (defaults to default_error,
            which raises the exception)
        on_completed (CompleteHandler): completion handler (defaults to noop)

    Returns:
        (Observer): an Observer
    """
    return ObserverDefinition(
        on_next=on_next,
        on_error=on_error,
        on_completed=on_completed,
    )
def _rav_setval_ ( self , value ) :
"""Assign the valeu for the variable
>>> var = ...
>>> var.value = 10
"""
value = float ( value )
self.setVal ( value )
return self.getVal() | 5,328,176 |
def set_annex_version(version):
    """Override the git-annex version.

    This temporarily masks the git-annex version present in external_versions
    and make AnnexRepo forget its cached version information.
    """
    # NOTE(review): this is a generator meant to be used as a context manager;
    # presumably a @contextmanager decorator precedes this definition — confirm.
    from datalad.support.annexrepo import AnnexRepo
    # Remember the currently cached version so it can be restored afterwards.
    ar_vers = AnnexRepo.git_annex_version
    with patch.dict(
            "datalad.support.annexrepo.external_versions._versions",
            {"cmd:annex": version}):
        try:
            # Clearing the cache forces re-detection under the patched value.
            AnnexRepo.git_annex_version = None
            yield
        finally:
            # Always restore the original cached version, even on error.
            AnnexRepo.git_annex_version = ar_vers
def import_matrix_as_anndata(matrix_path, barcodes_path, genes_path):
    """Import a matrix as an Anndata object.

    :param matrix_path: path to the matrix ec file
    :type matrix_path: str
    :param barcodes_path: path to the barcodes txt file
    :type barcodes_path: str
    :param genes_path: path to the genes txt file
    :type genes_path: str

    :return: a new Anndata object
    :rtype: anndata.Anndata
    """
    barcodes_df = pd.read_csv(
        barcodes_path, index_col=0, header=None, names=['barcode']
    )
    genes_df = pd.read_csv(
        genes_path, header=None, index_col=0, names=['gene_id'], sep='\t'
    )
    matrix_csr = scipy.io.mmread(matrix_path).tocsr()
    return anndata.AnnData(X=matrix_csr, obs=barcodes_df, var=genes_df)
def get_representations(dataset, pretrained_model, alphabet, batch_size=128):
    """Embed every sequence in *dataset* with a pretrained ESM model.

    :param dataset: dataset yielding (tokens, labels, seqs) batches via its
        own collate_fn.
    :param pretrained_model: ESM model providing layer-33 representations.
    :param alphabet: ESM alphabet used to build the batch converter.
    :param batch_size: number of sequences embedded per forward pass.
    :return: N x 1280 numpy array of <cls>-token representations.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    pretrained_model = pretrained_model.to(device)
    dataloader = DataLoader(
        dataset, batch_size=batch_size, shuffle=False, collate_fn=dataset.collate_fn
    )
    batch_converter = alphabet.get_batch_converter()
    sequence_representations = []
    progress_bar = tqdm(dataloader, ascii=True)
    # The enumerate() index of the original was unused; iterate directly.
    # `tokens` is unpacked but unused: the ESM batch converter re-tokenizes
    # from the (label, seq) pairs.
    for tokens, labels, seqs in progress_bar:
        esm_batch = list(zip(labels, seqs))
        batch_labels, batch_strs, batch_tokens = batch_converter(esm_batch)
        batch_tokens = batch_tokens.to(device)
        with torch.no_grad():
            results = pretrained_model(
                batch_tokens, repr_layers=[33], return_contacts=True
            )
        token_representations = results["representations"][33]
        outputs = token_representations[:, 0]  # get the <cls> token
        sequence_representations.append(outputs.cpu().numpy())
    return np.vstack(sequence_representations)
def test_init_empty_force_field(empty_force_field, attribute, value):
"""
Test that an empty force field has the expected attributes.
"""
assert getattr(empty_force_field, attribute) == value | 5,328,180 |
def rem_hap_cands():
    """JSON endpoint: clear the haplotype-candidate designation for the
    sample (or set of samples) named in the request form."""
    samples = flask.request.form['samples']
    return mds.remove_hap_cands(samples)
def construct_subdirExample(str_dirname):
    """Build a small example directory tree and return its file paths.

    Creates (relative to the current working directory)::

        str_dirname/
        ├── dir01
        │   ├── arq01.dat
        │   ├── arq02.dat
        │   ├── f001.txt
        │   ├── f002.txt
        │   └── f003.txt
        └── dir02
            ├── a001.dat
            ├── a002.dat
            └── f.zip

    (The original docstring's tree listed ``a001.txt``/``a002.txt`` in dir02,
    but the code has always created ``.dat`` files — the doc is fixed here.)

    Existing directories and files are left untouched. While walking the
    tree, prints per file: relative path, name, suffix, absolute path and
    size; per directory: its parent.

    :param str_dirname: name of the root directory to create.
    :return: list of relative paths (as str) of every file in the tree.
    """
    dir_origem = Path('.') / Path(str_dirname)
    subdir_01 = Path(str_dirname) / 'dir01'
    subdir_02 = Path(str_dirname) / 'dir02'
    # Create the directory skeleton (idempotent).
    for directory in (dir_origem, subdir_01, subdir_02):
        directory.mkdir(parents=True, exist_ok=True)
    # The original repeated an identical create-if-missing stanza once per
    # file; a single loop over the file list is equivalent.
    example_files = [
        subdir_01 / 'f001.txt',
        subdir_01 / 'f002.txt',
        subdir_01 / 'f003.txt',
        subdir_01 / 'arq01.dat',
        subdir_01 / 'arq02.dat',
        subdir_02 / 'a001.dat',
        subdir_02 / 'a002.dat',
        subdir_02 / 'f.zip',
    ]
    for path_file in example_files:
        if not path_file.is_file():
            path_file.parent.mkdir(parents=True, exist_ok=True)
            path_file.touch()
    lst_files = []
    # Walk the tree, printing diagnostics and collecting file paths.
    for item in dir_origem.glob('**/*'):
        if item.is_file():
            print(item)                 # relative path + file name
            print(item.name)            # file name only
            print(item.suffix)          # file extension
            print(item.resolve())       # absolute path (from the root)
            print(item.stat().st_size)  # file size in bytes
            # Store the relative path as a string.
            lst_files.append(str(item))
        if item.is_dir():
            print(item.parent)
    print(' CRIADO O DIRETÓRIO DE EXEMPLO COM SUCESSO! ')
    return lst_files
def geojson_to_df(in_geojson, encoding="utf-8", drop_geometry=True):
    """Convert a GeoJSON object into a pandas DataFrame.

    Args:
        in_geojson (str | dict): Input GeoJSON — a URL, a local file path,
            or an already-parsed dict.
        encoding (str, optional): Encoding used when reading a local file.
            Defaults to "utf-8".
        drop_geometry (bool, optional): Drop all geometry-derived columns.
            Defaults to True.

    Raises:
        FileNotFoundError: If the input GeoJSON file could not be found.

    Returns:
        pd.DataFrame: Flattened features with ``properties.`` prefixes removed.
    """
    import json
    import pandas as pd
    from urllib.request import urlopen

    if isinstance(in_geojson, str):
        if in_geojson.startswith("http"):
            with urlopen(in_geojson) as response:
                data = json.load(response)
        else:
            geojson_path = os.path.abspath(in_geojson)
            if not os.path.exists(geojson_path):
                raise FileNotFoundError("The provided GeoJSON file could not be found.")
            with open(geojson_path, encoding=encoding) as fp:
                data = json.load(fp)
    elif isinstance(in_geojson, dict):
        data = in_geojson
    df = pd.json_normalize(data["features"])
    df.columns = [name.replace("properties.", "") for name in df.columns]
    if drop_geometry:
        geometry_cols = list(df.filter(regex="geometry"))
        df = df[df.columns.drop(geometry_cols)]
    return df
def UndistortImage(image, image_size,
                   image_rotation=None, image_center=None,
                   out_xs=None, out_ys=None,
                   direction='fwd', regenerate_grids=True,
                   **kwargs):
    """Interpolate `image` onto the undistorted (affine) output grid.

    Remember the recipe for fixing gwyddion image orientation:
    `image0=image0.T[:,::-1]`.
    """
    global grids

    # Fall back on the module-level default output axes when none are given.
    if out_xs is None:
        out_xs = default_out_xs
    if out_ys is None:
        out_ys = default_out_ys

    # (Re)build the cached affine grids from the feature-point correspondences
    # for the requested direction.
    if regenerate_grids or grids is None:
        src = source_pts[direction]
        dst = destination_pts[direction]
        grids = numrec.AffineGridsFromFeaturePoints(dst, [src], xs=out_xs, ys=out_ys)

    # Physical coordinate grids of the input image.
    input_xs, input_ys = getXYGrids(image.shape, image_size,
                                    rotation=image_rotation, center=image_center)
    result = numrec.InterpolateImageToAffineGrid(image,
                                                 grid_pts=grids['grid_pts'][0],
                                                 image_xgrid=input_xs,
                                                 image_ygrid=input_ys,
                                                 **kwargs)
    return AWA(result, axes=[out_xs, out_ys])
def plot_scores(df, corpus, output_dir, eval):
    """Plots per-layer phone-classification performance (F1 score) for each
    model in the DataFrame, one PNG per method, saved under
    `<output_dir>/<corpus>/<eval>/`."""
    target_dir = Path(output_dir, corpus, eval)
    os.makedirs(target_dir, exist_ok=True)

    # One figure per method, with one line per classifier.
    for method, method_df in df.groupby("method"):
        print(f"Plotting results for {method}")
        f = sns.lineplot(
            data=method_df, x="layer", y="f1", hue="classifier", ci=None
        ).set_title(
            f"Phone-classification {eval} scores for {method} on {method_df['train'].to_list()[0]}"
        )
        f.figure.savefig(str(target_dir) + f"/{method}_{eval}.png")
        f.figure.clf()
    print(f"Plots stored at {target_dir}")
async def handle(output_cfg: dict, queue: asyncio.Queue, _metrics: statistics.Statistics, start_shutdown: asyncio.Event):
    """Connect to rabbit and push the messages from the queue"""
    rabbitmq = RabbitMQ(output_cfg=output_cfg)
    clogger.info("Output handler: rabbitmq: Enabled")

    # Drain the queue until shutdown is requested; the short wait_for timeout
    # ensures the shutdown flag is re-checked twice a second.
    while True:
        if start_shutdown.is_set():
            break
        try:
            dnstap_msg = await asyncio.wait_for(queue.get(), timeout=0.5)
        except asyncio.TimeoutError:
            continue
        payload = transform.convert_dnstap(fmt=output_cfg["format"], tapmsg=dnstap_msg)
        rabbitmq.publish(payload)
        queue.task_done()

    # tell producer to shut down
    clogger.info("Output handler: rabbitmq: Triggering producer shutdown")
    rabbitmq.close_connection()
def quartic_oscillator(grids, k=1.):
    """Potential of quantum quartic oscillator.

    Args:
      grids: numpy array of grid points for evaluating 1d potential.
        (num_grids,)
      k: strength constant for potential.

    Returns:
      vp: Potential on grid.
        (num_grid,)
    """
    # V(x) = k * x^4 / 2, evaluated element-wise over the grid.
    return 0.5 * k * grids ** 4
def compute_row_similarities(A):
    """
    Compute pairwise cosine similarities between the rows of a binary sparse matrix.

    Parameters
    ----------
    A: scipy csr_matrix, shape (rows, cols)
        Binary matrix.

    Returns
    -------
    sim: scipy sparse matrix, shape (rows, rows)
        Pairwise row similarities (the product of the row-normalized matrix
        with its transpose).
    """
    # normalize A in row-axis
    # 1) compute per-row norm
    norm = np.sqrt(A.sum(axis=1))  # A is binary: \sum 1^2 = \sum 1
    norm = sparse.csr_matrix(norm)  # save as sparse (column vector, shape (rows, 1))
    # 2) build left-multiplying norm (https://stackoverflow.com/questions/16043299/substitute-for-numpy-broadcasting-using-scipy-sparse-csc-matrix)
    # summary: sparse arrays don't broadcast and something like
    # np.where(norm[:, na]==0., 0., A/norm[:, na]) wouldn't work
    # we need to use the left-multiplying trick to achieve that
    # `data` holds 1/norm only for rows whose norm is nonzero, so no division
    # by zero can occur; all-zero rows simply stay zero after normalization.
    data = 1. / norm.data
    # Row indices with a nonzero norm (indptr increases exactly at those rows).
    indices = np.where(np.diff(norm.indptr) != 0)[0]
    indptr = norm.indptr
    rows = A.shape[0]
    # Diagonal-like (rows x rows) matrix whose entry (i, i) is 1/norm_i.
    left_norm = sparse.csr_matrix((data, indices, indptr), shape=(rows, rows))
    # 3) compute row-wise normalized version of A
    A_norm = left_norm.dot(A)
    # compute pairwise row similarities
    sim = A_norm.dot(A_norm.T)
    return sim
def sha9(R, S):
    """Shape functions for a 9-noded (Lagrangian) quad element.

    The previous docstring was copied from the 4-noded element and claimed an
    output with 8 columns; this element has 9 nodes and returns a (2, 18)
    interpolation matrix.

    Parameters
    ----------
    R : float
        First natural coordinate of a point within the element, in [-1, 1].
    S : float
        Second natural coordinate of a point within the element, in [-1, 1].

    Returns
    -------
    N : Numpy array, shape (2, 18)
        Array of interpolation functions: the 9 nodal shape functions
        interleaved per degree of freedom (x-components on row 0 / even
        columns, y-components on row 1 / odd columns).

    Examples
    --------
    At the element center (0, 0) only the central (bubble) node contributes:

    >>> N = sha9(0, 0)
    >>> float(N[0, 16]), float(N[1, 17])
    (1.0, 1.0)

    At corner (1, 1) only corner node index 2 contributes:

    >>> N = sha9(1, 1)
    >>> float(N[0, 4]), float(N[1, 5])
    (1.0, 1.0)
    """
    N = np.zeros((2, 18))
    SN = np.zeros(9)
    ONE = 1.0
    QUART = 0.25
    HALF = 0.5
    RP = ONE + R
    RM = ONE - R
    RMS = ONE - R * R
    SP = ONE + S
    SM = ONE - S
    SMS = ONE - S * S
    # Central (bubble) node.
    SN[8] = RMS * SMS
    # Mid-side nodes, corrected by the bubble contribution.
    SN[7] = HALF * SMS * RM - HALF * SN[8]
    SN[6] = HALF * RMS * SP - HALF * SN[8]
    SN[5] = HALF * SMS * RP - HALF * SN[8]
    SN[4] = HALF * RMS * SM - HALF * SN[8]
    # Corner nodes, corrected by the adjacent mid-side and bubble contributions.
    SN[0] = QUART * RM * SM - HALF * SN[7] - HALF * SN[4] - QUART * SN[8]
    SN[1] = QUART * RP * SM - HALF * SN[5] - HALF * SN[4] - QUART * SN[8]
    SN[2] = QUART * RP * SP - HALF * SN[5] - HALF * SN[6] - QUART * SN[8]
    SN[3] = QUART * RM * SP - HALF * SN[7] - HALF * SN[6] - QUART * SN[8]
    # Interleave the 9 nodal functions into the 2 x 18 interpolation matrix.
    N[0, ::2] = SN
    N[1, 1::2] = SN
    return N
def padding(seq, size, mode):
    """Pad (or trim) a sequence to a fixed length.

    Parameters
    ----------
    seq: np.array
        The sequence to be padded.
    size: int
        Target length of the output sequence.
    mode: str
        Select padding mode among {"zero", "repeat"}.

    Raises
    ------
    ValueError
        If `mode` is not one of the supported padding modes.

    Returns
    -------
    seq: np.ndarray
    """
    if mode == "zero":
        # Zero-pad (or trim) via the project `trimmer` helper.
        return np.array(trimmer(seq, size, filler=0))
    if mode == "repeat":
        # Repeat the sequence contents until the target size is reached.
        return np.array(repeat_padding(seq, size))
    # Previously an unknown mode silently returned `seq` unchanged.
    raise ValueError(f"Unsupported padding mode: {mode!r}; expected 'zero' or 'repeat'.")
def get_dense_span_ends_from_starts(dense_span_starts,
                                    dense_span_ends):
  """For every mention start position finds the corresponding end position.

  Args:
    dense_span_starts: int Tensor of shape [seq_len]; 1 at positions where a
      mention starts, 0 elsewhere.
    dense_span_ends: int Tensor of shape [seq_len]; 1 at positions where a
      mention ends, 0 elsewhere.

  Returns:
    int Tensor of shape [seq_len] holding, at every start position, the index
    of the paired end position, and 0 elsewhere.

  NOTE(review): assumes starts and ends occur in matching order and in equal
  number (the i-th start pairs with the i-th end) -- confirm with callers.
  """
  seq_len = tf.shape(dense_span_starts)[0]
  # Indices of start positions; kept 2-D ([num_spans, 1]) for the scatter below.
  start_pos = tf.cast(tf.where(tf.equal(dense_span_starts, 1)), tf.int32)
  # Indices of end positions, squeezed to shape [num_spans].
  end_pos = tf.cast(
      tf.squeeze(tf.where(tf.equal(dense_span_ends, 1)), 1), tf.int32)
  dense_span_ends_from_starts = tf.zeros(seq_len, dtype=tf.int32)
  # Scatter each end index into the slot of its paired start position.
  dense_span_ends_from_starts = tf.tensor_scatter_nd_add(
      dense_span_ends_from_starts, start_pos, end_pos)
  return dense_span_ends_from_starts
def test_put_correct_existing(test_client, test_cases_positive, test_headers):
    """
    GIVEN a Flask application configured for testing
    WHEN the '/hello/<username>' page is requested (PUT) for a username that
         already exists
    THEN check that the response is valid and the user is updated
    """
    payload = json.dumps({"dateOfBirth": "1995-04-08"})
    user = test_cases_positive[3]['username']
    response = test_client.put(f'/hello/{user}', data=payload, headers=test_headers)
    # 204 No Content signals a successful update with no response body.
    assert response.status_code == 204
def bb_to_plt_plot(x, y, w, h):
    """Convert a bounding box (x, y, w, h) into coordinate lists that trace
    the box outline, for actual plotting with plt.plot(X, Y)."""
    left, right = x, x + w
    bottom, top = y, y + h
    # Walk the corners counter-clockwise and close the loop at the start.
    xs = [left, left, right, right, left]
    ys = [bottom, top, top, bottom, bottom]
    return xs, ys
def handler404(request, *args):
    """
    Render the custom 404 (not found) page.

    :param request: the request object used
    :type request: HttpRequest
    """
    # Extra positional args from the URL resolver are accepted but unused.
    response = render(request, '404.html', status=404)
    return response
def test_regression():
    """Test a regression model saved with joblib.

    On scikit-learn versions newer than 0.23 loading the legacy pickle is
    expected to fail with a ValueError; on older versions the model loads
    and its servable metadata is checked.
    """
    # Path to the pickled model, resolved relative to this test file.
    model_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'model-lr.pkl'))
    # Load the model
    # NOTE(review): `skversion > '0.23'` is a lexicographic string comparison,
    # which can misorder some version strings; consider packaging.version.parse
    # -- confirm the intended semantics.
    if skversion > '0.23':
        with raises(ValueError):
            ScikitLearnModel.create_model(model_path, n_input_columns=2, serialization_method='joblib',
                                          classes=np.array(['number']))
    else:
        model_info = ScikitLearnModel.create_model(model_path, n_input_columns=2,
                                                   serialization_method='joblib',
                                                   classes=np.array(['number']))
        # Check that the metadata is as expected
        assert model_info.servable.methods["run"].method_details["method_name"] == "predict"
        assert model_info.list_files() == [model_path]
        assert model_info.servable.options["classes"] == ["number"]
        assert model_info.servable.methods["run"].output.shape == [None]
def merge_dimensions(z, axis, sizes):
    """Merge several dimensions of a tensor into a single dimension. This
    operation is the opposite of :func:`split_dimension`.

    Args:
        z (tensor): Tensor to merge.
        axis (int): Axis to merge into.
        sizes (iterable[int]): Sizes of dimensions to merge.

    Returns:
        tensor: Reshaped version of `z`.
    """
    # The slicing below is only correct for a non-negative index, so resolve it.
    axis = resolve_axis(z, axis)
    shape = B.shape(z)
    # The merged dimensions are the `len(sizes)` axes ending at `axis`.
    first = axis - len(sizes) + 1
    new_shape = list(shape[:first]) + [np.prod(sizes)] + list(shape[axis + 1:])
    return B.reshape(z, *new_shape)
def get_assignment_map_replaced(init_ckpt,
                                name_replacement_dict=None,
                                list_vars=None):
    """Builds a checkpoint-to-graph assignment map with renamed variables.

    Args:
        init_ckpt: Path of the checkpoint to initialize from.
        name_replacement_dict: Mapping { old_name_str_chunk: new_name_str_chunk };
            for each checkpoint variable name, the first matching key is
            replaced by its value. Defaults to an empty mapping.
        list_vars: Graph variables to match against; defaults to
            tf.global_variables().

    Returns:
        collections.OrderedDict mapping checkpoint variable names to the
        (renamed) graph variable names that exist in `list_vars`.
    """
    # `None` default avoids the shared-mutable-default-argument pitfall;
    # behavior is unchanged for callers who omit the argument.
    if name_replacement_dict is None:
        name_replacement_dict = {}
    if list_vars is None:
        list_vars = tf.global_variables()

    # Index graph variables by name, stripping the trailing ':0'-style suffix.
    name_to_variable = collections.OrderedDict()
    for var in list_vars:
        name = var.name
        m = re.match("^(.*):\\d+$", name)
        if m is not None:
            name = m.group(1)
        name_to_variable[name] = var

    ckpt_vars = tf.train.list_variables(init_ckpt)

    assignment_map = collections.OrderedDict()
    for name, _ in ckpt_vars:
        # Apply the first matching replacement; skip names with no match
        # (the for/else `continue` fires only when no `break` occurred).
        for k, v in name_replacement_dict.items():
            if k in name:
                name_new = name.replace(k, v)
                break
        else:
            continue
        # Only map names whose renamed form exists in the graph.
        if name_new not in name_to_variable:
            continue
        assignment_map[name] = name_new
        print("name_old: %s" % name)
        print("name_new: %s" % name_new)
    return assignment_map
def RULE110():
    """RULE 110 cellular automata node.

    The output list passed below is indexed by the integer value of the 3-bit
    neighborhood pattern, matching the table:

    .. code::

        000 : 0
        001 : 1
        010 : 1
        011 : 1
        100 : 0
        101 : 1
        110 : 1
        111 : 0
    """
    return BooleanNode.from_output_list(outputs=[0,1,1,1,0,1,1,0], name="RULE 110")
def _log_request(rpc_state, request):
    """
    Writes a message with request into debug logs
    :param rpc_state: An id of request
    :param request: A received response
    :return: None
    """
    # Guard clause: skip the (potentially costly) message formatting entirely
    # when DEBUG logging is disabled.
    if not logger.isEnabledFor(logging.DEBUG):
        return
    logger.debug("%s: request = { %s }", rpc_state, _message_to_string(request))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.