| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def main(*args: str):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
:param args: command line arguments
"""
app = ArticleEditor(*args)
app.run()
| 14,100
|
def parse_field_pubblicazione(field):
    """
    Extracts year, place and publisher from the field `pubblicazione` by applying a cascade of regexps.
    """
    exp1 = r'^(?P<place>.*?)(?::)(?P<publisher>.*?)\D{1}?(?P<year>\d+)?$'
    exp2 = r'^(?P<place>\D+)(?:\s?\W\s?)(?P<publisher>.*?)\D{1}?(?P<year>\d+)?$'
    exp3 = r'(?:.*?)?(?P<year>\d{4})'
    exp4 = r'^(?P<place>\D{3,})$'
    # Try each expression in turn, from the most specific to the most permissive.
    for exp in (exp1, exp2, exp3, exp4):
        match = re.match(exp, field)
        if match is not None:
            return match.groupdict()
    # nothing matched
    return {}
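# A minimal usage sketch, assuming `re` is imported in the module and that the record
# follows a hypothetical "place : publisher, year" layout:
print(parse_field_pubblicazione("Milano : Hoepli, 1905"))
# -> {'place': 'Milano ', 'publisher': ' Hoepli,', 'year': '1905'}  (whitespace/punctuation is not trimmed)
print(parse_field_pubblicazione("1905"))
# -> {'year': '1905'}  (only the year-only pattern matches)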
| 14,101
|
def test_change_node_prop_DNE(loaded_lpg):
"""Test that we get error if we try to change a property that DNE."""
loaded_lpg.add_node_props('Charlie', kidneys=1)
with pytest.raises(AttributeError):
loaded_lpg.change_node_prop('Charlie', 'horn', 1)
| 14,102
|
def test_invalid_landscape():
"""Invalid landscape type must raise error"""
with pytest.raises(ValueError):
BioSim(island_map="WWW\nWRW\nWWW", ini_pop=[], seed=1, vis_years=0)
| 14,103
|
def docker_login(ip: str) -> None:
"""
Login-in to docker on the server to avoid docker rate limit quota violation
Args:
ip: The ip of the server that should be logged in
"""
docker_username = os.environ.get('DOCKERHUB_USER')
docker_password = os.environ.get('DOCKERHUB_PASSWORD')
container_engine_type = 'podman' if is_redhat_instance(ip) else 'docker'
try:
check_output(
f'ssh {SSH_USER}@{ip} cd /home/demisto && sudo -u demisto {container_engine_type} '
f'login --username {docker_username} --password-stdin'.split(),
input=docker_password.encode())
except Exception:
logging.exception(f'Could not login to {container_engine_type} on server {ip}')
| 14,104
|
def test_api_calendar():
"""Return a test calendar object used in API responses."""
return TEST_API_CALENDAR
| 14,105
|
def apply_custom_colormap(image_gray, cmap=plt.get_cmap("seismic")):
"""
Implementation of applyColorMap in OpenCV using colormaps in Matplotlib.
"""
assert image_gray.dtype == np.uint8, "must be np.uint8 image"
if image_gray.ndim == 3:
image_gray = image_gray.squeeze(-1)
# Initialize the matplotlib color map
sm = plt.cm.ScalarMappable(cmap=cmap)
# Obtain linear color range
color_range = sm.to_rgba(np.linspace(0, 1, 256))[:, 0:3] # color range RGBA => RGB
color_range = (color_range * 255.0).astype(np.uint8) # [0,1] => [0,255]
color_range = np.squeeze(
np.dstack([color_range[:, 2], color_range[:, 1], color_range[:, 0]]), 0
) # RGB => BGR
# Apply colormap for each channel individually
channels = [cv2.LUT(image_gray, color_range[:, i]) for i in range(3)]
return np.dstack(channels)
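# A minimal usage sketch, assuming numpy, cv2 and matplotlib.pyplot (plt) are imported
# as in the snippet's module; "viridis" is just an example colormap name.
gray = np.linspace(0, 255, 256, dtype=np.uint8).reshape(16, 16)
colored = apply_custom_colormap(gray, cmap=plt.get_cmap("viridis"))
print(colored.shape, colored.dtype)  # (16, 16, 3) uint8, channels in BGR order for OpenCV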
| 14,106
|
def _cond_with_per_branch_args(pred,
true_operand, true_fun: Callable,
false_operand, false_fun: Callable):
"""Conditionally apply ``true_fun`` or ``false_fun``.
Has equivalent semantics to this Python implementation::
def cond(pred, true_operand, true_fun, false_operand, false_fun):
if pred:
return true_fun(true_operand)
else:
return false_fun(false_operand)
Pred has to be a scalar type, collection types (list, tuple) are not supported
"""
if not (callable(true_fun) and callable(false_fun)):
raise TypeError("lax.cond: true_fun and false_fun arguments should be callable.")
return _cond(pred,
lambda op: true_fun(op[0]),
lambda op: false_fun(op[1]),
(true_operand, false_operand))
| 14,107
|
def msgSet(key, notUsed, queryString, body):
"""no treatment on the body (we send exactly the body like we received it)"""
params = urllib.parse.parse_qs(body.decode('utf-8'))
#sendSMS.writeRawMsg(body)
user = params['user'][0]
print(params)
sendSMS.writeMsgUser(params['msg'][0], user)
return "Message sent to " + user
| 14,108
|
def example_2():
"""Example with hierarchical basis functions"""
#############
# settings: #
#############
basis = Hat # Hat or Gauss
level = 1 # highest level of basis functions
eval_num = 60 # number of function evaluations
# evaluation coordinates
input = torch.linspace(0, 1, eval_num)
# function evaluations
target = torch.linspace(0, 1, eval_num).apply_(f)
#############
# compute number of basis functions
bf_num = 2**(level + 1) - 1
# create 1D basis with hierarchical basis functions
bf = basis.hierarchical(level)
# create model
model = Model(bf, bf_num)
# create solver
solver = Solver(model, input, target)
# solve linear equation / least squares
solver.le()
# plot
plot(model, f, name='Example 2')
| 14,109
|
def _strip_build_number(api_version):
"""Removes the build number component from a full api version string."""
match = re.match(r"^([A-Z]+-)?([0-9]+)(\.[0-9]+){2}$", api_version)
if match:
return api_version[:match.start(3)]
# if there aren't exactly 3 version number components, just leave it unchanged
return api_version
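# A quick sanity check, assuming `re` is imported in the module:
assert _strip_build_number("PROD-8.1.103") == "PROD-8.1"  # third (build) component removed
assert _strip_build_number("8.1") == "8.1"                # not three components: left unchanged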
| 14,110
|
def get_me():
"""サインインしている自分自身の情報を取得"""
jia_user_id = get_user_id_from_session()
return {"jia_user_id": jia_user_id}
| 14,111
|
def dec_file(name, out=None, **kwargs):
"""
This is a helper function to decrypt a file and return its contents.
You can provide an optional output file using `out`
`name` can be a local file or when not using `salt-run` can be a url like `salt://`, `https://` etc.
CLI Examples:
.. code-block:: bash
salt-run nacl.dec_file name=/tmp/id_rsa.nacl
salt-call nacl.dec_file name=salt://crt/mycert.nacl out=/tmp/id_rsa
salt-run nacl.dec_file name=/tmp/id_rsa.nacl box_type=secretbox \
sk_file=/etc/salt/pki/master/nacl.pub
"""
kwargs["opts"] = __opts__
return salt.utils.nacl.dec_file(name, out, **kwargs)
| 14,112
|
async def conversation_steps(month: int = Query(default=1, ge=1, le=6), current_user: User = Depends(Authentication.get_current_user_and_bot)):
"""
Fetches the number of conversation steps that took place in the chat between the users and the agent
"""
return Utility.trigger_history_server_request(
current_user.get_bot(),
f'/api/history/{current_user.get_bot()}/metrics/conversation/steps',
{'month': month}
)
| 14,113
|
def off(app: str) -> dict:
"""
Switches the app offline, if it isn't already.
:param app: The name of the Heroku app whose formation you want to switch offline
:return: dictionary containing information about the app
"""
return Herokron(app).off()
| 14,114
|
def put_pets_pet_id(
pet_id: str = Path(..., alias='petId'), body: PetForm = None
) -> None:
"""
update a pet
"""
pass
| 14,115
|
def test_invalid_getter_and_setter(
assert_errors,
parse_ast_tree,
default_options,
code,
mode,
):
"""Testing that wrong use of getter/setter is prohibited."""
tree = parse_ast_tree(mode(code))
visitor = WrongClassBodyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [
UnpythonicGetterSetterViolation,
UnpythonicGetterSetterViolation,
])
| 14,116
|
def parse_csv_from_response(response):
"""
Convenience function for working with CSV responses.
Parses the CSV rows and returns a list of dicts, using the
CSV column headers as keys.
"""
file_from_string = io.StringIO(response.content.decode("utf-8"))
parsed_rows = []
reader = csv.DictReader(file_from_string)
for row in reader:
logger.debug(row)
parsed_rows.append(row)
return parsed_rows
| 14,117
|
def log_handler(handler: Any) -> None:
"""Override tornado's logging."""
# log only errors (status >= 500)
if handler.get_status() >= 500:
access_log.error(
'{} {}'.format(handler.get_status(), handler._request_summary())
)
| 14,118
|
def count_weekday(start, stop, wd_target=0):
"""
Returns the number of days between start and stop (inclusive) that are both the
first day of a month and the specified weekday, with 0 being Monday.
"""
counter = 0
while start != stop + timedelta(days=1):
if start.weekday() == wd_target and start.day == 1:
counter += 1
start += timedelta(days=1)
return counter
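# A minimal usage sketch, assuming `timedelta` (and here `date`) come from `datetime`:
from datetime import date
# February, March and November 2021 are the only months of that year that begin on a Monday, so this prints 3.
print(count_weekday(date(2021, 1, 1), date(2021, 12, 31), wd_target=0))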
| 14,119
|
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i+order])
ngram_counts[ngram] += 1
return ngram_counts
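# A minimal usage sketch, assuming `collections` is imported in the module:
counts = _get_ngrams(["the", "cat", "sat"], max_order=2)
# Counter({('the',): 1, ('cat',): 1, ('sat',): 1, ('the', 'cat'): 1, ('cat', 'sat'): 1})
print(counts)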
| 14,120
|
def update_template_resource(template, resource, bucket_name, bucket_dir,
resource_param='CodeUri',
s3_object=None, s3_uri=None):
"""Legacy -> Will be moved to the SAMTemplate class."""
if s3_object:
s3_uri = f's3://{bucket_name}/{bucket_dir}/{s3_object}'
template['Resources'][resource]['Properties'][resource_param] = s3_uri
| 14,121
|
def test_second_generator() -> None:
"""
important remarks on secp256k1-zkp prefix for
compressed encoding of the second generator:
https://github.com/garyyu/rust-secp256k1-zkp/wiki/Pedersen-Commitment
"""
H = (
0x50929B74C1A04954B78B4B6035E97A5E078A5A0F28EC96D547BFEE9ACE803AC0,
0x31D3C6863973926E049E637CB1B5F40A36DAC28AF1766968C30C2313F3A38904,
)
assert H == pedersen.second_generator(secp256k1, sha256)
_ = pedersen.second_generator(secp256r1, sha256)
_ = pedersen.second_generator(secp384r1, sha384)
| 14,122
|
def pkg_topics_list(data_dict):
"""
Get a list of topics
"""
pkg = model.Package.get(data_dict['id'])
vocabulary = model.Vocabulary.get('Topics')
topics = []
if vocabulary:
topics = pkg.get_tags(vocab=vocabulary)
return topics
| 14,123
|
def get_adrill_cdbs(adrill_user_cfg, adrill_shared_cfg=None):
"""Return the names and locatinos of all user defined MSC Adams Drill databases (cdbs)
Parameters
----------
adrill_user_cfg : str
Full path to an Adams Drill user configuration file. This should be in the user's HOME directory.
adrill_shared_cfg : str
Full path to an Adams Drill shared configuration file. This should be in the Adams Drill installation directory. (the default is None, which means that only user cdbs will be returned.)
Returns
-------
dict
A dictionary in which the cdb names are keys and the cdb locations are values.
"""
cdbs = {}
with open(adrill_user_cfg,'r') as fid:
for line in fid:
if line.startswith('DATABASE'):
# try:
cdb_name = re.split('[\t ]+',line.lstrip())[1]
cdb_loc = thornpy.utilities.convert_path(re.split('[\t ]+', line, maxsplit=2)[-1].replace('\n','').replace('$HOME', os.path.expanduser('~')))
cdbs[cdb_name] = cdb_loc
# except:
# raise cdbError('The following line in {} could not be interpreted.\n\n{}'.format(adrill_user_cfg,line))
if adrill_shared_cfg:
top_dir = os.path.split(os.path.split(adrill_shared_cfg)[0])[0]
with open(adrill_shared_cfg,'r') as fid:
for line in fid:
if line.startswith('DATABASE'):
# try:
cdb_name = re.split('[\t ]+', line, maxsplit=2)[1]
cdb_loc = thornpy.utilities.convert_path(re.split('[\t ]+', line, maxsplit=2)[-1].replace('\n','').replace('$HOME', os.path.expanduser('~')).replace('$topdir', top_dir))
cdbs[cdb_name] = cdb_loc
# except:
# raise cdbError('The following line in {} could not be interpreted.\n\n{}'.format(adrill_shared_cfg,line))
return cdbs
| 14,124
|
def copy_assets(app, exception):
""" Copy asset files to the output """
if "getLogger" in dir(logging):
log = logging.getLogger(__name__).info # pylint: disable=no-member
warn = logging.getLogger(__name__).warning # pylint: disable=no-member
else:
log = app.info
warn = app.warning
builders = get_compatible_builders(app)
if exception:
return
if app.builder.name not in builders:
if not app.config["sphinx_tabs_nowarn"]:
warn(
"Not copying tabs assets! Not compatible with %s builder"
% app.builder.name
)
return
log("Copying tabs assets")
installdir = Path(app.builder.outdir) / "_static" / "sphinx_tabs"
for path in FILES:
source = resource_filename("sphinx_tabs", path)
dest = installdir / path
destdir = dest.parent
if not destdir.exists():
destdir.mkdir(parents=True)
copyfile(source, dest)
| 14,125
|
def five_five(n):
"""
This checks if n is a power of 2 (or 0).
This is because the only way that n and (n-1) have none of the same bits (the
& check) is when n is a power of 2, or 0.
"""
return ((n & (n-1)) == 0)
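# A quick sanity check of the bit trick:
assert five_five(0) is True
assert five_five(8) is True   # 8 = 0b1000 and 7 = 0b0111 share no bits
assert five_five(6) is False  # 6 = 0b110 and 5 = 0b101 share a bit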
| 14,126
|
async def send_message(user_id: int,
text: str,
buttons: Optional[list[dict[str, str]]] = None,
disable_notification: bool = False) -> bool:
"""
Safe messages sender
:param user_id:
:param text:
:param buttons: List of inline buttons in format [{'text': 'text', 'callback_data': 'callback_data', **kwargs}].
A button can have all the same keys that InlineKeyboardButton() take
:param disable_notification:
:return:
"""
from main import bot
try:
await bot.send_message(user_id, text, reply_markup=InlineKeyboardMarkup(
row_width=2,
resize_keyboard=True,
one_time_keyboard=True, ).add(
*[InlineKeyboardButton(**button) for button in buttons])
if buttons else None,
disable_notification=disable_notification)
log.info(f"Sent message to target [ID:{user_id}]")
except exceptions.BotBlocked:
log.error(f"Target [ID:{user_id}]: blocked by user")
except exceptions.ChatNotFound:
log.error(f"Target [ID:{user_id}]: invalid user ID")
except exceptions.RetryAfter as e:
log.error(f"Target [ID:{user_id}]: Flood limit is exceeded. Sleep {e.timeout} seconds.")
await asyncio.sleep(e.timeout)
return await send_message(user_id, text, buttons) # Recursive call
except exceptions.UserDeactivated:
log.error(f"Target [ID:{user_id}]: user is deactivated")
except exceptions.TelegramAPIError:
log.exception(f"Target [ID:{user_id}]: failed")
else:
log.info(f"Target [ID:{user_id}]: success")
return True
return False
| 14,127
|
def convert_bytes_to_size(some_bytes):
"""
Convert number of bytes to appropriate form for display.
:param some_bytes: A string or integer
:return: A string
"""
some_bytes = int(some_bytes)
suffix_dict = {
'0': 'B',
'1': 'KiB',
'2': 'MiB',
'3': 'GiB',
'4': 'TiB',
'5': 'PiB'
}
counter = 0
while some_bytes > 1 and counter < 5:
tmp = some_bytes / 1024
if tmp < 1:
break
else:
some_bytes = tmp
counter += 1
return str(format(some_bytes, '.2f')) + ' ' + str(suffix_dict[str(counter)])
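# A minimal usage sketch:
print(convert_bytes_to_size(512))            # '512.00 B'
print(convert_bytes_to_size(1536))           # '1.50 KiB'
print(convert_bytes_to_size(5 * 1024 ** 3))  # '5.00 GiB'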
| 14,128
|
def create_bbregister_func_to_anat(fieldmap_distortion=False,
name='bbregister_func_to_anat'):
"""
Registers a functional scan in native space to structural. This is meant to be used
after create_nonlinear_register() has been run and relies on some of its outputs.
Parameters
----------
fieldmap_distortion : bool, optional
If field map-based distortion correction is being run, FLIRT should
take in the appropriate field map-related inputs.
name : string, optional
Name of the workflow.
Returns
-------
register_func_to_anat : nipype.pipeline.engine.Workflow
Notes
-----
Workflow Inputs::
inputspec.func : string (nifti file)
Input functional scan to be registered to MNI space
inputspec.anat_skull : string (nifti file)
Corresponding full-head scan of subject
inputspec.linear_reg_matrix : string (mat file)
Affine matrix from linear functional to anatomical registration
inputspec.anat_wm_segmentation : string (nifti file)
White matter segmentation probability mask in anatomical space
inputspec.bbr_schedule : string (.sch file)
Boundary based registration schedule file for flirt command
Workflow Outputs::
outputspec.func_to_anat_linear_xfm : string (mat file)
Affine transformation from functional to anatomical native space
outputspec.anat_func : string (nifti file)
Functional data in anatomical space
"""
register_bbregister_func_to_anat = pe.Workflow(name=name)
inputspec = pe.Node(util.IdentityInterface(fields=['func',
'anat_skull',
'linear_reg_matrix',
'anat_wm_segmentation',
'bbr_schedule',
'fieldmap',
'fieldmapmask'
]),
name='inputspec')
inputNode_echospacing = pe.Node(
util.IdentityInterface(fields=['echospacing']),
name='echospacing_input')
inputNode_pedir = pe.Node(util.IdentityInterface(fields=['pedir']),
name='pedir_input')
outputspec = pe.Node(util.IdentityInterface(fields=['func_to_anat_linear_xfm',
'anat_func']),
name='outputspec')
wm_bb_mask = pe.Node(interface=fsl.ImageMaths(),
name='wm_bb_mask')
wm_bb_mask.inputs.op_string = '-thr 0.5 -bin'
register_bbregister_func_to_anat.connect(inputspec, 'anat_wm_segmentation',
wm_bb_mask, 'in_file')
def bbreg_args(bbreg_target):
return '-cost bbr -wmseg ' + bbreg_target
bbreg_func_to_anat = pe.Node(interface=fsl.FLIRT(),
name='bbreg_func_to_anat')
bbreg_func_to_anat.inputs.dof = 6
register_bbregister_func_to_anat.connect(inputspec, 'bbr_schedule',
bbreg_func_to_anat, 'schedule')
register_bbregister_func_to_anat.connect(wm_bb_mask, ('out_file', bbreg_args),
bbreg_func_to_anat, 'args')
register_bbregister_func_to_anat.connect(inputspec, 'func',
bbreg_func_to_anat, 'in_file')
register_bbregister_func_to_anat.connect(inputspec, 'anat_skull',
bbreg_func_to_anat, 'reference')
register_bbregister_func_to_anat.connect(inputspec, 'linear_reg_matrix',
bbreg_func_to_anat, 'in_matrix_file')
if fieldmap_distortion:
def convert_pedir(pedir):
# FSL Flirt requires pedir input encoded as an int
conv_dct = {'x': 1, 'y': 2, 'z': 3, '-x': -1, '-y': -2, '-z': -3}
if not isinstance(pedir, str):
raise Exception("\n\nPhase-encoding direction must be a "
"string value.\n\n")
if pedir not in conv_dct.keys():
raise Exception("\n\nInvalid phase-encoding direction "
"entered: {0}\n\n".format(pedir))
return conv_dct[pedir]
register_bbregister_func_to_anat.connect(inputNode_pedir, ('pedir', convert_pedir),
bbreg_func_to_anat, 'pedir')
register_bbregister_func_to_anat.connect(inputspec, 'fieldmap',
bbreg_func_to_anat, 'fieldmap')
register_bbregister_func_to_anat.connect(inputspec, 'fieldmapmask',
bbreg_func_to_anat, 'fieldmapmask')
register_bbregister_func_to_anat.connect(inputNode_echospacing, 'echospacing',
bbreg_func_to_anat, 'echospacing')
register_bbregister_func_to_anat.connect(bbreg_func_to_anat, 'out_matrix_file',
outputspec, 'func_to_anat_linear_xfm')
register_bbregister_func_to_anat.connect(bbreg_func_to_anat, 'out_file',
outputspec, 'anat_func')
return register_bbregister_func_to_anat
| 14,129
|
def pointShiftFromRange(dataSize, x = all, y = all, z = all, **args):
"""Calculate shift of points given a specific range restriction
Arguments:
dataSize (str): data size of the full image
x,y,z (tuples or all): range specifications
Returns:
tuple: shift of points from original origin of data to origin of range reduced data
"""
if isinstance(dataSize, str):
dataSize = self.dataSize(dataSize)
dataSize = list(dataSize)
d = len(dataSize)
rr = []
if d > 0:
rr.append(toDataRange(dataSize[0], r = x))
if d > 1:
rr.append(toDataRange(dataSize[1], r = y))
if d > 2:
rr.append(toDataRange(dataSize[2], r = z))
if d > 3 or d < 1:
raise RuntimeError('shiftFromRange: dimension %d too big' % d)
return [r[0] for r in rr]
| 14,130
|
def callparser():
"""Parses a group of expressions."""
def cull_seps(tokens):
return tokens[0] or tokens[1]
return RepeatParser(exprparser() + OptionParser(dlmparser(',')) ^ cull_seps)
| 14,131
|
def write_section(section_name, section, keys, writer) -> bool:
"""
Saves the specified section to the specified writer starting at the current
point in the writer. It will not throw an exception. On error (IO exception
or not being able to write the section) it will return false. WARNING: It can
not scan the destination to see if this section has already been written, so
typically this method is called when writing out an entire configuration with
multiple sections in sequence.
Returns True on success and False on failure.
"""
keys = keys if keys else section.keys()
ret = False
# OBSOLETE with io.TextIOWrapper(writer) as writer2:
try:
writer.write(section_name + ":\n")
for k in keys:
val = section.get(k)
if val:
output = " " + k + _COLONSPACE + val + "\n"
writer.write(output)
ret = True
except OSError as err:
_printerr(err) # Just return false
return ret
| 14,132
|
def _rotation_270(image):
"""Rotate an image with 270 degrees (clockwise).
Parameters
----------
image : np.ndarray
Image to rotate with shape (y, x, channels).
Returns
-------
image_rotated : np.ndarray
Image rotated with shape (y, x, channels).
"""
image_rotated = _flip_v(image)
image_rotated = _transpose(image_rotated)
return image_rotated
| 14,133
|
def classification_id_for_objs(object_id: str, url: str, token: str):
"""
Get classification id for a given object
Arguments
----------
object_id : str
Object id to get classification id for
url : str
Skyportal url
token : str
Skyportal token
Returns
----------
status_code : int
HTTP status code
data : list
List of classification ids and their author ids
"""
classifications = api(
"GET",
f"{url}/api/sources/{object_id}/classifications",
token=token,
)
data = {}
if classifications.status_code == 200:
data = {
"id": classifications.json()["data"][0]["id"],
"author_id": classifications.json()["data"][0]["author_id"],
}
return classifications.status_code, data
| 14,134
|
def ciede2000(Lab_1, Lab_2):
"""Calculates CIEDE2000 color distance between two CIE L*a*b* colors."""
C_25_7 = 6103515625 # 25**7
L1, a1, b1 = Lab_1[0], Lab_1[1], Lab_1[2]
L2, a2, b2 = Lab_2[0], Lab_2[1], Lab_2[2]
C1 = math.sqrt(a1**2 + b1**2)
C2 = math.sqrt(a2**2 + b2**2)
C_ave = (C1 + C2) / 2
G = 0.5 * (1 - math.sqrt(C_ave**7 / (C_ave**7 + C_25_7)))
L1_, L2_ = L1, L2
a1_, a2_ = (1 + G) * a1, (1 + G) * a2
b1_, b2_ = b1, b2
C1_ = math.sqrt(a1_**2 + b1_**2)
C2_ = math.sqrt(a2_**2 + b2_**2)
if b1_ == 0 and a1_ == 0: h1_ = 0
elif a1_ >= 0: h1_ = math.atan2(b1_, a1_)
else: h1_ = math.atan2(b1_, a1_) + 2 * math.pi
if b2_ == 0 and a2_ == 0: h2_ = 0
elif a2_ >= 0: h2_ = math.atan2(b2_, a2_)
else: h2_ = math.atan2(b2_, a2_) + 2 * math.pi
dL_ = L2_ - L1_
dC_ = C2_ - C1_
dh_ = h2_ - h1_
if C1_ * C2_ == 0: dh_ = 0
elif dh_ > math.pi: dh_ -= 2 * math.pi
elif dh_ < -math.pi: dh_ += 2 * math.pi
dH_ = 2 * math.sqrt(C1_ * C2_) * math.sin(dh_ / 2)
L_ave = (L1_ + L2_) / 2
C_ave = (C1_ + C2_) / 2
_dh = abs(h1_ - h2_)
_sh = h1_ + h2_
C1C2 = C1_ * C2_
if _dh <= math.pi and C1C2 != 0: h_ave = (h1_ + h2_) / 2
elif _dh > math.pi and _sh < 2 * math.pi and C1C2 != 0: h_ave = (h1_ + h2_) / 2 + math.pi
elif _dh > math.pi and _sh >= 2 * math.pi and C1C2 != 0: h_ave = (h1_ + h2_) / 2 - math.pi
else: h_ave = h1_ + h2_
T = 1 - 0.17 * math.cos(h_ave - math.pi / 6) + 0.24 * math.cos(2 * h_ave) + 0.32 * math.cos(3 * h_ave + math.pi / 30) - 0.2 * math.cos(4 * h_ave - 63 * math.pi / 180)
h_ave_deg = h_ave * 180 / math.pi
if h_ave_deg < 0: h_ave_deg += 360
elif h_ave_deg > 360: h_ave_deg -= 360
dTheta = 30 * math.exp(-(((h_ave_deg - 275) / 25)**2))
R_C = 2 * math.sqrt(C_ave**7 / (C_ave**7 + C_25_7))
S_C = 1 + 0.045 * C_ave
S_H = 1 + 0.015 * C_ave * T
Lm50s = (L_ave - 50)**2
S_L = 1 + 0.015 * Lm50s / math.sqrt(20 + Lm50s)
R_T = -math.sin(dTheta * math.pi / 90) * R_C
k_L, k_C, k_H = 1, 1, 1
f_L = dL_ / k_L / S_L
f_C = dC_ / k_C / S_C
f_H = dH_ / k_H / S_H
dE_00 = math.sqrt(f_L**2 + f_C**2 + f_H**2 + R_T * f_C * f_H)
return dE_00
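# Two sanity checks that follow directly from the formula, assuming `math` is imported:
print(ciede2000((50.0, 0.0, 0.0), (50.0, 0.0, 0.0)))   # 0.0: identical colors
print(ciede2000((0.0, 0.0, 0.0), (100.0, 0.0, 0.0)))   # 100.0: pure lightness difference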
| 14,135
|
def ifft2c_new(data: torch.Tensor) -> torch.Tensor:
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data: Complex valued input data containing at least 3 dimensions:
dimensions -3 & -2 are spatial dimensions and dimension -1 has size
2. All other dimensions are assumed to be batch dimensions.
Returns:
The IFFT of the input.
"""
if not data.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
data = ifftshift(data, dim=[-3, -2])
data = torch.view_as_real(
torch.fft.ifftn( # type: ignore
torch.view_as_complex(data), dim=(-2, -1), norm="ortho"
)
)
data = fftshift(data, dim=[-3, -2])
return data
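# A minimal usage sketch, assuming torch and the module's fftshift/ifftshift helpers are available:
x = torch.randn(4, 8, 8, 2)  # batch of 8x8 "complex" images, real/imag stacked in the last dim
y = ifft2c_new(x)
print(y.shape)               # torch.Size([4, 8, 8, 2])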
| 14,136
|
def _make_options(context, base):
"""Return pyld options for given context and base."""
options = {}
if context is None:
context = default_context()
options['expandContext'] = context
if base is not None:
options['base'] = base
return options
| 14,137
|
def datatable(table_config: DatatableConfig, table_id: str, class_name: str = ''):
"""
Deprecated, use instead
<table id="{table_id}" data-datatable-url="{url}" class="{class_name}"></table>
"""
return {
"rich_columns": table_config.enabled_columns,
"search_box_enabled": table_config.search_box_enabled,
"table_id": table_id,
"class_name": class_name,
"expand_client_renderer": table_config.expand_client_renderer
}
| 14,138
|
def main():
#"""Prepare neuromorphic MNIST image datasets for use in caffe
#Each dataset will be generated with different number of unique spikes
#"""
#initial_size = 1e6 #best to make this big enough avoid expensive re-allocation
#test_dir = os.path.abspath('testFull')
#train_dir = os.path.abspath('trainFull')
#for num_spikes in range(150, 260, 10):
# #test directory
# image_dataset = generate_nmnist_dataset(initial_size, test_dir, num_spikes, 0.75)
# output_lmdb = 'testlmdb' + str(num_spikes)
# database = save_to_lmdb(image_dataset, output_lmdb)
# #database.process_all_data(show_lmdb_datum)
# #train directory
# image_dataset = generate_nmnist_dataset(initial_size, train_dir, num_spikes, 0.75)
# output_lmdb = 'trainlmdb' + str(num_spikes)
# save_to_lmdb(image_dataset, output_lmdb)
##TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))
"""Prepare neuromorphic MNIST image datasets for use in caffe
Datasets generated are for continuous spike processing by TrueNorth layers
"""
initial_size = 6e5 #best to make this big enough avoid expensive re-allocation
test_dir = os.path.abspath('testFull')
train_dir = os.path.abspath('trainFull')
#test directory
image_dataset = generate_nmnist_continuous_dataset(initial_size, test_dir)
database = save_to_lmdb(image_dataset, 'testlmdb_continuous', True)
save_to_mat(image_dataset, 'MNIST_continuous_test.mat');
#database.process_all_data(show_lmdb_datum)
#train directory
image_dataset = generate_nmnist_continuous_dataset(initial_size, train_dir)
save_to_lmdb(image_dataset, 'trainlmdb_continuous', True)
save_to_mat(image_dataset, 'MNIST_continuous_train.mat');
#TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))
| 14,139
|
def npelpt(point, ellipse):
"""npelpt(ConstSpiceDouble [3] point, ConstSpiceDouble [NELLIPSE] ellipse)"""
return _cspyce0.npelpt(point, ellipse)
| 14,140
|
def register_module():
"""Registers this module in the registry."""
# provide parser to verify
verify.parse_content = content.parse_string_in_scope
# setup routes
courses_routes = [('/faq', utils_faq.FaqHandler), ('/allresources', utils_allresources.AllResourcesHandler)]
global custom_module
custom_module = custom_modules.Module(
'Course',
'FAQ Module',
[], courses_routes, notify_module_enabled=notify)
return custom_module
| 14,141
|
def degreeList(s):
"""Convert degrees given on command line to a list.
For example, the string '1,2-5,7' is converted to [1,2,3,4,5,7]."""
l = []
for r in s.split(','):
t = r.split('-')
if len(t) == 1:
l.append(int(t[0]))
else:
a = int(t[0])
b = int(t[1])
l.extend(range(a,b+1, (1 if a <= b else -1)))
return sorted(l)
| 14,142
|
def _gen_simple_while_loop(base_dir):
"""Generates a saved model with a while loop."""
class Module(module.Module):
"""A module with a while loop."""
@def_function.function(
input_signature=[tensor_spec.TensorSpec((), dtypes.float32)])
def compute(self, value):
acc, _ = control_flow_ops.while_loop(
cond=lambda acc, i: i > 0,
body=lambda acc, i: (acc + i, i - 1),
loop_vars=(constant_op.constant(0.0), value))
return acc
to_save = Module()
saved_model.save(
to_save, export_dir=os.path.join(base_dir, "SimpleWhileLoop"))
| 14,143
|
def _get_corrected_msm(msm: pd.DataFrame, elevation: float, ele_target: float):
"""MSMデータフレーム内の気温、気圧、重量絶対湿度を標高補正
Args:
df_msm(pd.DataFrame): MSMデータフレーム
ele(float): 平均標高 [m]
elevation(float): 目標地点の標高 [m]
Returns:
pd.DataFrame: 補正後のMSMデータフレーム
"""
TMP = msm['TMP'].values
PRES = msm['PRES'].values
MR = msm['MR'].values
# elevation difference
ele_gap = ele_target - elevation
# temperature correction
TMP_corr = get_corrected_TMP(TMP, ele_gap)
# pressure correction
PRES_corr = get_corrected_PRES(PRES, ele_gap, TMP_corr)
# mixing ratio (absolute humidity) correction
MR_corr = get_corrected_mixing_ratio(
MR=MR,
TMP=TMP_corr,
PRES=PRES_corr
)
# write the corrected values back into the dataframe
msm = msm.copy()
msm['TMP'] = TMP_corr
msm['PRES'] = PRES_corr
msm['MR'] = MR_corr
# why was the pressure column being dropped here?
# msm.drop(['PRES'], axis=1, inplace=True)
return msm
| 14,144
|
def get_answers_by_qname(sim_reads_sam_file):
"""Get a dictionary of Direction Start CIGAR MDtag by ReadID (qname)."""
answers_by_qname = {}
reads_file = open(sim_reads_sam_file)
next(reads_file)  # skip header line
for line in reads_file:
id, dir, start, cigar, mdtag = line.strip().split('\t')
answers_by_qname[id] = (dir, start, cigar, mdtag)
reads_file.close()
return answers_by_qname
| 14,145
|
def post_times(post: Post) -> html_tag:
"""Display time user created post.
If user has edited their post show the timestamp for that as well.
:param post: Post ORM object.
:return: Rendered paragraph tag with post's timestamp information.
"""
p = tags.p(cls="small")
p.add(f"{_('Posted')}: ")
p.add(moment(post.created).fromNow())
if post.edited is not None:
p.add(tags.br(), f"{_('Edited')}: ", moment(post.edited).fromNow())
return p
| 14,146
|
def read_articles_stat(path):
"""
Read the articles_stat file and build a list of dicts giving the number of negative/positive samples for each law article
:param path: path to the articles_stat file
:return: ret: [{'第一条': (num_negatives, num_positives), ...}, {...}, ..., {...}]
"""
df = pd.read_csv(path, header=0, index_col=0)
ret = [{} for i in range(4)]
for index, row in df.iterrows():
ret[row['name']][row['number']] = (row['negatives'], row['positives'])
# print(ret)
return ret
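# A minimal usage sketch with a hypothetical two-row stats table (the column layout is assumed);
# pd.read_csv also accepts a file-like object, so StringIO stands in for the path here:
from io import StringIO
csv_text = ",name,number,negatives,positives\n0,0,第一条,10,3\n1,0,第二条,7,5\n"
print(read_articles_stat(StringIO(csv_text)))
# -> [{'第一条': (10, 3), '第二条': (7, 5)}, {}, {}, {}]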
| 14,147
|
def get_bounding_box(font):
""" Returns max and min bbox of given truetype font """
ymin = 0
ymax = 0
if font.sfntVersion == 'OTTO':
ymin = font['head'].yMin
ymax = font['head'].yMax
else:
for g in font['glyf'].glyphs:
char = font['glyf'][g]
if hasattr(char, 'yMin') and ymin > char.yMin:
ymin = char.yMin
if hasattr(char, 'yMax') and ymax < char.yMax:
ymax = char.yMax
return ymin, ymax
| 14,148
|
def show_NativeMethods(dx):
    """
    Show the native methods
    :param dx: the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object
    """
    d = dx.get_vm()
    for i in d.get_methods():
        if i.get_access_flags() & 0x100:
            print(i.get_class_name(), i.get_name(), i.get_descriptor())
| 14,149
|
def display_users_bill():
"""display_users_bill():
This function is used to display the user's shopping cart.
The option to view the shopping cart has not been added for this program, but it can be achieved by adding code
between lines 214 and 219 in a different function, and this function can be called when the user types
-h (--help): Helps the user with a certain short-key manual
-l (--list): Displays the user's shopping cart in between each input statement
"""
global total_items
global items_in_cart
global users_cart
global users_cart_cost
global users_cart_quantity
print(f"{bcolors.BOLD}Thank you for shopping with us today! {bcolors.ENDC}\n")
# calculating the cost of all the items the user has added to his cart
sum_cart = sum(users_cart_cost)
print(f"Bill Amount: ₹{bcolors.OKBLUE}{sum_cart}{bcolors.ENDC}")
print(f"Bill Amount: ₹{bcolors.OKBLUE}{sum_cart * 118/100}{bcolors.ENDC} (inc. GST)")
sys.exit(0)
| 14,150
|
def look_over(file_name):
"""
# View the contents of a file in hexadecimal
:param file_name: path to the file
:return: the file contents, printed as a hex string
"""
with open(file_name, "rb") as f:
print(binascii.hexlify(f.read()))
| 14,151
|
def untag_resource(ResourceArn=None, TagKeys=None):
"""
Deletes a tag key and value from an AppConfig resource.
See also: AWS API Documentation
Exceptions
:example: response = client.untag_resource(
ResourceArn='string',
TagKeys=[
'string',
]
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe ARN of the resource for which to remove tags.\n
:type TagKeys: list
:param TagKeys: [REQUIRED]\nThe tag keys to delete.\n\n(string) --\n\n
:returns:
AppConfig.Client.exceptions.ResourceNotFoundException
AppConfig.Client.exceptions.BadRequestException
AppConfig.Client.exceptions.InternalServerException
"""
pass
| 14,152
|
def create_user(client, profile, user, resend=False):
""" Creates a new user in the specified user pool """
try:
if resend:
# Resend the confirmation email (e.g. for password recovery)
response = client.admin_create_user(
UserPoolId=profile["user_pool_id"],
Username=user.email,
MessageAction="RESEND",
)
else:
response = client.admin_create_user(
UserPoolId=profile["user_pool_id"],
Username=user.email,
UserAttributes=[
{"Name": "email", "Value": user.email},
{"Name": "email_verified", "Value": "true"},
],
)
if response["ResponseMetadata"]["HTTPStatusCode"] == 200:
if resend:
print(f"Resend confirmation to user {user.email} successfully")
else:
print(f"User {user.email} was created successfully")
return response
except client.exceptions.UsernameExistsException as error:
print(f"User {user.email} exists")
return error.response
except client.exceptions.ClientError as error:
print(f"Fail to create user {user.email}: {error.response}")
return error.response
| 14,153
|
def generate_move_probabilities(
in_probs: np.ndarray,
move_dirn: float,
nu_par: float,
dir_bool: np.ndarray
):
""" create move probabilities from a 1d array of values"""
out_probs = np.asarray(in_probs.copy())
if np.isnan(out_probs).any():
print('NANs in move probabilities!')
out_probs = get_directional_probs(move_dirn * np.pi / 180.)
out_probs = out_probs.clip(min=0.)
out_probs[4] = 0.
out_probs = [ix * float(iy) for ix, iy in zip(out_probs, dir_bool)]
if np.count_nonzero(out_probs) == 0:
out_probs = get_directional_probs(move_dirn * np.pi / 180.)
#out_probs = np.random.rand(len(out_probs))
out_probs[4] = 0.
out_probs = [ix * float(iy) for ix, iy in zip(out_probs, dir_bool)]
if np.count_nonzero(out_probs) == 0:
out_probs = get_directional_probs(move_dirn * np.pi / 180.)
out_probs /= np.sum(out_probs)
out_probs = np.power(out_probs, nu_par)
out_probs /= np.sum(out_probs)
return out_probs
| 14,154
|
def add_feed(feed_URL=None):
"""
Description: Adds to the feed to the database
Argument(s):
feed_URL - The http(s):// URL to the feed that you wish to pull from
Returns:
a composite string of tidy JSON to post to the channel if the addition
was successful or not
"""
| 14,155
|
def test_body(query, selectors, expected):
"""Tests getting the patchable body from a selector"""
sql = pgmock.sql(query, *selectors)
assert sql == expected
| 14,156
|
def register_default_actions():
"""Register default actions for Launcher"""
api.register_plugin(api.Action, ProjectManagerAction)
api.register_plugin(api.Action, LoaderAction)
| 14,157
|
def _is_diagonal(x):
"""Helper to identify if `LinearOperator` has only a diagonal component."""
return (isinstance(x, tf.linalg.LinearOperatorIdentity) or
isinstance(x, tf.linalg.LinearOperatorScaledIdentity) or
isinstance(x, tf.linalg.LinearOperatorDiag))
| 14,158
|
def index():
"""Index Controller"""
return render_template('login.html')
| 14,159
|
def match_twosided(desc1,desc2):
""" Two-sided symmetric version of match(). """
matches_12 = match(desc1,desc2)
matches_21 = match(desc2,desc1)
ndx_12 = matches_12.nonzero()[0]
# remove matches that are not symmetric
for n in ndx_12:
if matches_21[int(matches_12[n])] != n:
matches_12[n] = 0
return matches_12
| 14,160
|
def parse_header_links(value):
"""Return a list of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
:rtype: list
"""
links = []
replace_chars = ' \'"'
value = value.strip(replace_chars)
if not value:
return links
for val in re.split(', *<', value):
try:
url, params = val.split(';', 1)
except ValueError:
url, params = val, ''
link = {'url': url.strip('<> \'"')}
for param in params.split(';'):
try:
key, value = param.split('=')
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
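# A minimal usage sketch with a hypothetical pagination header:
header = '<https://api.example.com/items?page=2>; rel="next", <https://api.example.com/items?page=9>; rel="last"'
print(parse_header_links(header))
# [{'url': 'https://api.example.com/items?page=2', 'rel': 'next'},
#  {'url': 'https://api.example.com/items?page=9', 'rel': 'last'}]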
| 14,161
|
def validate_term(fun):
"""Compares current local (node's) term and request (sender's) term:
- if current local (node's) term is older:
update current local (node's) term and become a follower
- if request (sender's) term is older:
respond with {'success': False}
args:
- data object received from other members
returns:
- True if term validation succeeds, False otherwise
"""
@functools.wraps(fun)
def wrapped(self, data):
logger.debug(f'{self.id} validate_term() start.')
if self.storage['term'] < data['term']:
self.storage.update({'term': data['term']})
if not isinstance(self, Follower):
self.to_follower()
logger.debug(f'{self.id} validate_term() done, bad term, moved to Follower.')
return False
if self.storage['term'] > data['term'] and not data['type'].endswith('_response'):
response = {
'success': False,
'term': self.storage['term'],
'type': f'{data["type"]}_response',
}
sender = self.raft.members.get(data['sender_id'])
host = sender[0]
port = sender[1]
asyncio.ensure_future(
self.raft.send(data=response, dest_host=host, dest_port=port), loop=self.loop
)
logger.debug(f'{self.id} validate_term() done, bad term, responded with False.')
return False
logger.debug(f'{self.id} validate_term() done, good term.')
return fun(self, data)
return wrapped
| 14,162
|
def test_dict_comprehension_func(template, value):
"""Test running a dict comprehension in a declarative function.
"""
source = template.format('{i: %s for i in range(10)}' % value)
win = compile_source(source, 'Main')()
assert win.call()
| 14,163
|
def rsync_sitemaps(dry_run=None, remote_host='ves-pg-a4'):
"""
Copy cached sitemaps from local folder to remote one.
"""
sitemaps_path = os.path.join(settings.PROJECT_PATH, 'rnacentral', 'sitemaps')
cmd = 'rsync -avi{dry_run} --delete {src}/ {remote_host}:{dst}'.format(
src=sitemaps_path,
dst=sitemaps_path,
remote_host=remote_host,
dry_run='n' if dry_run else '',
)
local(cmd)
slack("Rsynced sitemaps to %s" % remote_host)
| 14,164
|
def _friends_bootstrap_radius(args):
"""Internal method used to compute the radius (half-side-length) for each
ball (cube) used in :class:`RadFriends` (:class:`SupFriends`) using
bootstrapping."""
# Unzipping.
points, ftype = args
rstate = np.random
# Resampling.
npoints, ndim = points.shape
idxs = rstate.randint(npoints, size=npoints) # resample
idx_in = np.unique(idxs) # selected objects
sel = np.ones(npoints, dtype='bool')
sel[idx_in] = False
idx_out = np.where(sel)[0] # "missing" objects
if len(idx_out) < 2: # edge case
idx_out = np.append(idx_out, [0, 1])
points_in, points_out = points[idx_in], points[idx_out]
# Construct KDTree to enable quick nearest-neighbor lookup for
# our resampled objects.
kdtree = spatial.KDTree(points_in)
if ftype == 'balls':
# Compute distances from our "missing" points its closest neighbor
# among the resampled points using the Euclidean norm
# (i.e. "radius" of n-sphere).
dists, ids = kdtree.query(points_out, k=1, eps=0, p=2)
elif ftype == 'cubes':
# Compute distances from our "missing" points its closest neighbor
# among the resampled points using the Euclidean norm
# (i.e. "half-side-length" of n-cube).
dists, ids = kdtree.query(points_out, k=1, eps=0, p=np.inf)
# Conservative upper-bound on radius.
dist = max(dists)
return dist
| 14,165
|
def _parametrize_plus(argnames=None, # type: Union[str, Tuple[str], List[str]]
argvalues=None, # type: Iterable[Any]
indirect=False, # type: bool
ids=None, # type: Union[Callable, Iterable[str]]
idstyle=None, # type: Optional[Union[str, Callable]]
idgen=_IDGEN, # type: Union[str, Callable]
auto_refs=True, # type: bool
scope=None, # type: str
hook=None, # type: Callable[[Callable], Callable]
debug=False, # type: bool
**args):
"""
:return: a tuple (decorator, needs_inject) where needs_inject is True if decorator has signature (f, host)
and False if decorator has signature (f)
"""
# first handle argnames / argvalues (new modes of input)
argnames, argvalues = _get_argnames_argvalues(argnames, argvalues, **args)
# argnames related
initial_argnames = ','.join(argnames)
nb_params = len(argnames)
# extract all marks and custom ids.
# Do not check consistency of sizes argname/argvalue as a fixture_ref can stand for several argvalues.
marked_argvalues = argvalues
has_cust_ids = (idgen is not _IDGEN or len(args) > 0) or (ids is not None)
p_ids, p_marks, argvalues, fixture_indices, mod_lvid_indices = \
_process_argvalues(argnames, marked_argvalues, nb_params, has_cust_ids, auto_refs=auto_refs)
# idgen default
if idgen is _IDGEN:
# default: use the new id style only when some keyword **args are provided and there are no fixture refs
idgen = AUTO if (len(args) > 0 and len(fixture_indices) == 0 and ids is None) else None
if idgen is AUTO:
# note: we use a "trick" here with mini_idval to get the appropriate result (argname='', idx=v)
def _make_ids(**args):
for n, v in args.items():
yield "%s=%s" % (n, mini_idval(val=v, argname='', idx=v))
idgen = lambda **args: "-".join(_make_ids(**args)) # noqa
# generate id
if idgen is not None:
if ids is not None:
raise ValueError("Only one of `ids` and `idgen` should be provided")
ids = _gen_ids(argnames, argvalues, idgen)
if len(fixture_indices) == 0:
# No fixture reference: fall back to a standard pytest.mark.parametrize
if debug:
print("No fixture reference found. Calling @pytest.mark.parametrize...")
print(" - argnames: %s" % initial_argnames)
print(" - argvalues: %s" % marked_argvalues)
print(" - ids: %s" % ids)
# handle infinite iterables like latest pytest, for convenience
ids = resolve_ids(ids, marked_argvalues, full_resolve=False)
# no fixture reference: shortcut, do as usual (note that the hook won't be called since no fixture is created)
_decorator = pytest.mark.parametrize(initial_argnames, marked_argvalues, indirect=indirect,
ids=ids, scope=scope)
if indirect:
return _decorator, False
else:
# wrap the decorator to check if the test function has the parameters as arguments
def _apply(test_func):
if not safe_isclass(test_func):
# a Function: raise a proper error message if improper use
s = signature(test_func)
for p in argnames:
if p not in s.parameters:
raise ValueError("parameter '%s' not found in test function signature '%s%s'"
"" % (p, test_func.__name__, s))
else:
# a Class: we cannot really perform any check.
pass
return _decorator(test_func)
return _apply, False
else:
# there are fixture references: we will create a specific decorator replacing the params with a "union" fixture
if indirect:
warn("Using `indirect=True` at the same time as fixture references in `@parametrize` is not guaranteed to "
"work and is strongly discouraged for readability reasons. See "
"https://github.com/smarie/python-pytest-cases/issues/150")
# First unset the pytest.param id we have set earlier in _process_argvalues: indeed it is only needed in
# the case above where we were defaulting to legacy @pytest.mark.parametrize .
# Here we have fixture refs so we will create a fixture union with several ParamAlternative, and their id will
# anyway be generated with `mini_idvalset` which tackles the case of lazy_value used for a tuple of args
for i in mod_lvid_indices:
p_ids[i] = None
if p_marks[i]:
marked_argvalues[i] = ParameterSet(values=marked_argvalues[i].values, id=None, marks=p_marks[i])
else:
marked_argvalues[i] = argvalues[i] # we can even remove the pytest.param wrapper
if indirect:
raise ValueError("Setting `indirect=True` is not yet supported when at least a `fixure_ref` is present in "
"the `argvalues`.")
if debug:
print("Fixture references found. Creating references and fixtures...")
param_names_str = '_'.join(argnames).replace(' ', '')
# Are there explicit ids provided ?
explicit_ids_to_use = False
ids = resolve_ids(ids, argvalues, full_resolve=False)
if isinstance(ids, list):
explicit_ids_to_use = True
# First define a few functions that will help us create the various fixtures to use in the final "union"
def _create_params_alt(fh, test_func, union_name, from_i, to_i, hook): # noqa
""" Routine that will be used to create a parameter fixture for argvalues between prev_i and i"""
# is this about a single value or several values ?
if to_i == from_i + 1:
i = from_i
del from_i
# If an explicit list of ids was provided, slice it. Otherwise use the provided callable
if ids is not None:
_id = ids[i] if explicit_ids_to_use else ids(argvalues[i])
else:
_id = None
return SingleParamAlternative.create(new_fixture_host=fh, test_func=test_func,
param_union_name=union_name, argnames=argnames, i=i,
argvalue=marked_argvalues[i], id=_id,
hook=hook, debug=debug)
else:
# If an explicit list of ids was provided, slice it. Otherwise the provided callable will be used later
_ids = ids[from_i:to_i] if explicit_ids_to_use else ids
return MultiParamAlternative.create(new_fixture_host=fh, test_func=test_func,
param_union_name=union_name, argnames=argnames, from_i=from_i,
to_i=to_i, argvalues=marked_argvalues[from_i:to_i], ids=_ids,
hook=hook, debug=debug)
def _create_fixture_ref_alt(union_name, test_func, i): # noqa
# If an explicit list of ids was provided, slice it. Otherwise use the provided callable
if ids is not None:
_id = ids[i] if explicit_ids_to_use else ids(argvalues[i])
else:
_id = None
# Get the referenced fixture name
f_fix_name = argvalues[i].fixture
if debug:
print(" - Creating reference to existing fixture %r" % (f_fix_name,))
# Create the alternative
f_fix_alt = FixtureParamAlternative(union_name=union_name, fixture_ref=argvalues[i],
decorated=test_func, argnames=argnames, param_index=i, id=_id)
# Finally copy the custom id/marks on the FixtureParamAlternative if any
if is_marked_parameter_value(marked_argvalues[i]):
f_fix_alt = ParameterSet(values=(f_fix_alt,),
id=get_marked_parameter_id(marked_argvalues[i]),
marks=get_marked_parameter_marks(marked_argvalues[i]))
return f_fix_alt
def _create_fixture_ref_product(fh, union_name, i, fixture_ref_positions, test_func, hook): # noqa
# If an explicit list of ids was provided, slice it. Otherwise the provided callable will be used
_id = ids[i] if explicit_ids_to_use else ids
# values to use:
param_values = argvalues[i]
# Create a unique fixture name
p_fix_name = "%s_%s_P%s" % (test_func.__name__, param_names_str, i)
p_fix_name = check_name_available(fh, p_fix_name, if_name_exists=CHANGE, caller=parametrize)
if debug:
print(" - Creating new fixture %r to handle parameter %s that is a cross-product" % (p_fix_name, i))
# Create the fixture
_make_fixture_product(fh, name=p_fix_name, hook=hook, caller=parametrize,
fixtures_or_values=param_values, fixture_positions=fixture_ref_positions)
# Create the corresponding alternative
p_fix_alt = ProductParamAlternative(union_name=union_name, alternative_name=p_fix_name, decorated=test_func,
argval=argvalues[i], argnames=argnames, param_index=i, id=_id)
# copy the custom id/marks to the ParamAlternative if any
if is_marked_parameter_value(marked_argvalues[i]):
p_fix_alt = ParameterSet(values=(p_fix_alt,),
id=get_marked_parameter_id(marked_argvalues[i]),
marks=get_marked_parameter_marks(marked_argvalues[i]))
return p_fix_alt
# Then create the decorator per se
def parametrize_plus_decorate(test_func, fixtures_dest):
"""
A decorator that wraps the test function so that instead of receiving the parameter names, it receives the
new fixture. All other decorations are unchanged.
:param test_func:
:return:
"""
test_func_name = test_func.__name__
# first check if the test function has the parameters as arguments
if safe_isclass(test_func):
# a test class: not supported yet
raise NotImplementedError("@parametrize can not be used to decorate a Test class when the argvalues "
"contain at least one reference to a fixture.")
old_sig = signature(test_func)
for p in argnames:
if p not in old_sig.parameters:
raise ValueError("parameter '%s' not found in test function signature '%s%s'"
"" % (p, test_func_name, old_sig))
# The name for the final "union" fixture
# style_template = "%s_param__%s"
main_fixture_style_template = "%s_%s"
fixture_union_name = main_fixture_style_template % (test_func_name, param_names_str)
fixture_union_name = check_name_available(fixtures_dest, fixture_union_name, if_name_exists=CHANGE,
caller=parametrize)
# Retrieve (if ref) or create (for normal argvalues) the fixtures that we will union
fixture_alternatives = []
prev_i = -1
for i, j_list in fixture_indices: # noqa
# A/ Is there any non-empty group of 'normal' parameters before the fixture_ref at <i> ? If so, handle.
if i > prev_i + 1:
# create a new "param" fixture parametrized with all of that consecutive group.
# Important note: we could either wish to create one fixture for parameter value or to create
# one for each consecutive group as shown below. This should not lead to different results but perf
# might differ. Maybe add a parameter in the signature so that users can test it ?
# this would make the ids more readable by removing the "P2toP3"-like ids
p_fix_alt = _create_params_alt(fixtures_dest, test_func=test_func, hook=hook,
union_name=fixture_union_name, from_i=prev_i + 1, to_i=i)
fixture_alternatives.append(p_fix_alt)
# B/ Now handle the fixture ref at position <i>
if j_list is None:
# argvalues[i] contains a single argvalue that is a fixture_ref : add the referenced fixture
f_fix_alt = _create_fixture_ref_alt(union_name=fixture_union_name, test_func=test_func, i=i)
fixture_alternatives.append(f_fix_alt)
else:
# argvalues[i] is a tuple, some of its elements being fixture_refs: create a fixture referring to all of them
prod_fix_alt = _create_fixture_ref_product(fixtures_dest, union_name=fixture_union_name, i=i,
fixture_ref_positions=j_list,
test_func=test_func, hook=hook)
fixture_alternatives.append(prod_fix_alt)
prev_i = i
# C/ handle last consecutive group of normal parameters, if any
i = len(argvalues) # noqa
if i > prev_i + 1:
p_fix_alt = _create_params_alt(fixtures_dest, test_func=test_func, hook=hook,
union_name=fixture_union_name, from_i=prev_i + 1, to_i=i)
fixture_alternatives.append(p_fix_alt)
# if fixtures_to_union has length 1, simplify ? >> No, we leave such "optimization" to the end user
# Handle the list of alternative names. Duplicates should be removed here
fix_alt_names = []
for alt in fixture_alternatives:
if is_marked_parameter_value(alt):
# wrapped by a pytest.param
alt = get_marked_parameter_values(alt, nbargs=1)
assert len(alt) == 1, "Error with alternative please report"
alt = alt[0]
if alt.alternative_name not in fix_alt_names:
fix_alt_names.append(alt.alternative_name)
else:
# non-unique alt fixture names should only happen when the alternative is a fixture reference
assert isinstance(alt, FixtureParamAlternative), "Created fixture names not unique, please report"
# Finally create a "main" fixture with a unique name for this test function
if debug:
print("Creating final union fixture %r with alternatives %r"
% (fixture_union_name, UnionFixtureAlternative.to_list_of_fixture_names(fixture_alternatives)))
# use the custom subclass of idstyle that was created for ParamAlternatives
if idstyle is None or isinstance(idstyle, string_types):
_idstyle = ParamIdMakers.get(idstyle)
else:
_idstyle = idstyle
# note: the function automatically registers it in the module
_make_fixture_union(fixtures_dest, name=fixture_union_name, hook=hook, caller=parametrize,
fix_alternatives=fixture_alternatives, unique_fix_alt_names=fix_alt_names,
idstyle=_idstyle, scope=scope)
# --create the new test function's signature that we want to expose to pytest
# it is the same than existing, except that we want to replace all parameters with the new fixture
# first check where we should insert the new parameters (where is the first param we remove)
_first_idx = -1
for _first_idx, _n in enumerate(old_sig.parameters):
if _n in argnames:
break
# then remove all parameters that will be replaced by the new fixture
new_sig = remove_signature_parameters(old_sig, *argnames)
# finally insert the new fixture in that position. Indeed we can not insert first or last, because
# 'self' arg (case of test class methods) should stay first and exec order should be preserved when possible
new_sig = add_signature_parameters(new_sig, custom_idx=_first_idx,
custom=Parameter(fixture_union_name,
kind=Parameter.POSITIONAL_OR_KEYWORD))
if debug:
print("Creating final test function wrapper with signature %s%s" % (test_func_name, new_sig))
# --Finally create the fixture function, a wrapper of user-provided fixture with the new signature
def replace_paramfixture_with_values(kwargs): # noqa
# remove the created fixture value
encompassing_fixture = kwargs.pop(fixture_union_name)
# and add instead the parameter values
if nb_params > 1:
for i, p in enumerate(argnames): # noqa
try:
kwargs[p] = encompassing_fixture[i]
except TypeError:
raise Exception("Unable to unpack parameter value to a tuple: %r" % encompassing_fixture)
else:
kwargs[argnames[0]] = encompassing_fixture
# return
return kwargs
if not isgeneratorfunction(test_func):
# normal test or fixture function with return statement
@wraps(test_func, new_sig=new_sig)
def wrapped_test_func(*args, **kwargs): # noqa
if kwargs.get(fixture_union_name, None) is NOT_USED:
# TODO why this ? it is probably useless: this fixture
# is private and will never end up in another union
return NOT_USED
else:
replace_paramfixture_with_values(kwargs)
return test_func(*args, **kwargs)
else:
# generator test or fixture function (with one or several yield statements)
@wraps(test_func, new_sig=new_sig)
def wrapped_test_func(*args, **kwargs): # noqa
if kwargs.get(fixture_union_name, None) is NOT_USED:
# TODO why this ? it is probably useless: this fixture
# is private and will never end up in another union
yield NOT_USED
else:
replace_paramfixture_with_values(kwargs)
for res in test_func(*args, **kwargs):
yield res
# move all pytest marks from the test function to the wrapper
# not needed because the __dict__ is automatically copied when we use @wraps
# move_all_pytest_marks(test_func, wrapped_test_func)
# With this hack we will be ordered correctly by pytest https://github.com/pytest-dev/pytest/issues/4429
try:
# propagate existing attribute if any
wrapped_test_func.place_as = test_func.place_as
except: # noqa
# position the test at the original function's position
wrapped_test_func.place_as = test_func
# return the new test function
return wrapped_test_func
return parametrize_plus_decorate, True
| 14,166
|
def _clip_grad(clip_value, grad):
"""
Clip gradients.
Inputs:
clip_value (float): Specifies how much to clip.
grad (tuple[Tensor]): Gradients.
Outputs:
tuple[Tensor], clipped gradients.
"""
dt = ops.dtype(grad)
new_grad = nn.ClipByNorm()(grad, ops.cast(ops.tuple_to_array((clip_value,)), dt))
return new_grad
| 14,167
|
def sample_cast(user, name='David'):
"""Creates a sample Cast"""
return Cast.objects.create(user=user, name=name)
| 14,168
|
def sort_special_vertex_groups(vgroups,
special_vertex_group_pattern='STYMO:',
global_special_vertex_group_suffix='Character'):
"""
Given a list of special vertex group names, all with the prefix of
special_vertex_group_pattern, selects all that start with global_special_vertex_group_suffix
and puts them at the start of the list. This enables e.g. to easily define
top-level vertex groups that always go first, followed by details that
overwrite top level assignments.
"""
global_vg_name_pattern = special_vertex_group_pattern + \
global_special_vertex_group_suffix
first = []
last = []
for g in vgroups:
if re.match(global_vg_name_pattern, g) is not None:
first.append(g)
else:
last.append(g)
first.sort()
last.sort()
first.extend(last)
return first
| 14,169
|
def set_players():
"""Set players pawns and name before start the party"""
if "" in [p1_pawn.get(), p1_name.get(), p2_pawn.get(), p2_name.get()]: # check if any empty entry
showerror("Error", "A entry is blank !") # Show a warning
elif p1_pawn.get() == p2_pawn.get(): # Check if players pawns are same
showerror("Error", "Players pawns are identical !") # Show a warning
elif p1_name.get() == p2_name.get(): # Check if players names are same
showerror("Error", "Players names are identical !") # Show a warning
else: # If everything is fine
players.destroy()
| 14,170
|
def _GetFullDesktopName(window_station, desktop) -> str:
"""Returns a full name to a desktop.
Args:
window_station: Handle to window station.
desktop: Handle to desktop.
"""
return "\\".join([
win32service.GetUserObjectInformation(handle, win32service.UOI_NAME)
for handle in [window_station, desktop]
])
| 14,171
|
def decrypt(plain_text: str, a: np.ndarray, b: np.ndarray, space: str) -> str:
"""Decrypts the given text with given a, b and space
:param plain_text: Text you want to decrypt
:type plain_text: str
:param a: The matrix corresponding to the A parameter in the block cipher
:type a: np.ndarray
:param b: The matrix corresponding to the B parameter in the block cipher
:type b: np.ndarray
:param space: Target space
:type space: str
:return: Decrypted text in string form
:rtype: str
"""
result = []
t = math_utils.get_inverse_matrix(a)
pairs = cryption_utils.get_pairs_of_int_two_from_text(plain_text, space)
for pair in pairs:
c = math_utils.create_nested_list_from_flat_list(pair)
subtracted_matrix = math_utils.sub_matrices(c, b)
dot_product = math_utils.dot_product_with_multiple_matrices(
[t, np.array(subtracted_matrix)]
)
result_list = space_utils.convert_nested_ints_to_char(dot_product, space)
result.append("".join(result_list))
return "".join(result)
| 14,172
|
def merge_rois(roi_list: List,
temporal_coefficient: float, original_2d_vol: np.ndarray,
roi_eccentricity_limit=1.0, widefield=False):
# TODO is this the most efficient implementation I can do
"""
    Merges ROIs based on temporal and spatial overlap
Parameters
----------
roi_list
List of Rois in format: [[np.array of pixels roi 1],
[np.array of pixels roi 2] ... ]
temporal_coefficient
        The coefficient limiting merging based on temporal information; 0 merges
        everything, 1 merges nothing
original_2d_vol
Volume of each pixel's time trace
Returns
-------
List of new rois in format: [[np.array of pixels roi 1],
[np.array of pixels roi 2] ... ]
"""
A = np.zeros([original_2d_vol.shape[0], len(roi_list)], dtype=int) # create 2d
# matrix of zeros with dims number of pixels in image by number of rois
# Change pixels of each roi to 1
for num, roi in enumerate(roi_list):
A[roi, num] = 1
# Create graph of which rois have pixels which intersect with each other.
A_graph = np.matmul(A.transpose(), A)
connected_rois = np.nonzero(A_graph)
# print(A_graph)
timetraces = [np.mean(original_2d_vol[roi], axis=0) for roi in roi_list]
A_graph_new = np.identity(A_graph.shape[0], dtype=float)
# print(list(zip(*connected_rois)))
for x in list(zip(*connected_rois)):
# applies a 10% overlap condition to the rois.
if x[0] != x[1] and (widefield or (
A_graph[x[0], x[1]] > len(roi_list[x[1]]) * .1 and A_graph[
x[0], x[1]] > len(roi_list[x[0]]) * .1)):
A_graph_new[x[0], x[1]] = compare_time_traces(timetraces[x[0]],
timetraces[x[1]])
# print(A_graph_new[x[0],x[1]])
A_graph_new[x[1], x[0]] = A_graph_new[x[0], x[1]]
A_graph[x[0], x[1]] = False
A_graph[x[1], x[0]] = False
A_components_to_merge = A_graph_new >= temporal_coefficient
A_csr = csr_matrix(A_components_to_merge)
# Use connected components to group these rois together
connected = connected_components_graph(A_csr, False, return_labels=True)
# processes connected components putting each group of rois into roi_groups list
roi_groups = [[] for _ in range(len(roi_list))]
for num in range(len(roi_list)):
roi_groups[connected[1][num]].append(roi_list[num])
new_rois = []
for group in roi_groups:
if len(group) != 0:
# combine those rois that should be merged with first roi.
first_roi = list(reduce(combine_rois, group))
new_rois.append(np.array(first_roi))
return new_rois
| 14,173
|
def well2D_to_df1D(xlsx_path, sheet, data_col):
"""
Convert new 2D output format (per well) to 1D dataframe
:param str xlsx_path: path to the xlsx file
:param str sheet: sheet name to load
:param str data_col: new column name of the linearized values
:return dataframe df: linearized dataframe
"""
df = pd.read_excel(xlsx_path, sheet_name=sheet, index_col=0)
df = df.unstack().reset_index(name=data_col) # unpivot (linearize) the table
df.rename(columns={'level_1': 'row_id', 'level_0': 'col_id'}, inplace=True)
df['well_id'] = df.row_id + df.col_id.map(str)
df = df[['well_id', data_col]]
return df
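# Illustrative usage (file, sheet and column names are placeholders):
#
#   df = well2D_to_df1D('plate_readout.xlsx', sheet='OD', data_col='od')
#   # df has columns ['well_id', 'od'] with well ids such as 'A1', 'B1', ...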
| 14,174
|
def obtain_file_hash(path, hash_algo="md5"):
"""Obtains the hash of a file using the specified hash algorithm
"""
    hasher = hashlib.sha256() if hash_algo == "sha256" else hashlib.md5()
    block_size = 65535
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(block_size), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
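# Illustrative usage (the path is a placeholder):
#
#   obtain_file_hash('/tmp/archive.tar.gz', hash_algo='sha256')
#   # -> hex digest string of the file contents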
| 14,175
|
def b_q_bar(z_c):
"""Result of integrating from z_c to 1/2 of the
hard collinear part of the quark splitting function"""
b_q_zc = CF * (-3. + 6. * z_c + 4.* np.log(2. - 2.*z_c))/2.
return b_q_zc
| 14,176
|
def _prepare_libarchive() -> None: # coverage: ignore
"""
There are some well documented issues in MacOS about libarchive.
Let's try to do things ourselves...
https://github.com/dsoprea/PyEasyArchive
"""
if sys.platform != "darwin":
return
if "LA_LIBRARY_FILEPATH" in os.environ:
return
command = ["brew", "info", "--json=v1", "libarchive"]
try:
result = subprocess.check_output(command)
    except Exception as e:
        logging.error("Could not look up 'libarchive' package info: %s", e)
        return
    info = json.loads(result)
installed_versions = info[0]["installed"]
if len(installed_versions) == 0:
logging.warning("libarchive is not currently installed via Brew")
return
version = installed_versions[0]["version"]
command = ["brew", "--cellar", "libarchive"]
package_path = subprocess.check_output(command)[:-1]
library_path = os.path.join(package_path.decode(), version, "lib")
os.environ["LA_LIBRARY_FILEPATH"] = os.path.join(
library_path, "libarchive.dylib"
)
return
| 14,177
|
def getsize(store, path=None):
"""Compute size of stored items for a given path."""
path = normalize_storage_path(path)
if hasattr(store, 'getsize'):
# pass through
return store.getsize(path)
elif isinstance(store, dict):
# compute from size of values
prefix = _path_to_prefix(path)
size = 0
for k in listdir(store, path):
try:
v = store[prefix + k]
except KeyError:
pass
else:
try:
size += buffer_size(v)
except TypeError:
return -1
return size
else:
return -1
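# Illustrative usage with a plain dict store (assumes the surrounding module's
# `listdir` and `buffer_size` helpers, as in zarr):
#
#   store = {'foo/a': b'abc', 'foo/b': b'de'}
#   getsize(store, 'foo')  # -> 5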
| 14,178
|
def split_2DL5AB(GL, cursor, log):
"""
splits the KIR2DL5 GL-string into 2 separate GL strings for 2DL5A and 2DL5B
:param GL: GL-string for KIR2DL5, combining both A and B
:param cursor: cursor to a connection to the nextype archive
:param log: logger instance
"""
log.info("Splitting 2DL5-alleles...")
proc_name = "GL_STRINGS_MGMT.SPLIT_GL_STRING_2DL5@ngsa"
proc_params = [GL]
proc_params2 = [2, 'KIR', 'J', 'J', '2DL5', 'J', '2DL5', 'N']
success, values = call_procedure(proc_name, proc_params, 2, proc_params2, cursor, log)
if success:
log.info("\t=> Success!")
[part1, part2] = values
if "2DL5A" in part1:
A = part1
B = part2
else:
A = part2
B = part1
A_alleles = A.replace("2DL5A*", "")
B_alleles = B.replace("2DL5B*", "")
else:
log.info("\t=> Procedure call did not work. :-(")
A_alleles = ""
B_alleles = ""
return A_alleles, B_alleles
| 14,179
|
def set_system_bios(context, settings, system_id=None, workaround=False):
"""
Finds a system matching the given ID and sets the BIOS settings
Args:
context: The Redfish client object with an open session
settings: The settings to apply to the system
system_id: The system to locate; if None, perform on the only system
workaround: Indicates if workarounds should be attempted for non-conformant services
Returns:
The response of the PATCH
"""
# Locate the system
system = get_system(context, system_id)
# Get the BIOS resource and determine if the settings need to be applied to the resource itself or the settings
# object
if "Bios" not in system.dict:
raise RedfishSystemBiosNotFoundError("System '{}' does not support representing BIOS".format(system.dict["Id"]))
bios_uri = system.dict["Bios"]["@odata.id"]
bios = context.get(bios_uri)
etag = bios.getheader("ETag")
if "@Redfish.Settings" in bios.dict:
bios_settings = get_system_bios_settings(context, bios, system.dict["Id"], workaround)
bios_uri = bios_settings.dict["@odata.id"]
etag = bios_settings.getheader("ETag")
# Update the settings
payload = {"Attributes": settings}
headers = None
if etag is not None:
headers = {"If-Match": etag}
response = context.patch(bios_uri, body=payload, headers=headers)
verify_response(response)
return response
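# Illustrative usage sketch (endpoint, credentials and attribute name are
# placeholders; assumes a DMTF python-redfish client as the context object):
#
#   # context = redfish.redfish_client(base_url="https://bmc.example",
#   #                                  username="admin", password="secret")
#   # set_system_bios(context, {"BootMode": "Uefi"})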
| 14,180
|
def geometric_augmentation(images,
flow = None,
mask = None,
crop_height = 640,
crop_width = 640,
probability_flip_left_right = 0.5,
probability_flip_up_down = 0.1,
probability_scale = 0.8,
probability_relative_scale = 0.,
probability_stretch = 0.8,
probability_rotation = 0.0,
probability_relative_rotation = 0.0,
probability_crop_offset = 0.0,
min_bound_scale = -0.2,
max_bound_scale = 0.6,
max_strech_scale = 0.2,
min_bound_relative_scale = -0.1,
max_bound_relative_scale = 0.1,
max_rotation_deg = 15,
max_relative_rotation_deg = 3,
max_relative_crop_offset = 5,
return_full_scale=False):
"""Applies geometric augmentations to an image pair and corresponding flow.
Args:
images: Image pair of shape [2, height, width, channels].
flow: Corresponding forward flow field of shape [height, width, 2].
mask: Mask indicating which positions in the flow field hold valid flow
      vectors of shape [height, width, 1]. Non-valid positions are encoded with
0, valid positions with 1.
crop_height: Height of the final augmented output.
crop_width: Width of the final augmented output.
probability_flip_left_right: Probability of applying left/right flip.
probability_flip_up_down: Probability of applying up/down flip
probability_scale: Probability of applying scale augmentation.
probability_relative_scale: Probability of applying scale augmentation to
      only the second frame of the image pair.
probability_stretch: Probability of applying stretch augmentation (scale
without keeping the aspect ratio).
probability_rotation: Probability of applying rotation augmentation.
probability_relative_rotation: Probability of applying rotation augmentation
      to only the second frame of the image pair.
probability_crop_offset: Probability of applying a relative offset while
cropping.
min_bound_scale: Defines the smallest possible scaling factor as
2**min_bound_scale.
max_bound_scale: Defines the largest possible scaling factor as
2**max_bound_scale.
    max_strech_scale: Defines the smallest and largest possible stretching factor
as 2**-max_strech_scale and 2**max_strech_scale.
min_bound_relative_scale: Defines the smallest possible scaling factor for
the relative scaling as 2**min_bound_relative_scale.
max_bound_relative_scale: Defines the largest possible scaling factor for
the relative scaling as 2**max_bound_relative_scale.
max_rotation_deg: Defines the maximum angle of rotation in degrees.
max_relative_rotation_deg: Defines the maximum angle of rotation in degrees
for the relative rotation.
max_relative_crop_offset: Defines the maximum relative offset in pixels for
cropping.
return_full_scale: bool. If this is passed, the full size images will be
returned in addition to the geometrically augmented (cropped and / or
resized) images. In addition to the resized images, the crop height,
width, and any padding applied will be returned.
Returns:
if return_full_scale is False:
Augmented images, flow and mask (if not None).
if return_full_scale is True:
Augmented images, flow, mask, full_size_images, crop_h, crop_w, pad_h,
and pad_w.
"""
# apply geometric augmentation
if probability_flip_left_right > 0:
images, flow, mask = random_flip_left_right(
images, flow, mask, probability_flip_left_right)
if probability_flip_up_down > 0:
images, flow, mask = random_flip_up_down(
images, flow, mask, probability_flip_up_down)
if probability_scale > 0 or probability_stretch > 0:
images, flow, mask = random_scale(
images,
flow,
mask,
min_scale=min_bound_scale,
max_scale=max_bound_scale,
max_strech=max_strech_scale,
probability_scale=probability_scale,
probability_strech=probability_stretch)
if probability_relative_scale > 0:
images, flow, mask = random_scale_second(
images, flow, mask,
min_scale=min_bound_relative_scale,
max_scale=max_bound_relative_scale,
probability_scale=probability_relative_scale)
if probability_rotation > 0:
images, flow, mask = random_rotation(
images, flow, mask,
probability=probability_rotation,
max_rotation=max_rotation_deg, not_empty_crop=True)
if probability_relative_rotation > 0:
images, flow, mask = random_rotation_second(
images, flow, mask,
probability=probability_relative_rotation,
max_rotation=max_relative_rotation_deg, not_empty_crop=True)
images_uncropped = images
images, flow, mask, offset_h, offset_w = random_crop(
images, flow, mask, crop_height, crop_width,
relative_offset=max_relative_crop_offset,
probability_crop_offset=probability_crop_offset)
# Add 100 / 200 pixels to crop height / width for full scale warp
pad_to_size_h = crop_height + 200
pad_to_size_w = crop_width + 400
if return_full_scale:
if pad_to_size_w:
uncropped_shape = tf.shape(images_uncropped)
if images.shape[1] > uncropped_shape[1] or images.shape[
2] > uncropped_shape[2]:
images_uncropped = images
uncropped_shape = tf.shape(images_uncropped)
offset_h = tf.zeros_like(offset_h)
offset_w = tf.zeros_like(offset_w)
if uncropped_shape[1] > pad_to_size_h:
crop_ht = offset_h - (200 // 2)
crop_hb = offset_h + crop_height + (200 // 2)
crop_hb += tf.maximum(0, -crop_ht)
crop_ht -= tf.maximum(0, -(uncropped_shape[1] - crop_hb))
crop_ht = tf.maximum(crop_ht, 0)
crop_hb = tf.minimum(crop_hb, uncropped_shape[1])
offset_h -= crop_ht
images_uncropped = images_uncropped[:, crop_ht:crop_hb, :, :]
if uncropped_shape[2] > pad_to_size_w:
crop_wt = offset_w - (400 // 2)
crop_wb = offset_w + crop_width + (400 // 2)
crop_wb += tf.maximum(0, -crop_wt)
crop_wt -= tf.maximum(0, -(uncropped_shape[2] - crop_wb))
crop_wt = tf.maximum(crop_wt, 0)
crop_wb = tf.minimum(crop_wb, uncropped_shape[2])
offset_w -= crop_wt
images_uncropped = images_uncropped[:, :, crop_wt:crop_wb, :]
uncropped_shape = tf.shape(images_uncropped)
      # pad the uncropped images at the top/left up to the fixed full-scale size
pad_h = pad_to_size_h - uncropped_shape[1]
pad_w = pad_to_size_w - uncropped_shape[2]
with tf.control_dependencies([
tf.compat.v1.assert_greater_equal(pad_h, 0),
tf.compat.v1.assert_greater_equal(pad_w, 0)
]):
images_uncropped = tf.pad(images_uncropped,
[[0, 0], [pad_h, 0], [pad_w, 0], [0, 0]])
images_uncropped = tf.ensure_shape(images_uncropped,
[2, pad_to_size_h, pad_to_size_w, 3])
return images, flow, mask, images_uncropped, offset_h, offset_w, pad_h, pad_w
return images, flow, mask
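# Illustrative usage sketch (tensor shapes and crop sizes are assumptions):
#
#   images = tf.zeros([2, 480, 640, 3])
#   flow = tf.zeros([480, 640, 2])
#   mask = tf.ones([480, 640, 1])
#   aug_images, aug_flow, aug_mask = geometric_augmentation(
#       images, flow, mask, crop_height=320, crop_width=448)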
| 14,181
|
def test_rmse():
"""Test to check RMSE calculation"""
data_url = ('https://raw.githubusercontent.com/jonescompneurolab/hnn/'
'master/data/MEG_detection_data/yes_trial_S1_ERP_all_avg.txt')
if not op.exists('yes_trial_S1_ERP_all_avg.txt'):
urlretrieve(data_url, 'yes_trial_S1_ERP_all_avg.txt')
extdata = np.loadtxt('yes_trial_S1_ERP_all_avg.txt')
exp_dpl = Dipole(times=extdata[:, 0],
data=np.c_[extdata[:, 1], extdata[:, 1], extdata[:, 1]])
hnn_core_root = op.join(op.dirname(hnn_core.__file__))
params_fname = op.join(hnn_core_root, 'param', 'default.json')
params = read_params(params_fname)
expected_rmse = 0.1
test_dpl = Dipole(times=extdata[:, 0],
data=np.c_[extdata[:, 1] + expected_rmse,
extdata[:, 1] + expected_rmse,
extdata[:, 1] + expected_rmse])
avg_rmse = _rmse(test_dpl, exp_dpl, tstop=params['tstop'])
assert_allclose(avg_rmse, expected_rmse)
| 14,182
|
def main():
"""Function gets google assistant request, handle it and returns answer."""
_ = set(variables['Targets_Commands'].split(' '))
targets, commands = _ & {'pc', 'laptop'}, _ - {'pc', 'laptop'}
for target in targets:
for command in commands:
send_given_reqest(target, command)
action_thread = Thread(target=wait_for_request)
action_thread.start()
action_thread.join(timeout=20)
stop_event.set()
if not google_assistant_reply:
google_assistant_reply.append("Sorry, server is unreachable.")
print(google_assistant_reply)
variables[ "Google_assistant_answers" ] = google_assistant_reply
| 14,183
|
def edit_distance_between_seqs(seq1, seq2):
"""Input is two strings. They are globally aligned
and the edit distance is returned. An indel of any length
is counted as one edit"""
aln1, aln2 = _needleman_wunsch(seq1, seq2)
return edit_distance_from_aln_strings(aln1, aln2)
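# Illustrative example: per the docstring, an indel of any length counts as a
# single edit, so a three-base insertion gives a distance of 1.
#
#   edit_distance_between_seqs("ACGT", "ACAAAGT")  # -> 1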
| 14,184
|
def sentence_prediction(sentence):
"""Predict the grammar score of a sentence.
Parameters
----------
sentence : str
The sentence to be predicted.
Returns
-------
float
The predicted grammar probability.
"""
tokenizer = config.TOKENIZER.from_pretrained(
config.MODEL_PATH, local_files_only=True
)
model = config.MODEL.from_pretrained(config.MODEL_PATH, local_files_only=True)
max_len = config.MAX_LEN
sentence = str(sentence)
sentence = " ".join(sentence.split())
inputs = tokenizer.encode_plus(
sentence,
add_special_tokens=True,
max_length=max_len,
pad_to_max_length=True,
return_attention_mask=True,
return_tensors="pt",
truncation=True,
)
ids = torch.LongTensor(inputs["input_ids"][0]).unsqueeze(0)
mask = torch.LongTensor(inputs["attention_mask"][0]).unsqueeze(0)
ids = ids.to(DEVICE)
mask = mask.to(DEVICE)
model.to(DEVICE)
outputs = model(ids, token_type_ids=None, attention_mask=mask, return_dict=True)
outputs = torch.sigmoid(outputs.logits).cpu().detach().numpy()
return outputs[0][0]
| 14,185
|
def DB():
"""Create a DB wrapper object connecting to the test database."""
db = pg.DB(dbname, dbhost, dbport)
if debug:
db.debug = debug
return db
| 14,186
|
def build_seq(variants, phased_genotype, ref, pre_start, ref_end=None):
"""
    Build or extend the haplotype according to the provided genotype. We track the start position of each haplotype
    and update it with the variant's alternative base.
"""
seqs = ""
position = pre_start
for variant, phased in zip(variants, phased_genotype):
if variant.start < pre_start:
            if variant.start == pre_start - 1 and phased != 0:  # this only happens when the previous position is a deletion and the current one is an insertion
ref_base = variant.reference_bases
alt_base = variant.alternate_bases[phased - 1]
if len(alt_base) > len(ref_base): # is an insertion
# print ('has insertion and deletion overlap'.format(variant.start))
return alt_base[1:], position
            if phased != 0:  # should be impossible, but sometimes happens in real VCFs
return None, None
else:
return "", pre_start # do not do anything if 0 allele
else:
seqs += ref.query(pre_start, variant.start)
allele = variant.reference_bases if phased == 0 else variant.alternate_bases[phased - 1]
if phased == 0:
allele = allele[0]
position = variant.start + 1
seqs += allele # only add one ref base
else:
ref_base = variant.reference_bases
alt_base = variant.alternate_bases[phased-1]
ref_base, alt_base = remove_common_suffix(ref_base, [alt_base])
end = variant.start + len(ref_base)
position = end
seqs += alt_base[0]
return seqs, position
| 14,187
|
def handle_str(x):
"""
handle_str returns a random string of the same length as x.
"""
return random_string(len(x))
| 14,188
|
def clean_profile(profile, sid, state_final, state_canceled):
"""
This method will prepare a profile for consumption in radical.analytics. It
performs the following actions:
    - makes sure all events have an `ename` entry
- remove all state transitions to `CANCELLED` if a different final state
is encountered for the same uid
    - assigns the session uid to all events without a uid
- makes sure that state transitions have an `ename` set to `state`
"""
entities = dict() # things which have a uid
if not isinstance(state_final, list):
state_final = [state_final]
for event in profile:
uid = event['uid' ]
state = event['state']
time = event['time' ]
name = event['event']
# we derive entity_type from the uid -- but funnel
# some cases into the session
if uid:
event['entity_type'] = uid.split('.',1)[0]
else:
event['entity_type'] = 'session'
event['uid'] = sid
uid = sid
if uid not in entities:
entities[uid] = dict()
entities[uid]['states'] = dict()
entities[uid]['events'] = list()
if name == 'advance':
# this is a state progression
assert(state)
assert(uid)
event['event_name'] = 'state'
if state in state_final and state != state_canceled:
# a final state other than CANCELED will cancel any previous
# CANCELED state.
if state_canceled in entities[uid]['states']:
del(entities[uid]['states'][state_canceled])
if state in entities[uid]['states']:
# ignore duplicated recordings of state transitions
# FIXME: warning?
continue
# raise ValueError('double state (%s) for %s' % (state, uid))
entities[uid]['states'][state] = event
else:
# FIXME: define different event types (we have that somewhere)
event['event_name'] = 'event'
entities[uid]['events'].append(event)
# we have evaluated, cleaned and sorted all events -- now we recreate
# a clean profile out of them
ret = list()
    for uid, entity in entities.items():
ret += entity['events']
        for state, event in entity['states'].items():
ret.append(event)
# sort by time and return
ret = sorted(ret[:], key=lambda k: k['time'])
return ret
| 14,189
|
def _validate_and_apply_configs(app: Sphinx, config: Config):
"""Callback of `config-inited`` event. """
config.graphtik_default_graph_format is None or _valid_format_option(
config.graphtik_default_graph_format
)
| 14,190
|
def test_ap_bss_add_out_of_memory(dev, apdev):
"""Running out of memory while adding a BSS"""
hapd2 = hostapd.add_ap(apdev[1], {"ssid": "open"})
ifname1 = apdev[0]['ifname']
ifname2 = apdev[0]['ifname'] + '-2'
confname1 = hostapd.cfg_file(apdev[0], "bss-1.conf")
confname2 = hostapd.cfg_file(apdev[0], "bss-2.conf")
hapd_bss_out_of_mem(hapd2, 'phy3', confname1, 1, 'hostapd_add_iface')
for i in range(1, 3):
hapd_bss_out_of_mem(hapd2, 'phy3', confname1,
i, 'hostapd_interface_init_bss')
hapd_bss_out_of_mem(hapd2, 'phy3', confname1,
1, 'ieee802_11_build_ap_params')
hostapd.add_bss(apdev[0], ifname1, confname1)
hapd_bss_out_of_mem(hapd2, 'phy3', confname2,
1, 'hostapd_interface_init_bss')
hapd_bss_out_of_mem(hapd2, 'phy3', confname2,
1, 'ieee802_11_build_ap_params')
hostapd.add_bss(apdev[0], ifname2, confname2)
hostapd.remove_bss(apdev[0], ifname2)
hostapd.remove_bss(apdev[0], ifname1)
| 14,191
|
def get_process_causality_network_activity_query(endpoint_ids: str, args: dict) -> str:
"""Create the process causality network activity query.
Args:
endpoint_ids (str): The endpoint IDs to use.
args (dict): The arguments to pass to the query.
Returns:
str: The created query.
"""
process_causality_id_list = args.get('process_causality_id', '')
if not process_causality_id_list:
raise DemistoException('Please provide a process_causality_id argument.')
process_causality_id_list = wrap_list_items_in_double_quotes(process_causality_id_list)
return f'''dataset = xdr_data | filter agent_id in ({endpoint_ids}) and event_type = NETWORK
and actor_process_causality_id in ({process_causality_id_list}) | fields agent_hostname, agent_ip_addresses,agent_id,
action_local_ip, action_remote_ip, action_remote_port, dst_action_external_hostname,dns_query_name,
action_app_id_transitions, action_total_download, action_total_upload, action_country,action_as_data,
actor_process_image_sha256, actor_process_image_name , actor_process_image_path,actor_process_signature_vendor,
actor_process_signature_product, actor_causality_id,actor_process_image_command_line, actor_process_instance_id'''
| 14,192
|
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
try:
client = TickTick()
client.login(data.get("username"), data.get("password"))
except RequestException as exc:
raise CannotConnect from exc
except ValueError as exc:
raise InvalidAuth from exc
# Return some info we want to store in the config entry.
return {"title": "TickTick"}
| 14,193
|
def _shard_batch(xs):
"""Shards a batch for a pmap, based on the number of devices."""
local_device_count = jax.local_device_count()
def _prepare(x):
return x.reshape((local_device_count, -1) + x.shape[1:])
return jax.tree_map(_prepare, xs)
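# Illustrative example (assumes `jnp` is jax.numpy): with 8 local devices a
# batch of shape (128, 32) becomes (8, 16, 32), ready for a pmapped step.
#
#   sharded = _shard_batch({'x': jnp.ones((128, 32))})
#   # sharded['x'].shape == (jax.local_device_count(), 128 // jax.local_device_count(), 32)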
| 14,194
|
def get_time_limit(component_limit, overall_limit):
"""
Return the minimum time limit imposed by the component and overall limits.
"""
limit = component_limit
if overall_limit is not None:
try:
elapsed_time = util.get_elapsed_time()
except NotImplementedError:
returncodes.exit_with_driver_unsupported_error(CANNOT_LIMIT_TIME_MSG)
else:
remaining_time = max(0, overall_limit - elapsed_time)
if limit is None or remaining_time < limit:
limit = remaining_time
return limit
| 14,195
|
def remove_alias(conn, alias):
"""
Removes a specific alias reference for all directories with that alias
:param conn: the db connection
:param alias: the alias to remove
:return:
"""
with conn:
        c = conn.cursor()
        # Use a parameterized query rather than string interpolation to avoid
        # SQL injection; the '?' placeholder assumes a sqlite3-style DB-API driver.
        c.execute("""UPDATE projects
                     SET alias = null
                     WHERE alias = ?""", (alias,))
| 14,196
|
def test_tetragonal_bravais():
"""Tests initialization of a TetragonalBravais object."""
# parameters of Wulfenite
# https://www.mindat.org/min-4322.html
a, c = 5.433, 12.11
symbols = ["X"]
center = "P"
tetragonal = TetragonalBravais(a, c, symbols, center)
assert tetragonal.n_atoms == 1
assert tetragonal.n_symbols == 1
with pytest.raises(NotImplementedError):
tetragonal.reorient(np.identity(3))
with pytest.raises(NotImplementedError):
tetragonal.repeat((1, 2, 3))
| 14,197
|
def keras_decay(step, decay=0.0001):
"""Learning rate decay in Keras-style"""
return 1. / (1. + decay * step)
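# Illustrative example: the returned factor scales a base learning rate,
# e.g. base_lr * keras_decay(step). With the default decay of 1e-4:
#
#   keras_decay(0)      # -> 1.0
#   keras_decay(10000)  # -> 0.5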
| 14,198
|
def get_swagger():
""" Request handler for the /swagger path.
GET: returns the My Cars API spec as a swagger json doc.
"""
try:
return _make_response(response=validator.get_swagger_spec())
except Exception as e:
        return _make_error(500, str(e))
| 14,199
|