content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def onRequestLogin(loginName, password, clientType, datas):
    """KBEngine callback fired when an account requests to log in.

    Login queuing could be implemented here by stashing queue state
    inside ``datas``.

    Returns a tuple ``(errorno, loginName, password, clientType, datas)``.
    """
    INFO_MSG('onRequestLogin() loginName=%s, clientType=%s' % (loginName, clientType))
    # Reject over-long credentials.  The password check runs last, so it
    # takes precedence when both fields exceed the limit (matches the
    # original behavior).
    errorno = KBEngine.SERVER_ERR_NAME if len(loginName) > 64 else KBEngine.SERVER_SUCCESS
    if len(password) > 64:
        errorno = KBEngine.SERVER_ERR_PASSWORD
    return (errorno, loginName, password, clientType, datas)
import logging
from typing import Callable
from typing import Any
def log_calls(
    logger: logging.Logger, log_result: bool = True
) -> GenericDecorator:
    """Decorator factory that logs calls to the decorated callable.

    Can also decorate classes, in which case calls to all of their
    methods are logged.

    :param logger: object to log to
    :param log_result: when True, the return value is also logged
    """
    def _traced_call(
        target: Callable[..., TargetReturnT],
        *args: Any,
        **kwargs: Any
    ) -> TargetReturnT:
        # Log arguments before the call, and the result (optionally) after.
        logger.info(f"{target.__name__} args: {args!r} {kwargs!r}")
        outcome = target(*args, **kwargs)
        if log_result:
            logger.info(f"{target.__name__} result: {outcome!r}")
        return outcome

    return GenericDecorator(_traced_call)
def bag(n, c, w, v):
    """0/1 knapsack solved with bottom-up dynamic programming.

    Sample data:
        n = 6                      number of items
        c = 10                     knapsack capacity (max weight)
        w = [2, 2, 3, 1, 5, 2]     weight of each item
        v = [2, 3, 1, 5, 4, 3]     value of each item

    Prints every row of the DP table and returns the full
    (n + 1) x (c + 1) table; ``value[n][c]`` is the optimum.
    """
    # value[i][j]: best value using the first i items with capacity j.
    value = [[0] * (c + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        weight, worth = w[i - 1], v[i - 1]
        for j in range(1, c + 1):
            # Default: item i is left out.
            best = value[i - 1][j]
            # If item i fits, consider swapping it in.
            if j >= weight:
                best = max(best, value[i - 1][j - weight] + worth)
            value[i][j] = best
    for row in value:
        print(row)
    return value
def _to_bytes(key: type_utils.Key) -> bytes:
    """Normalize a key (int, bytes or str) to its bytes representation."""
    if isinstance(key, bytes):
        return key
    if isinstance(key, str):
        return key.encode('utf-8')
    if isinstance(key, int):
        # 128 bytes to match the width of md5-derived keys.
        return key.to_bytes(128, byteorder='big')
    raise TypeError(f'Invalid key type: {type(key)}')
def _online(machine):
    """Return True when *machine* answers a ping, False otherwise."""
    # Excluded hosts are never probed.
    if env.host in env.exclude_hosts:
        print(red("Excluded"))
        return False
    with settings(hide('everything'), warn_only=True, skip_bad_hosts=True):
        result = local("ping -c 2 -W 1 %s" % machine)
        if not result.failed:
            return True
        print(yellow('%s is Offline \n' % machine))
        return False
import sys
def download_subs_from_public_amara(amara, ytid, lang):
    """Download subtitles for a YouTube video from Public Amara.

    :param amara: Amara API client object
    :param ytid: YouTube video id
    :param lang: subtitle language code
    :returns: tuple ``(subtitles, revision_number)``

    Exits the process when the video is not on Amara or has no
    subtitles in the requested language.
    """
    # Check whether the video is already on Amara
    video_url = 'https://www.youtube.com/watch?v=%s' % ytid
    amara_response = amara.check_video(video_url)
    if amara_response['meta']['total_count'] == 0:
        eprint("ERROR: Source video is not on Public Amara! YTID=%s" % ytid)
        sys.exit(1)
    amara_id_public = amara_response['objects'][0]['id']
    amara_title = amara_response['objects'][0]['title']
    print("\n######################################\n")
    print("Title: %s YTID=%s" % (amara_title, ytid))
    # Check whether subtitles for a given language are present
    is_present, sub_version_public = amara.check_language(amara_id_public, lang)
    if is_present:
        print("Subtitle revision %d (Public Amara)" % sub_version_public)
    else:
        # BUG FIX: the original backslash-continued string embedded the
        # source indentation inside the message.
        eprint("ERROR: Amara does not have subtitles in %s language for this video!" % lang)
        sys.exit(1)
    # Download subtitles from Public Amara for a given language
    subs = amara.download_subs(amara_id_public, lang, SUB_FORMAT)
    # BUG FIX: the original returned the undefined name `sub_version`
    # (NameError); return the revision fetched above instead.
    return subs, sub_version_public
import os
def profile_exists_at(path):
    """Return True when a profile exists at *path*.

    A profile is a directory containing a manifest file named
    ``PROFILE_MANIFEST_FILENAME``.

    :param path: path to a profile
    :type path: str
    :returns: True if profile exists in given path, else False
    :rtype: bool
    """
    manifest = os.path.join(path, PROFILE_MANIFEST_FILENAME)
    return os.path.isdir(path) and os.path.isfile(manifest)
def get_expectation_value(
    qubit_op: QubitOperator, wavefunction: Wavefunction, reverse_operator: bool = False
) -> complex:
    """Get the expectation value of a qubit operator w.r.t. a wavefunction.

    Args:
        qubit_op: the operator
        wavefunction: the wavefunction
        reverse_operator: reverse the qubit ordering of the operator before
            computing the expectation value.  Set this to True when the
            basis-state convention of the wavefunction is opposite to the
            operator's, e.g. when the wavefunction uses the Rigetti qubit
            ordering (https://arxiv.org/abs/1711.02086).

    Returns:
        the expectation value
    """
    num_qubits = wavefunction.amplitudes.shape[0].bit_length() - 1
    # OpenFermion and Wavefunction order computational basis states
    # differently, hence the optional reversal before building the
    # sparse matrix.
    operator = (
        reverse_qubit_order(qubit_op, n_qubits=num_qubits)
        if reverse_operator
        else qubit_op
    )
    sparse_op = get_sparse_operator(operator, n_qubits=num_qubits)
    return openfermion_expectation(sparse_op, wavefunction.amplitudes)
def create_proxy_to(logger, ip, port):
    """
    Install iptables NAT rules that proxy TCP traffic arriving on *port*
    (locally or from the network) to *ip*, then enable kernel forwarding.

    :see: ``HostNetwork.create_proxy_to``

    :param logger: logger passed to the ``CREATE_PROXY_TO`` action and the
        ``iptables`` helper.
    :param ip: target IP address traffic should be redirected to.
    :param port: TCP port to proxy (same port on this host and the target).
    :return: a ``Proxy`` record describing the created forwarding.
    """
    action = CREATE_PROXY_TO(
        logger=logger, target_ip=ip, target_port=port)

    with action:
        # NOTE(review): `unicode` means this module targets Python 2.
        # iptables arguments are passed as ASCII byte strings throughout.
        encoded_ip = unicode(ip).encode("ascii")
        encoded_port = unicode(port).encode("ascii")

        # The first goal is to configure "Destination NAT" (DNAT).  We're just
        # going to rewrite the destination address of traffic arriving on the
        # specified port so it looks like it is destined for the specified ip
        # instead of destined for "us".  This gets the packets delivered to the
        # right destination.
        iptables(logger, [
            # All NAT stuff happens in the netfilter NAT table.
            b"--table", b"nat",

            # Destination NAT has to happen "pre"-routing so that the normal
            # routing rules on the machine will use the re-written destination
            # address and get the packet to that new destination.  Accomplish
            # this by appending the rule to the PREROUTING chain.
            b"--append", b"PREROUTING",

            # Only re-route traffic with a destination port matching the one we
            # were told to manipulate.  It is also necessary to specify TCP (or
            # UDP) here since that is the layer of the network stack that
            # defines ports.
            b"--protocol", b"tcp", b"--destination-port", encoded_port,

            # And only re-route traffic directed at this host.  Traffic
            # originating on this host directed at some random other host that
            # happens to be on the same port should be left alone.
            b"--match", b"addrtype", b"--dst-type", b"LOCAL",

            # Tag it as a flocker-created rule so we can recognize it later.
            b"--match", b"comment", b"--comment", FLOCKER_PROXY_COMMENT_MARKER,

            # If the filter matched, jump to the DNAT chain to handle doing the
            # actual packet mangling.  DNAT is a built-in chain that already
            # knows how to do this.  Pass an argument to the DNAT chain so it
            # knows how to mangle the packet - rewrite the destination IP of
            # the address to the target we were told to use.
            b"--jump", b"DNAT", b"--to-destination", encoded_ip,
        ])

        # Bonus round!  Having performed DNAT (changing the destination) during
        # prerouting we are now prepared to send the packet on somewhere else.
        # On its way out of this system it is also necessary to further
        # modify and then track that packet.  We want it to look like it
        # comes from us (the downstream client will be *very* confused if
        # the node we're passing the packet on to replies *directly* to them;
        # and by confused I mean it will be totally broken, of course) so we
        # also need to "masquerade" in the postrouting chain.  This changes
        # the source address (ip and port) of the packet to the address of
        # the external interface the packet is exiting upon.  Doing SNAT here
        # would be a little bit more efficient because the kernel could avoid
        # looking up the external interface's address for every single packet.
        # But it requires this code to know that address and it requires that
        # if it ever changes the rule gets updated and it may require some
        # steps to do port allocation (not sure what they are yet).  So we'll
        # just masquerade for now.
        iptables(logger, [
            # All NAT stuff happens in the netfilter NAT table.
            b"--table", b"nat",

            # As described above, this transformation happens after routing
            # decisions have been made and the packet is on its way out of the
            # system.  Therefore, append the rule to the POSTROUTING chain.
            b"--append", b"POSTROUTING",

            # We'll stick to matching the same kinds of packets we matched in
            # the earlier stage.  We might want to change the factoring of this
            # code to avoid the duplication - particularly in case we want to
            # change the specifics of the filter.
            #
            # This omits the LOCAL addrtype check, though, because at this
            # point the packet is definitely leaving this host.
            b"--protocol", b"tcp", b"--destination-port", encoded_port,

            # Do the masquerading.
            b"--jump", b"MASQUERADE",
        ])

        # Secret level!!  Traffic that originates *on* the host bypasses the
        # PREROUTING chain.  Instead, it passes through the OUTPUT chain.  If
        # we want connections from localhost to the forwarded port to be
        # affected then we need a rule in the OUTPUT chain to do the same kind
        # of DNAT that we did in the PREROUTING chain.
        iptables(logger, [
            # All NAT stuff happens in the netfilter NAT table.
            b"--table", b"nat",

            # As mentioned, this rule is for the OUTPUT chain.
            b"--append", b"OUTPUT",

            # Matching the exact same kinds of packets as the PREROUTING rule
            # matches.
            b"--protocol", b"tcp",
            b"--destination-port", encoded_port,
            b"--match", b"addrtype", b"--dst-type", b"LOCAL",

            # Do the same DNAT as we did in the rule for the PREROUTING chain.
            b"--jump", b"DNAT", b"--to-destination", encoded_ip,
        ])

        # Allow the DNAT-ed packets through the filter table's FORWARD chain
        # in case the default policy is DROP.
        iptables(logger, [
            b"--table", b"filter",
            b"--insert", b"FORWARD",

            b"--destination", encoded_ip,
            b"--protocol", b"tcp", b"--destination-port", encoded_port,

            b"--jump", b"ACCEPT",
        ])

        # The network stack only considers forwarding traffic when certain
        # system configuration is in place.
        #
        # https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
        # will explain the meaning of these in (very slightly) more detail.
        conf = FilePath(b"/proc/sys/net/ipv4/conf")
        descendant = conf.descendant([b"default", b"forwarding"])
        with descendant.open("wb") as forwarding:
            forwarding.write(b"1")

        # In order to have the OUTPUT chain DNAT rule affect routing decisions,
        # we also need to tell the system to make routing decisions about
        # traffic from or to localhost.
        for path in conf.children():
            with path.child(b"route_localnet").open("wb") as route_localnet:
                route_localnet.write(b"1")

        return Proxy(ip=ip, port=port) | 006ae25e069f3ac82887e9071170b334a0c6ea74 | 26,208 |
def get_output_shape(tensor_shape, channel_axis):
    """Build a broadcastable shape: -1 at the channel axis, 1 elsewhere.

    Args:
        tensor_shape: A shape vector (sequence) of a tensor; only its
            length is used.
        channel_axis: Index of the output-channel dimension.

    Returns:
        A list with -1 at position ``channel_axis`` and 1 at every other
        position.
    """
    # BUG FIX: the original compared indices with `is`, which relies on
    # CPython's small-int caching and silently fails for axes >= 257
    # (and is implementation-defined in general); use `==` instead.
    return [-1 if i == channel_axis else 1 for i in range(len(tensor_shape))]
import numbers
def process_padding(pad_to_length, lenTrials):
    """Simplified padding interface for all taper based methods.

    Padding has to be done **before** tapering!

    Parameters
    ----------
    pad_to_length : int, None or 'nextpow2'
        Either an integer indicating the absolute length of
        the trials after padding or `'nextpow2'` to pad all trials
        to the nearest power of two. If `None`, no padding is to
        be performed
    lenTrials : sequence of int_like
        Sequence holding all individual trial lengths

    Returns
    -------
    abs_pad : int
        Absolute length of all trials after padding
    """
    # Validate the padding option.  Note: bool is an int subclass, so it
    # passes the Number check and must be rejected explicitly.
    valid = isinstance(pad_to_length, (numbers.Number, str, type(None)))
    if isinstance(pad_to_length, str) and pad_to_length not in availablePaddingOpt:
        valid = False
    if isinstance(pad_to_length, bool):
        valid = False
    if not valid:
        lgl = "`None`, 'nextpow2' or an integer like number"
        actual = f"{pad_to_length}"
        raise SPYValueError(legal=lgl, varname="pad_to_length", actual=actual)

    if isinstance(pad_to_length, numbers.Number):
        # Zero-pad ALL trials to the same requested length; it must be at
        # least as long as the longest trial.
        scalar_parser(pad_to_length,
                      varname='pad_to_length',
                      ntype='int_like',
                      lims=[lenTrials.max(), np.inf])
        abs_pad = pad_to_length
    elif pad_to_length == 'nextpow2':
        # Pad to the optimal FFT length.
        abs_pad = _nextpow2(int(lenTrials.max()))
    elif pad_to_length is None:
        # No explicit padding: everything is brought to the longest trial.
        abs_pad = int(lenTrials.max())
        if lenTrials.min() != lenTrials.max():
            msg = f"Unequal trial lengths present, padding all trials to {abs_pad} samples"
            SPYWarning(msg)

    # `abs_pad` is now the (soon to be padded) signal length in samples
    return abs_pad
from ..constants import asecperrad
def angular_to_physical_size(angsize,zord,usez=False,**kwargs):
    """
    Convert an observed angular size to a physical size.

    :param angsize: Angular size in arcsecond.
    :type angsize: float or an :class:`AngularSeparation` object
    :param zord: Redshift or distance
    :type zord: scalar number
    :param usez:
        If True, the input will be interpreted as a redshift, and kwargs
        will be passed into the distance calculation. The result will be in
        pc. Otherwise, `zord` will be interpreted as a distance.
    :type usez: boolean

    kwargs are passed into :func:`cosmo_z_to_dist` if `usez` is True.

    :returns:
        A scalar value for the physical size (in pc if redshift is used,
        otherwise in `zord` units)
    """
    if usez:
        dist = cosmo_z_to_dist(zord,disttype=2,**kwargs)*1e6 #pc
    else:
        if kwargs:
            raise TypeError('if not using redshift, kwargs should not be provided')
        dist = zord

    # AngularSeparation objects carry their value in an `arcsec` attribute.
    if hasattr(angsize,'arcsec'):
        angsize = angsize.arcsec

    # Exact (non-small-angle) inversion of the angular-size relation.
    sintheta = np.sin(angsize/asecperrad)
    return dist*(1/sintheta/sintheta-1)**-0.5
import sys
from sys import stderr
import getopt
def handle_arguments():
    """Verify the presence of all necessary arguments and return them.

    Expects a ``-f <signalp file>`` option on the command line; prints a
    usage message via show_help() on any problem.

    Returns:
        dict mapping argument keys (e.g. 'signalpfile') to their values.
    """
    if len(sys.argv) == 1:
        # BUG FIX: `stderr` imported from sys is a file object, not a
        # callable; write the message instead of calling it.
        print("no arguments provided.", file=sys.stderr)
        show_help()
    try:  # check for the right arguments
        keys, values = getopt.getopt(sys.argv[1:], "hf:")
    except getopt.GetoptError:
        print("invalid arguments provided.", file=sys.stderr)
        show_help()
    args = {}
    for key, value in keys:
        if key == '-f':
            args['signalpfile'] = value
    for key in ['signalpfile']:
        if key.endswith("file"):
            if not args_file_exists(args, key):
                show_help()
        elif key.endswith("dir"):
            if not args_dir_exists(args, key):
                show_help()
        # BUG FIX: dict.has_key() and `print >>` are Python 2 only and do
        # not even parse on Python 3; use `in` and print(..., file=...).
        elif key not in args:
            print("missing argument", key, file=sys.stderr)
            show_help()
    return args
def is_tuple(x):
    """Check whether the argument is a tuple.

    Parameters
    ----------
    x : object
        Object to check.

    Returns
    -------
    bool
        True if the argument is a tuple (or tuple subclass),
        False otherwise.
    """
    result = isinstance(x, tuple)
    return result
def find_verbalizer(tagged_text: str) -> pynini.FstLike:
    """Compose tagged text with the verbalizer FST.

    Given tagged text, e.g. token {name: ""} token {money {fractional: ""}},
    creates a verbalization lattice.  This is context-independent.

    Args:
        tagged_text: input text

    Returns: verbalized lattice
    """
    return tagged_text @ verbalizer.fst
def get_color_pattern(input_word: str, solution: str) -> str:
    """Compute the Wordle-style color pattern for a guess.

    Returns a 5-character string where '2' marks an exact match,
    '1' a letter present elsewhere in the solution, and '0' a miss.
    Duplicate letters only score as many times as they occur in the
    solution.
    """
    colors = [0] * 5
    remaining = list(solution)
    # First pass: exact (green) matches consume their solution letter.
    for pos, ch in enumerate(input_word):
        if solution[pos] == ch:
            colors[pos] = 2
            remaining[pos] = "_"
    # Second pass: misplaced (yellow) letters consume leftover letters.
    for pos, ch in enumerate(input_word):
        if colors[pos] != 2 and ch in remaining:
            colors[pos] = 1
            remaining[remaining.index(ch)] = "_"
    return "".join(str(c) for c in colors)
import inspect
def getargspec(obj):
    """Get the names and default values of a function's arguments.

    A tuple of four things is returned: (args, varargs, varkw, defaults).
      * 'args' is a list of the argument names (it may contain nested lists).
      * 'varargs' and 'varkw' are the names of the * and ** arguments or None.
      * 'defaults' is an n-tuple of the default values of the last n arguments.

    Modified version of inspect.getargspec from the Python Standard
    Library.
    """
    # Bound/unbound methods wrap the real function object; unwrap first.
    if inspect.ismethod(obj):
        func = obj.__func__
    elif inspect.isfunction(obj):
        func = obj
    else:
        raise TypeError('arg is not a Python function')
    args, varargs, varkw = inspect.getargs(func.__code__)
    return args, varargs, varkw, func.__defaults__
def nearest(x, xp, fp=None):
    """
    Return the *fp* value corresponding to the *xp* that is nearest to *x*.

    If *fp* is missing, return the index of the nearest value.
    """
    if len(xp) == 1:
        # Degenerate case: a single sample point matches everything.
        if np.isscalar(x):
            return fp[0] if fp is not None else 0
        return np.array(len(x) * (fp if fp is not None else [0]))

    # Without fp, the result is an index into xp, so use integer
    # positions; otherwise make sure fp is an array.
    fp = np.arange(len(xp)) if fp is None else np.asarray(fp)

    # Sort xp (carrying fp along) if it is not already sorted.
    xp = np.asarray(xp)
    if np.any(np.diff(xp) < 0.):
        order = np.argsort(xp)
        xp, fp = xp[order], fp[order]

    # Bin edges at the midpoints of xp select the nearest sample.
    edges = 0.5 * (xp[:-1] + xp[1:])
    return fp[np.searchsorted(edges, x)]
def generate_warning_message(content: str):
    """
    Receives the WARNING message content and colorizes it (yellow).
    """
    prefix = colorama.Style.RESET_ALL + colorama.Fore.YELLOW
    return prefix + f"WARNING: {content}" + colorama.Style.RESET_ALL
def extract_Heff_params(params, delta):
    """Extract the effective-Hamiltonian eigensolver settings.

    Pulls the Heff-specific entries out of the VUMPS parameter dict;
    when adaptive tolerance is enabled, the tolerance is scaled by
    *delta*.
    """
    heff = {key: params[key] for key in ("Heff_tol", "Heff_ncv", "Heff_neigs")}
    if params["adaptive_Heff_tol"]:
        heff["Heff_tol"] *= delta
    return heff
def illumination_correction(beam_size: sc.Variable, sample_size: sc.Variable,
                            theta: sc.Variable) -> sc.Variable:
    """Intensity correction factor for a Gaussian beam footprint.

    Compute the factor by which the intensity should be multiplied to
    account for the scattering geometry, where the beam is Gaussian in
    shape.

    :param beam_size: Width of incident beam.
    :param sample_size: Width of sample in the dimension of the beam.
    :param theta: Incident angle.
    """
    # Beam footprint projected onto the sample at incidence angle theta.
    footprint = beam_size / sc.sin(theta)
    # NOTE(review): despite the name, this FWHM<->sigma factor multiplies
    # the size ratio directly; confirm against the reference derivation.
    fwhm_to_std = 2 * np.sqrt(2 * np.log(2))
    return sc.erf(sample_size / footprint * fwhm_to_std)
def get_subscription_query_list_xml(xml_file_path):
    """
    Extract the embedded Windows-event XML QueryList element out of an
    XML subscription settings file.
    """
    with open(xml_file_path, 'rb') as f_xml:
        tree = lxml.etree.parse(f_xml)
    # XPath selection requires namespace to used!
    matches = tree.xpath(
        '/ns:Subscription/ns:Query',
        namespaces={'ns': 'http://schemas.microsoft.com/2006/03/windows/events/subscription'})
    assert len(matches) == 1, f"Unexpected number of elements matched by XPath query '/ns:Subscription/ns:Query' for file {xml_file_path}."
    return xml_element_get_all_text(matches[0])
async def create_state_store() -> StateStore:
    """Create a ready-to-use StateStore instance."""
    provider = DeckDataProvider()
    # TODO(mc, 2020-11-18): check short trash FF
    definition = await provider.get_deck_definition()
    fixed_labware = await provider.get_deck_fixed_labware(definition)
    return StateStore(
        deck_definition=definition,
        deck_fixed_labware=fixed_labware,
    )
from typing import Optional
from typing import Mapping
def get_stream(name: Optional[str] = None,
               tags: Optional[Mapping[str, str]] = None,
               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStreamResult:
    """
    Use this data source to get information about a Kinesis Stream for use in other
    resources.

    For more details, see the [Amazon Kinesis Documentation](https://aws.amazon.com/documentation/kinesis/).

    ## Example Usage

    ```python
    import pulumi
    import pulumi_aws as aws

    stream = aws.kinesis.get_stream(name="stream-name")
    ```

    :param str name: The name of the Kinesis Stream.
    :param Mapping[str, str] tags: A map of tags to assigned to the stream.
    """
    invoke_args = {'name': name, 'tags': tags}
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke('aws:kinesis/getStream:getStream', invoke_args, opts=opts, typ=GetStreamResult).value
    return AwaitableGetStreamResult(
        arn=result.arn,
        closed_shards=result.closed_shards,
        creation_timestamp=result.creation_timestamp,
        id=result.id,
        name=result.name,
        open_shards=result.open_shards,
        retention_period=result.retention_period,
        shard_level_metrics=result.shard_level_metrics,
        status=result.status,
        tags=result.tags)
def get_vlan_config_commands(vlan, vid):
    """Build the command list required for VLAN configuration.

    Args:
        vlan: dict of VLAN parameters; supported keys are 'name',
            'vlan_state', 'admin_state' and 'mode'.
        vid: VLAN id as a string.

    Returns:
        List of CLI commands, wrapped in 'vlan <vid>' ... 'exit'.
    """
    reverse_value_map = {
        "admin_state": {
            "down": "shutdown",
            "up": "no shutdown"
        }
    }
    if vlan.get('admin_state'):
        # do we need to apply the value map?
        # only if we are making a change to the admin state
        # would need to be a loop or more in depth check if
        # value map has more than 1 key
        vlan = apply_value_map(reverse_value_map, vlan)
    VLAN_ARGS = {
        'name': 'name {name}',
        'vlan_state': 'state {vlan_state}',
        'admin_state': '{admin_state}',
        'mode': 'mode {mode}'
    }
    commands = []
    # BUG FIX: dict.iteritems() is Python 2 only; items() works on both.
    for param, value in vlan.items():
        command = VLAN_ARGS.get(param).format(**vlan)
        if command:
            commands.append(command)
    commands.insert(0, 'vlan ' + vid)
    commands.append('exit')
    return commands
def kdf():
    """
    Returns a Koalas dataframe with a few fixed values for generic testing.
    """
    data = {
        "x": [36.12, 47.32, 56.78, None],
        "y": [28.21, 87.12, 90.01, None],
        "names": ["geography", "place", "location", "geospatial"],
    }
    return ks.DataFrame(data)
from pathlib import Path
def build_name(prefix: str, source_ind: int, name: Path) -> str:
    """Build a dotted package name from the index and path.

    ``__init__.py`` files are identified with their parent package
    directory; the source index is embedded as two hex digits.
    """
    if name.name.casefold() == '__init__.py':
        name = name.parent
    stem = name.with_suffix('')
    # Normalize both path separator styles into dots.
    dotted = str(stem).replace('\\', '.').replace('/', '.')
    return f'{prefix}_{source_ind:02x}.{dotted}'
def step(x, mu, sigma, bkg, a):
    """Step-function template built from the complementary error function.

    Can be used as a component of other fit functions.

    :param x: evaluation point(s)
    :param mu: step center
    :param sigma: step width
    :param bkg: constant background level
    :param a: step amplitude
    """
    return bkg + a * erfc((x - mu) / (np.sqrt(2) * sigma))
import json
def json_line_to_track(line):
    """Convert a JSON line to the corresponding Track object.

    The line must contain 'id', 'genres' and 'wordcount' fields; the
    word-count mapping is normalized to int keys and int values (JSON
    object keys are always strings).
    """
    track_dct = json.loads(line)
    # Clean up word count dictionary
    wc_dct = {}
    # BUG FIX: dict.iteritems() is Python 2 only; items() works on both.
    for word_id, word_count in track_dct['wordcount'].items():
        wc_dct[int(word_id)] = int(word_count)
    return Track(track_dct['id'], track_dct['genres'], wc_dct)
def update_planting(
    key, secret, planting_info,
):
    """Updates a specified planting.

    API reference: https://docs.awhere.com/knowledge-base-docs/field-plantings/

    Parameters
    ----------
    key : str
        API key for a valid aWhere API application.
    secret : str
        API secret for a valid aWhere API application.
    planting_info : dict
        Dictionary containing the information to update an aWhere planting.
        Can contain the following keys:
            field_id: str
                Field ID for an existing aWhere field.
            planting_id: int
                Planting ID for an existing planting.
            update_type : str
                The type of update. Options are 'full' or 'partial'. Full
                update replaces every parameter in the planting. Partial
                update replaces only the provided parameters in the planting.
            crop: str
                ID for the crop that will be planted. Must be a valid aWhere
                crop ID.
            planting_date: str, required
                Planting date for the crop, formatted as 'YYYY-MM-DD'.
            projected_yield_amount: str or int or float, optional
                Projected yield amount for the crop. If used, the
                projected_yield_units parameter must also be set.
            projected_yield_units: str, optional
                Units for the projected crop yield. If used, the
                projected_yield_amount parameter must also be set.
            projected_harvest_date: str, optional
                Projected harvest date for the crop, formatted as
                'YYYY-MM-DD'.
            yield_amount: str or int or float, optional
                Actual yield amount for the crop. If used, the
                yield_units parameter must also be set.
            yield_units: str, optional
                Units for the actual crop yield. If used, the
                yield_amount parameter must also be set.
            harvest_date: str, optional
                Actual harvest date for the crop, formatted as 'YYYY-MM-DD'.

    Example
    -------
    >>> # Imports
    >>> import os
    >>> import awherepy as aw
    >>> import ahwerepy.plantings as awp
    >>> # Get aWhere API key and secret
    >>> awhere_api_key = os.environ.get('AWHERE_API_KEY')
    >>> awhere_api_secret = os.environ.get('AWHERE_API_SECRET')
    >>> # Define planting update information
    >>> planting_update_info = {
    ...     'field_id': 'VT-Manchester',
    ...     'planting_id': 475994,
    ...     'update_type': 'partial',
    ...     'crop': 'sugarbeet-generic',
    ...     'planting_date': '2020-06-05',
    ...     'projected_yield_amount': 50,
    ...     'projected_yield_units': 'large boxes',
    ...     'projected_harvest_date': '2020-08-08',
    ...     'yield_amount': 200,
    ...     'yield_units': 'large boxes',
    ...     'harvest_date': '2020-07-31'
    ... }
    >>> # Update planting
    >>> awp.update_planting(
    ...     awhere_api_key,
    ...     awhere_api_secret,
    ...     planting_info=planting_update_info
    ... )
    Attempting to update planting...
    Updated planting: 475994 in VT-Manchester
    """
    # Check if credentials are valid
    if aw.valid_credentials(key, secret):
        # Check planting_info object type
        if not isinstance(planting_info, dict):
            raise TypeError(
                "Invalid type: 'planting_info' must be of type dictionary."
            )

        # Raise error if field does not exist
        if (
            planting_info.get("field_id")
            not in awf.get_fields(key, secret).index
        ):
            raise KeyError("Field does not exist within account.")

        # Raise error if planting does not exist
        if planting_info.get("planting_id") != "current":
            if (
                planting_info.get("planting_id")
                not in get_plantings(key, secret).index
            ):
                raise KeyError("Planting does not exist within account.")

        # Define api url
        api_url = (
            "https://api.awhere.com/v2/agronomics/fields/"
            f"{planting_info.get('field_id')}/plantings/"
            f"{planting_info.get('planting_id')}"
        )

        # Get OAuth token
        auth_token = aw.get_oauth_token(key, secret)

        # Set up the HTTP request headers
        auth_headers = {
            "Authorization": f"Bearer {auth_token}",
            "Content-Type": "application/json",
        }

        # Full update
        if planting_info.get("update_type").lower() == "full":
            # Define request body
            field_body = {
                "crop": planting_info.get("crop"),
                "plantingDate": planting_info.get("planting_date"),
                "projections": {
                    "yield": {
                        "amount": planting_info.get("projected_yield_amount"),
                        "units": planting_info.get("projected_yield_units"),
                    },
                    "harvestDate": planting_info.get("projected_harvest_date"),
                },
                "yield": {
                    "amount": planting_info.get("yield_amount"),
                    "units": planting_info.get("yield_units"),
                },
                "harvestDate": planting_info.get("harvest_date"),
            }

            # Perform HTTP request to update planting information
            print("Attempting to update planting...")
            response = rq.put(
                f"{api_url}", headers=auth_headers, json=field_body,
            )

        # Partial update
        elif planting_info.get("update_type").lower() == "partial":
            # Define field body (JSON Patch style replace operations).
            # NOTE(review): `key`/`value` here shadow the credential
            # parameter only inside the comprehension scope.
            field_body = [
                {"op": "replace", "path": f"/{key}", "value": f"{value}"}
                for key, value in planting_info.items()
            ]

            # Perform HTTP request to update planting information
            print("Attempting to update planting...")
            response = rq.patch(
                f"{api_url}", headers=auth_headers, json=field_body,
            )

        # Invalid update type
        else:
            raise ValueError(
                "Invalid update type. Must be 'full' or 'partial'."
            )

        # Check if request succeeded
        if response.ok:
            # Get planting for output
            planting = get_plantings(
                key,
                secret,
                kwargs={
                    "field_id": planting_info.get("field_id"),
                    "planting_id": planting_info.get("planting_id"),
                },
            )

            # Indicate success
            # BUG FIX: the original message lacked a space between the
            # planting id and "in" ("...475994in VT-Manchester").
            print(
                (
                    f"Updated planting: {planting_info.get('planting_id')} "
                    f"in {planting_info.get('field_id')}"
                )
            )

        else:
            # Indicate error
            planting = "Failed to update planting."
            print(planting)

    # Invalid credentials
    else:
        # Raise error
        raise ValueError("Invalid aWhere API credentials.")

    # Return planting
    return planting
def check_for_multiple(files):
    """Return the list of files that looks like a multi-part post, or "" if none."""
    for pattern in _RE_MULTIPLE:
        matched = check_for_sequence(pattern, files)
        if matched:
            return matched
    return ""
def get_density2D(f, data, steps=100):
    """Evaluate a model's prediction on a steps x steps grid.

    The grid bounds are taken from the min/max of *data* (column 0 ->
    x axis, column 1 -> y axis).  Returns the predicted density grid
    (steps x steps) and the discretization of each axis.
    """
    xmin, xmax = data[:, 0].min(), data[:, 0].max()
    ymin, ymax = data[:, 1].min(), data[:, 1].max()
    xlin = np.linspace(xmin, xmax, steps)
    ylin = np.linspace(ymin, ymax, steps)
    xx, yy = np.meshgrid(xlin, ylin)
    # Flatten the mesh into (steps*steps, 2) points for prediction.
    grid = np.c_[xx.ravel(), yy.ravel()]
    density = f.predict(grid).reshape(steps, steps)
    return density, xlin, ylin
def draw_cards_command(instance, player, arguments):
    """Draw cards from the deck into the calling player's hand.

    Args:
        instance: The GameInstance database model for this operation.
        player: The email address of the player requesting the action.
        arguments: Two items, in order:
            1: cards_to_draw - number of cards to attempt to draw.
            2: ignore_empty_deck - boolean; when true, drawing stops
               gracefully once the deck is empty, when false an empty
               deck raises an error and the hand is left unchanged.

    Returns:
        The hand of the player after drawing the new cards.

    Raises:
        IndexError: if the deck runs out of cards and empty-deck errors
            are not being ignored.
        ValueError: if the requesting player is not in the instance.
    """
    count = int(arguments[0])
    ignore_empty = get_boolean(arguments[1])
    return draw_cards(instance, player, count, ignore_empty)
def recipe_content(*,
                   projects_base,
                   project,
                   recipe):
    """
    Returns the content of a recipe in a project, based on the
    projects base path, and the project and recipe name.

    Kwargs:
        projects_base (str): base path for all projects, e.g.
            '/path/to/newsltd_etl/projects'.
        project (str): Project name, as defined in Project('projfoo_name')
        recipe (str): Recipe name e.g. "compute_contracts",
            as part of recipe file name "compute_contracts.py"

    Returns:
        str: the recipe file's content.
    """
    rpath = recipe_path(projects_base=projects_base,
                        project=project,
                        recipe=recipe)
    # BUG FIX: use a context manager so the file handle is closed
    # deterministically instead of leaking until garbage collection.
    # (Docstring also fixed: this returns the file content, not a bool.)
    with open(rpath, "r") as recipe_file:
        return recipe_file.read()
from typing import Union
def precision_score(
    y_true: str,
    y_score: str,
    input_relation: Union[str, vDataFrame],
    cursor=None,
    pos_label: (int, float, str) = 1,
):
    """
    ---------------------------------------------------------------------------
    Computes the Precision Score.

    Parameters
    ----------
    y_true: str
        Response column.
    y_score: str
        Prediction.
    input_relation: str/vDataFrame
        Relation to use to do the scoring. The relation can be a view or a table
        or even a customized relation. For example, you could write:
        "(SELECT ... FROM ...) x" as long as an alias is given at the end of the
        relation.
    cursor: DBcursor, optional
        Vertica database cursor.
    pos_label: int/float/str, optional
        To compute the Precision Score, one of the response column classes must be
        the positive one. The parameter 'pos_label' represents this class.

    Returns
    -------
    float
        score
    """
    # Validate argument types before touching the database.
    check_types(
        [
            ("y_true", y_true, [str],),
            ("y_score", y_score, [str],),
            ("input_relation", input_relation, [str, vDataFrame],),
        ]
    )
    # check_cursor may open a fresh connection (returned as `conn`) when no
    # cursor was supplied; that connection is closed after scoring below.
    cursor, conn, input_relation = check_cursor(cursor, input_relation)
    matrix = confusion_matrix(y_true, y_score, input_relation, cursor, pos_label)
    if conn:
        conn.close()
    # The negative class is labelled '0' when pos_label is 1, otherwise
    # 'Non-<pos_label>' (mirrors confusion_matrix's labelling).
    non_pos_label = 0 if (pos_label == 1) else "Non-{}".format(pos_label)
    # NOTE(review): indexing assumes matrix.values[<class>][0/1] lays out
    # tn/fn under the negative class and fp/tp under the positive class —
    # defined by confusion_matrix; confirm against its output layout.
    tn, fn, fp, tp = (
        matrix.values[non_pos_label][0],
        matrix.values[non_pos_label][1],
        matrix.values[pos_label][0],
        matrix.values[pos_label][1],
    )
    # Precision = TP / (TP + FP); guard the division when nothing was
    # predicted positive.
    precision = tp / (tp + fp) if (tp + fp != 0) else 0
    return precision
import click
def new_deployment(name):
    """Create a new deployment in the /deployments directory.

    Usage:
        `drone-deploy new NAME`
    where NAME is a friendly name for your deployment, for example:
        `drone-deploy new drone.yourdomain.com`
    """
    # The deployment directory doubles as the existence check.
    deployment_dir = create_deployment_dir_if_not_exists(name)
    if not deployment_dir:
        click.echo("Deployment with that name already exists.")
        return False
    # Seed the new directory with templates and a generated config file.
    copy_terraform_templates_to(deployment_dir)
    copy_packer_templates_to(deployment_dir)
    copy_build_script_to(deployment_dir)
    generate_config_yaml(deployment_dir)
    for message in (
        f"Deployment created: {deployment_dir}",
        "Next steps:",
        f" - edit the config.yaml file ('drone-deploy edit {name}')",
        f" - run 'drone-deploy prepare {name}' to bootstrap the deployment.",
        f" - run 'drone-deploy build-ami {name} to build the drone-server ami.",
        f" - run 'drone-deploy plan|apply {name} to deploy.",
    ):
        click.echo(message)
def GenerateConfig(context):
    """Returns the configs and waiters for this deployment.

    The configs and waiters define a series of phases that the deployment will
    go through. This is a way to "pause" the deployment while some process on
    the VMs happens, checks for success, then goes to the next phase.
    The configs here define the phases, and the waiters "wait" for the phases
    to be complete.

    The phases are:
    CREATE_DOMAIN: the Windows Active Directory node installs and sets up the
        Active Directory.
    JOIN_DOMAIN: all nodes join the domain set up by the Active Directory node.
    CREATE_CLUSTER: creates the failover cluster, enables S2D
    INSTALL_FCI: Installs SQL FCI on all non-master nodes.

    Args:
        context: the context of the deployment. This is a class that will have
            "properties" and "env" dicts containing parameters for the
            deployment.

    Returns:
        A dict with a single "resources" key holding the list of config and
        waiter definitions.
    """
    num_cluster_nodes = context.properties["num_cluster_nodes"]
    deployment = context.env["deployment"]
    # Config/waiter names for the CREATE_DOMAIN phase.
    create_domain_config_name = utils.ConfigName(
        deployment, utils.CREATE_DOMAIN_URL_ENDPOINT)
    create_domain_waiter_name = utils.WaiterName(
        deployment, utils.CREATE_DOMAIN_URL_ENDPOINT)
    # Config/waiter names for the JOIN_DOMAIN phase.
    join_domain_config_name = utils.ConfigName(
        deployment, utils.JOIN_DOMAIN_URL_ENDPOINT)
    join_domain_waiter_name = utils.WaiterName(
        deployment, utils.JOIN_DOMAIN_URL_ENDPOINT)
    # This is the list of resources that will be returned to the deployment
    # manager so that the deployment manager can create them. Every Item in this
    # list will have a dependency on the item before it so that they are created
    # in order.
    cluster_config_name = utils.ConfigName(
        deployment, utils.CREATE_CLUSTER_URL_ENDPOINT)
    cluster_waiter_name = utils.WaiterName(
        deployment, utils.CREATE_CLUSTER_URL_ENDPOINT)
    fci_config_name = utils.ConfigName(
        deployment, utils.INSTALL_FCI_URL_ENDPOINT)
    fci_waiter_name = utils.WaiterName(
        deployment, utils.INSTALL_FCI_URL_ENDPOINT)
    resources = [
        # Phase 1: only the Active Directory node reports success (count 1).
        CreateConfigDefinition(create_domain_config_name),
        CreateWaiterDefinition(
            create_domain_waiter_name,
            create_domain_config_name,
            1,
            deps=[create_domain_config_name]),
        # Phase 2: every cluster node must report that it joined the domain.
        CreateConfigDefinition(
            join_domain_config_name,
            deps=[create_domain_waiter_name]),
        CreateWaiterDefinition(
            join_domain_waiter_name,
            join_domain_config_name,
            num_cluster_nodes,
            deps=[join_domain_config_name]),
        # Phase 3: a single node creates the failover cluster (count 1).
        CreateConfigDefinition(
            cluster_config_name,
            deps=[join_domain_waiter_name]),
        CreateWaiterDefinition(
            cluster_waiter_name,
            cluster_config_name,
            1,
            deps=[cluster_config_name]),
        # Phase 4: FCI install on every non-master node.
        CreateConfigDefinition(
            fci_config_name,
            deps=[cluster_waiter_name]),
        CreateWaiterDefinition(
            fci_waiter_name,
            fci_config_name,
            # -1 to account for the fact that the master already set up
            # FCI by this point.
            (num_cluster_nodes - 1),
            deps=[fci_config_name])
    ]
    return {
        "resources": resources,
    }
import re
def _format_param_value(value_repr):
"""
Format a parameter value for displaying it as test output. The
values are string obtained via Python repr.
"""
regexs = ["^'(.+)'$",
"^u'(.+)'$",
"^<class '(.+)'>$"]
for regex in regexs:
m = re.match(regex, value_repr)
if m and m.group(1).strip():
return m.group(1)
return value_repr | 87d881a3159c18f56dd2bb1c5556f3e70d27c1bc | 26,237 |
import logging
def redis_get_keys(duthost, db_id, pattern):
    """
    Fetch all keys matching a glob pattern from a redis database on the DUT.

    :param duthost: DUT host object
    :param db_id: ID of redis database
    :param pattern: Redis key pattern
    :return: A list of key names (strings), or None when nothing matched
    """
    cmd = 'sonic-db-cli {} KEYS \"{}\"'.format(db_id, pattern)
    logging.debug('Getting keys from redis by command: {}'.format(cmd))
    stdout = duthost.shell(cmd)['stdout'].strip()
    if not stdout:
        return None
    return stdout.split('\n')
def data_process(X, labels, window_size, missing):
    """
    Window-reshape the labels and optionally simulate missing input values.

    Takes in 2 numpy arrays:
    - X of shape (N, chm_len)
    - labels of shape (N, chm_len)
    and returns:
    - X of shape (N, chm_len), with simulated missing values when
      ``missing`` is non-zero
    - labels of shape (N, chm_len//window_size)
    """
    windowed_labels = window_reshape(labels, window_size)
    if missing != 0:
        print("Simulating missing values...")
        X = simulate_missing_values(X, missing)
    return X, windowed_labels
def check_response_status(curr_response, full_time):
    """Map a VK API response string to a numeric status code.

    Also prints the matching console log message for each outcome.

    Args:
        curr_response (str): Response text from the VK API URL.
        full_time (str): Full-format time used in the success log line.

    Returns:
        int: 1 on success, 5/8/29 for the known error kinds, 0 otherwise.
    """
    if R_ONE in curr_response:
        print(f'[{full_time}] Status has been set successfully! Waiting 1 hour to update!')
        return 1
    if R_FIVE in curr_response:
        print('[Error] Access token has been expired or it is wrong. Please change it and try again!')
        return 5
    if R_EIGHT in curr_response:
        print('[Error] Got deprecated API version or no API version. Please add/change API version and try again!')
        return 8
    if R_TWENTY_NINE in curr_response:
        print("[Error] Rate limit!")
        print('We are sorry, we can\'t do nothing about this. All you need is patience. Please wait before initilizing script again!')
        return 29
    print('[Error] Unknown error! Here are response string:')
    print(curr_response)
    print('We hope this can help you out to fix this problem!')
    return 0
def height_to_amplitude(height, sigma):
    """
    Convert the peak height of a 1D Gaussian to its integrated amplitude
    (amplitude = height * sigma * sqrt(2*pi)).
    """
    scale = np.sqrt(2 * np.pi)
    return height * sigma * scale
def get_param_value(val, unit, file, line, units=True):
    """
    Grab the parameter value from a line in the log file.

    Returns an int, float (with units), bool, None (if no value
    was provided) or a string (if processing failed).

    Args:
        val: raw value text from the log line.
        unit: unit object multiplied onto float values when ``units`` is True
            (wrapped as ``Quantity(float(val) * unit)``).
        file: log file name, used only in error messages.
        line: line number within ``file``, used only in error messages.
        units: when True, floats are returned as ``Quantity`` objects;
            when False, plain floats.
    """
    # Remove spaces
    val = val.lstrip().rstrip()
    # Is there a value?
    if val == "":
        return None
    # Check if int, float, or bool
    int_chars = "-0123456789"
    float_chars = int_chars + ".+e"
    if all([c in int_chars for c in val]):
        try:
            val = int(val)
        except ValueError:
            # Strings like '-' or '1-2' pass the character screen but are
            # not valid integers.
            logger.error(
                "Error processing line {} of {}: ".format(line, file)
                + "Cannot interpret value as integer."
            )
            # Return unprocessed string
            return val
    elif all([c in float_chars for c in val]):
        if units:
            try:
                val = Quantity(float(val) * unit)
            except ValueError:
                logger.error(
                    "Error processing line {} of {}: ".format(line, file)
                    + "Cannot interpret value as float."
                )
                # Return unprocessed string
                return val
        else:
            val = float(val)
    elif (val.lower() == "true") or (val.lower() == "yes"):
        val = True
    elif (val.lower() == "false") or (val.lower() == "no"):
        val = False
    elif val.lower() == "inf":
        val = np.inf
    elif val.lower() == "-inf":
        val = -np.inf
    elif val.lower() == "nan":
        val = np.nan
    # Anything else falls through unchanged (returned as the raw string).
    return val
def exact_auc(errors, thresholds):
    """
    Calculate the exact area under the recall-vs-error curve, one value per
    threshold (borrowed from
    https://github.com/magicleap/SuperGluePretrainedNetwork).
    """
    order = np.argsort(errors)
    sorted_errors = np.array(errors.copy())[order]
    recall = (np.arange(len(sorted_errors)) + 1) / len(sorted_errors)
    # Prepend the (0, 0) point so integration starts at zero error.
    sorted_errors = np.r_[0., sorted_errors]
    recall = np.r_[0., recall]
    results = []
    for threshold in thresholds:
        cut = np.searchsorted(sorted_errors, threshold)
        rec = np.r_[recall[:cut], recall[cut - 1]]
        err = np.r_[sorted_errors[:cut], threshold]
        results.append(np.trapz(rec, x=err) / threshold)
    return results
def avalanche_basic_stats(spike_matrix):
    """
    Compute avalanche counts, sizes and durations from a spike matrix.

    An avalanche is a maximal run of time bins with at least one spike
    (summed over all rows). An unfinished trailing avalanche is dropped.

    Example
    -------
    total_avalanches, avalanche_sizes, avalanche_durations = avalanche_basic_stats(spike_matrix)
    """
    # Leading 0 guarantees the series starts in a quiet state.
    activity = np.hstack(([0], spike_matrix.sum(axis=0)))
    active = pd.Series(activity >= 1)
    transitions = (active.astype(int).diff().fillna(0) != 0)
    edges = transitions.to_numpy().nonzero()[0]
    if len(edges) % 2 != 0:
        edges = edges[:-1]
    periods = edges.reshape(-1, 2)
    durations = periods[:, 1] - periods[:, 0]
    sizes = np.array([activity[start:stop].sum() for start, stop in periods])
    return len(sizes), sizes, durations
from utils import indices_to_subscripts
def parse_jax_dot_general(parameters, innodes):
    """
    Parse the JAX dot_general function.

    Parameters
    ----------
    parameters: A dict containing the parameters of the dot_general function.
        parameters['dimension_numbers'] is a tuple of tuples of the form
        ((lhs_contracting_dims, rhs_contracting_dims),
         (lhs_batch_dims, rhs_batch_dims)).
    innodes: The input nodes for the generated einsum node.

    Returns
    -------
    An einsum node equivalent to the dot_general function.

    jax dot_general reference:
    https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.dot_general.html?highlight=dot_general#jax.lax.dot_general

    Note: the dot_general is a bit different from tensordot because it has
    specific batch dimensions
    """
    assert len(innodes) == 2
    node_A, node_B = innodes
    dim_numbers = parameters['dimension_numbers']
    contract_dims, batch_dims = dim_numbers
    A_contract_dims, B_contract_dims = contract_dims
    A_batch_dims, B_batch_dims = batch_dims
    # Axes that are neither contracted nor batched survive into the output.
    A_noncontract_dims = tuple(
        sorted(
            set(range(len(innodes[0].shape))) - set(A_batch_dims) -
            set(A_contract_dims)))
    B_noncontract_dims = tuple(
        sorted(
            set(range(len(innodes[1].shape))) - set(B_batch_dims) -
            set(B_contract_dims)))
    assert len(A_contract_dims) == len(B_contract_dims)
    assert len(A_batch_dims) == len(B_batch_dims)
    # Total number of distinct einsum indices: A contributes one per axis,
    # B only adds fresh indices for its non-contracting, non-batch axes.
    dim = len(A_noncontract_dims) + len(B_noncontract_dims) + len(
        A_contract_dims) + len(A_batch_dims)
    # A's axes take the first len(A.shape) index ids, in order.
    input_indices_A = list(range(len(node_A.shape)))
    index_acc = len(node_A.shape)
    input_indices_B = [0] * len(node_B.shape)
    # Fresh index ids for B's free axes...
    for i in range(len(node_B.shape)):
        if i in B_noncontract_dims:
            input_indices_B[i] = index_acc
            index_acc += 1
    # ...while B's contracting and batch axes reuse A's ids so that einsum
    # sums/aligns them.
    for i in range(len(B_contract_dims)):
        input_indices_B[B_contract_dims[i]] = input_indices_A[
            A_contract_dims[i]]
    for i in range(len(B_batch_dims)):
        # Note: this part is not tested currently. Einsum is needed for this to be activated
        input_indices_B[B_batch_dims[i]] = input_indices_A[A_batch_dims[i]]
    assert index_acc == dim
    # Output axis order mirrors dot_general: batch dims first, then A's free
    # dims, then B's free dims.
    out_indices = [
        v for (i, v) in enumerate(input_indices_A) if i in A_batch_dims
    ]
    out_indices += [
        v for (i, v) in enumerate(input_indices_A) if i in A_noncontract_dims
    ]
    out_indices += [
        v for (i, v) in enumerate(input_indices_B) if i in B_noncontract_dims
    ]
    subscripts = indices_to_subscripts([input_indices_A, input_indices_B],
                                      out_indices, dim)
    return ad.einsum(subscripts, node_A, node_B)
from typing import Dict
from typing import Any
def get_stratum_alert_data(threshold: float, notification_channel: int) -> Dict[str, Any]:
    """Build the Grafana alert config fired when stratum drops below a threshold.

    :param threshold: Below this value, grafana should send an alert
    :type threshold: float
    :param notification_channel: Id of the notification channel the alert
        should be sent to
    :type notification_channel: int
    :return: Data of the alert
    :rtype: Dict
    """
    condition = {
        "evaluator": {"params": [threshold], "type": "lt"},
        "operator": {"type": "and"},
        "query": {"params": ["A", "5m", "now"]},
        "reducer": {"params": [], "type": "avg"},
        "type": "query",
    }
    return {
        "conditions": [condition],
        "executionErrorState": "alerting",
        "frequency": "60s",
        "handler": 1,
        "name": "Estimated hash rate on stratum alert",
        "noDataState": "alerting",
        "notifications": [{"id": notification_channel}],
    }
def opponent1(im, display=False):
    """Generate the Opponent color space channels (O1, O2, O3) of an image.

    O3 is just the intensity. Returns the merged uint8 visualization plus
    the three raw float channels.
    """
    im = img.norm(im)
    B, G, R = np.dsplit(im, 3)
    O1 = (R - G) / np.sqrt(2)
    O2 = (R + G - 2 * B) / np.sqrt(6)
    O3 = (R + G + B) / np.sqrt(3)
    # Normalize each channel to [0, 255] once and reuse for merge/display.
    channels = tuple(np.uint8(img.normUnity(ch) * 255) for ch in (O1, O2, O3))
    out = cv2.merge(channels)
    if display:
        cv2.imshow('op1', np.hstack(channels))
        cv2.waitKey(0)
    return out, O1, O2, O3
import random
def shuffle_multiset_data(rv, rv_err):
    """Bootstrap-resample paired RV values and their errors.

    Each (rv, rv_err) pair stays together; pairs are then drawn with
    replacement, so the result is a bootstrap sample of the input.

    Args:
        rv: sequence of values.
        rv_err: sequence of matching errors (same length as ``rv``).

    Returns:
        Tuple of two np.ndarray: resampled values and their errors.
    """
    # list(...) is required: zip() returns an iterator in Python 3 and
    # random.shuffle needs a mutable sequence (the original raised
    # TypeError here under Python 3).
    pairs = list(zip(rv, rv_err))
    random.shuffle(pairs)
    resampled = [random.choice(pairs) for _ in range(len(pairs))]
    rv_sh, rv_err_sh = zip(*resampled)
    return np.array(rv_sh), np.array(rv_err_sh)
import time
import re
def parse_pubdate(text):
    """Parse a date string into a Unix timestamp.

    Handles RFC 2822 dates ('Fri, 21 Nov 1997 09:55:06 -0600') and
    ISO 8601 dates with a 'Z' or numeric UTC offset suffix. Returns 0 for
    empty or unparseable input.

    >>> parse_pubdate('Fri, 21 Nov 1997 09:55:06 -0600')
    880127706
    >>> parse_pubdate('2003-12-13T00:00:00+02:00')
    1071266400
    >>> parse_pubdate('2003-12-13T18:30:02Z')
    1071340202
    >>> parse_pubdate('Mon, 02 May 1960 09:05:01 +0100')
    -305049299
    >>> parse_pubdate('')
    0
    >>> parse_pubdate('unknown')
    0
    """
    if not text:
        return 0
    # RFC 2822 style dates.
    rfc_fields = parsedate_tz(text)
    if rfc_fields is not None:
        try:
            return int(mktime_tz(rfc_fields))
        except (OverflowError, ValueError):
            logger.warning('bad pubdate %s is before epoch or after end of time (2038)', rfc_fields)
            return 0
    # ISO 8601 style dates with an optional 'Z' / [+-]HH:MM suffix.
    try:
        fields = time.strptime(text[:19], '%Y-%m-%dT%H:%M:%S')
        tz_match = re.match(r'^(?:Z|([+-])([0-9]{2})[:]([0-9]{2}))$', text[19:])
        if tz_match:
            offset = 0
            if tz_match.group(1):
                offset = 3600 * int(tz_match.group(2)) + 60 * int(tz_match.group(3))
                if tz_match.group(1) == '-':
                    offset = -offset
            return int(mktime_tz(tuple(fields) + (offset,)))
        # No recognizable offset: interpret as local time.
        return int(time.mktime(fields))
    except Exception:
        pass
    logger.error('Cannot parse date: %s', repr(text))
    return 0
from typing import List
def find_ngram(x: np.ndarray, occurence: int = 2) -> List[List[int]]:
    """
    Group consecutive values of ``x`` into n-grams of length ``occurence``.

    Runs of consecutive integers are located first; runs shorter than
    ``occurence`` are dropped, runs whose length is an exact multiple of
    ``occurence`` are cut into disjoint chunks, and other longer runs are
    expanded into every sliding window of length ``occurence``.

    :param x: array of integers (runs are detected on consecutive values)
    :type x: ndarray
    :param occurence: number of successive values per group (must be non-zero)
    :type occurence: int
    :return: list of groups, each of length ``occurence``
    :rtype: List[List[int]]

    Example:
    --------
    >>> a = np.array([1, 2, 5, 7, 8, 10])
    >>> find_ngram(a)
    [[1, 2], [7, 8]]

    (The previous docstring example used invalid syntax and a wrong
    expected result; the return annotation was also List[int].)
    """
    assert occurence != 0
    save_group_index = []
    # index - value is constant within a run of consecutive values.
    for _, run in groupby(enumerate(x), lambda pair: pair[0] - pair[1]):
        index_ngram = list(map(itemgetter(1), run))
        num_occurence = len(index_ngram)
        if num_occurence < occurence:
            continue
        if num_occurence == occurence:
            save_group_index.append(index_ngram)
        elif num_occurence % occurence == 0:
            # Exact multiple: cut into disjoint chunks.
            chunk_iter = iter(index_ngram)
            save_group_index += [[next(chunk_iter) for _ in range(occurence)]
                                 for _ in range(num_occurence // occurence)]
        else:
            # Otherwise: every sliding window of the run.
            save_group_index += [index_ngram[i:i + occurence]
                                 for i in range(num_occurence - occurence + 1)]
    return save_group_index
import sys
def list2ids(listf, verbose=False):
    """Read a set of IDs from a text file, one record per line.

    Only the first whitespace-separated token of each line is kept.

    :param listf: file of ids
    :param verbose: more output
    :return: a set of IDS
    """
    if verbose:
        sys.stderr.write(f"{bcolors.GREEN} Reading IDs from text file: {listf}{bcolors.ENDC}")
    with open(listf, 'r') as handle:
        return {line.strip().split(" ")[0] for line in handle}
def ewma(current, previous, weight):
    """Blend *current* into *previous* exponentially: z = w*z + (1-w)*z_1."""
    complement = 1.0 - weight
    return weight * current + complement * previous
def srs_double(f):
    """
    Creates a function prototype for the OSR routines that take the
    OSRSpatialReference object (plus an error-code pointer) and produce
    a double-precision output, with errcheck enabled.
    """
    argtypes = [c_void_p, POINTER(c_int)]
    return double_output(f, argtypes, errcheck=True)
def atmost_one(*args):
    """Check that at most one of the arguments is not None.

    (The previous docstring wrongly claimed it asserted that exactly one
    argument is not None; zero non-None arguments also returns True.)

    Returns:
        result (bool): True if zero or one argument is not None,
        False when two or more are.
    """
    return sum(1 for a in args if a is not None) <= 1
def _get_default_group():
    """
    Return the default process group created by :func:`init_process_group`.

    Raises:
        RuntimeError: if no default process group has been initialized.
    """
    if is_initialized():
        return _default_pg
    raise RuntimeError(
        "Default process group has not been initialized, "
        "please make sure to call init_process_group."
    )
def string_parameter(name, in_, description=None):
    """Build a parameter definition whose value type is ``str``.

    The location (``in_``) must still be specified by the caller.
    """
    return parameter(name, in_, str, description=description)
def greater_version(versionA, versionB):
    """
    Summary:
        Compares two dotted version strings with any number of components
        and returns the greater. Missing components count as zero, so
        '1.2' compares as '1.2.0' (the previous version raised IndexError
        when versionA had more components than versionB, and silently
        returned the shorter string in the reverse case).
    Args:
        versionA (str | None): e.g. '1.6.8'
        versionB (str | None): e.g. '1.10'
    Returns:
        greater, TYPE: str (versionA when the two are equal; the non-None /
        non-empty one when the other is None or malformed)
    """
    try:
        list_a = versionA.split('.')
        list_b = versionB.split('.')
    except AttributeError:
        return versionA or versionB  # either A or B is None
    try:
        # Pad the shorter list with zeros so unequal lengths compare safely.
        width = max(len(list_a), len(list_b))
        nums_a = [int(d) for d in list_a] + [0] * (width - len(list_a))
        nums_b = [int(d) for d in list_b] + [0] * (width - len(list_b))
    except ValueError:
        return versionA or versionB  # either A or B is '' or malformed
    if nums_b > nums_a:
        return versionB
    return versionA
from bs4 import BeautifulSoup
def get_post_mapping(content):
    """Extract blog post titles and urls from a fetched HTML page.

    Args:
        content (request.content): String content returned from requests.get

    Returns:
        list: a list of dictionaries with keys title and url, one per
        <h3> heading's anchor
    """
    soup = BeautifulSoup(content, "lxml")
    return [
        {'title': heading.a.get_text(), 'url': heading.a.attrs.get('href')}
        for heading in soup.find_all("h3")
    ]
import os
def get_output_foldername(start_year: int, end_year: int) -> str:
    """Create (if needed) an output folder for a year range and return its name.

    Args:
        start_year: first year of the range.
        end_year: last year of the range.

    Returns:
        Relative path 'app/data/<start_year>-<end_year>'.
    """
    folder_name = "app/data/{}-{}".format(start_year, end_year)
    # makedirs creates missing parent directories too, and exist_ok=True is
    # race-free where the previous exists()-then-mkdir() pair was not
    # (os.mkdir also failed outright when 'app/data' did not exist yet).
    os.makedirs(folder_name, exist_ok=True)
    return folder_name
import textwrap
def _dict_attribute_help(object_name, path, parent_object_names, attribute):
    """
    Get general help information or information on a specific attribute.

    See get_help().

    :param (str|unicode) object_name: Name of the object the attribute
        belongs to.
    :param path: Sequence of keys locating the object; shown to the user
        as a bracketed path string.
    :param parent_object_names: Parent object names, forwarded to the
        graph_reference lookups.
    :param (str|unicode) attribute: The attribute we'll get info for.
    """
    help_dict = {
        'object_name': object_name,
        'path_string': '[' + ']['.join(repr(k) for k in path) + ']',
        'parent_object_names': parent_object_names,
        'attribute': attribute
    }
    valid_attributes = graph_reference.get_valid_attributes(
        object_name, parent_object_names
    )
    help_string = (
        "Current path: {path_string}\n"
        "Current parent object_names: {parent_object_names}\n\n")
    if attribute not in valid_attributes:
        help_string += "'{attribute}' is not allowed here.\n"
        return help_string.format(**help_dict)
    attributes_dicts = graph_reference.get_attributes_dicts(
        object_name, parent_object_names
    )
    # Collect the (possibly several) definitions of this attribute across
    # the relevant attribute dicts, de-duplicating identical definitions
    # while remembering every path each one appeared under.
    attribute_definitions = []
    additional_definition = None
    meta_keys = graph_reference.GRAPH_REFERENCE['defs']['metaKeys']
    trace_names = graph_reference.TRACE_NAMES
    for key, attribute_dict in attributes_dicts.items():
        if attribute in attribute_dict:
            if object_name in trace_names and attribute == 'type':
                d = {'role': 'info'}
            else:
                # Keep only public meta keys for display.
                d = {k: v for k, v in attribute_dict[attribute].items()
                     if k in meta_keys and not k.startswith('_')}
        elif attribute in attribute_dict.get('_deprecated', {}):
            deprecate_attribute_dict = attribute_dict['_deprecated'][attribute]
            d = {k: v for k, v in deprecate_attribute_dict.items()
                 if k in meta_keys and not k.startswith('_')}
            d['deprecated'] = True
        else:
            continue
        if key == 'additional_attributes':
            additional_definition = d
            continue
        new_definition = True
        for item in attribute_definitions:
            if item['definition'] == d:
                item['paths'].append(key)
                new_definition = False
        if new_definition:
            attribute_definitions.append({'paths': [key], 'definition': d})
    if attribute_definitions:
        help_string += ("With the current parents, '{attribute}' can be "
                        "used as follows:\n\n")
    help_string = help_string.format(**help_dict)
    # Render each distinct definition under the parent(s) it applies to.
    for item in attribute_definitions:
        valid_parents_objects_names = [
            graph_reference.attribute_path_to_object_names(definition_path)
            for definition_path in item['paths']
        ]
        if len(valid_parents_objects_names) == 1:
            valid_parent_objects_names = valid_parents_objects_names[0]
            help_string += 'Under {}:\n\n'.format(
                str(valid_parent_objects_names)
            )
        else:
            help_string += 'Under any of:\n\t\t* {}\n\n'.format(
                '\n\t\t* '.join(str(tup) for tup in valid_parents_objects_names)
            )
        for meta_key, val in sorted(item['definition'].items()):
            help_string += '\t{}: '.format(meta_key)
            if meta_key == 'description':
                # TODO: https://github.com/plotly/streambed/issues/3950
                if isinstance(val, list) and attribute == 'showline':
                    val = val[0]
                lines = textwrap.wrap(val, width=LINE_SIZE)
                help_string += '\n\t\t'.join(lines)
            else:
                help_string += '{}'.format(val)
            help_string += '\n'
        help_string += '\n\n'
    if additional_definition:
        help_string += 'Additionally:\n\n'
        for item in sorted(additional_definition.items()):
            help_string += '\t{}: {}\n'.format(*item)
        help_string += '\n'
    return help_string
def len_iter(iterator):
    """Count items in an iterator (consumes it)."""
    count = 0
    for _ in iterator:
        count += 1
    return count
import copy
def clones(module, N):
    """Produce a ModuleList of N deep-copied, identical layers."""
    layers = [copy.deepcopy(module) for _ in range(N)]
    return nn.ModuleList(layers)
import re
def form_transfer(form):
    """Parse an R-style regression formula string into its column lists.

    Expected layout: ``'y~x1+x2|id+firm|id'`` i.e.
    dependent_variable ~ continuous_variables | fixed_effects | clusters,
    optionally with an embedded IV block ``'(endog1|endog2~iv1+iv2)'``.

    :param form: formula in string type, e.g. 'y~x1+x2|id+firm|id'.
    :return: tuple of lists (out_col, consist_col, category_col,
        cluster_col, fake_x, iv_col). A '0' placeholder (or an omitted
        segment) yields an empty list for the category/consist columns.
    :raises NameError: if the formula contains more segments than supported.
    """
    form = form.replace(' ', '')
    out_col, consist_col, category_col, cluster_col, fake_x, iv_col = [], [], [], [], [], []
    # Pull out an optional '(endogenous~instruments)' IV block first.
    ivfinder = re.compile(r'[(](.*?)[)]', re.S)
    iv_expression = re.findall(ivfinder, form)
    if iv_expression:
        form = re.sub(r'[(](.*?)[)]', "", form)
        iv_expression = ''.join(iv_expression)
        iv_expression = iv_expression.split('~')
        fake_x = iv_expression[0].split('|')
        iv_col = iv_expression[1].split('+')
    form = form.split('~')
    pos = 0
    for part in form:
        part = part.split('|')
        for p2 in part:
            pos += 1
            p2 = p2.split('+')
            if pos == 1:
                out_col = p2
            elif pos == 2:
                consist_col = p2
            elif pos == 3:
                category_col = p2
            elif pos == 4:
                cluster_col = p2
            elif pos == 5:
                iv_col = iv_col
            else:
                raise NameError('Invalid formula, please refer to the right one')
    # '0' is an explicit "no such columns" placeholder. Guard the indexing
    # so short formulas like 'y~x1' no longer raise IndexError on the
    # empty lists.
    if category_col and category_col[0] == '0':
        category_col = []
    if consist_col and consist_col[0] == '0':
        consist_col = []
    return out_col, consist_col, category_col, cluster_col, fake_x, iv_col
import os
import json
import six
def merge_metadata(preprocess_output_dir, transforms_file):
    """Merge schema, analysis, and transforms files into one python object.

    Args:
        preprocess_output_dir: the output folder of preprocessing. Should contain
            the schema, and the numerical and categorical
            analysis files.
        transforms_file: the training transforms file.

    Returns:
        A dict in the form
        {
            csv_header: [name1, name2, ...],
            csv_defaults: {name1: value, name2: value},
            key_column: name,
            target_column: name,
            categorical_columns: []
            numerical_columns: []
            transforms: { name1: {transform: scale, value: 2},
                          name2: {transform: embedding, dim: 50}, ...
            }
            vocab_stats: { name3: {n_classes: 23, labels: ['1', '2', ..., '23']},
                           name4: {n_classes: 102, labels: ['red', 'blue', ...]}}
        }

    Raises:
        ValueError: if one of the input metadata files is wrong.
    """
    numerical_anlysis_file = os.path.join(preprocess_output_dir,
                                          NUMERICAL_ANALYSIS)
    schema_file = os.path.join(preprocess_output_dir, SCHEMA_FILE)
    # All three inputs are JSON; python_portable_string normalizes
    # bytes/str across Python 2 and 3.
    numerical_anlysis = json.loads(
        python_portable_string(
            file_io.read_file_to_string(numerical_anlysis_file)))
    schema = json.loads(
        python_portable_string(file_io.read_file_to_string(schema_file)))
    transforms = json.loads(
        python_portable_string(file_io.read_file_to_string(transforms_file)))
    result_dict = {}
    result_dict['csv_header'] = [col_schema['name'] for col_schema in schema]
    result_dict['key_column'] = None
    result_dict['target_column'] = None
    result_dict['categorical_columns'] = []
    result_dict['numerical_columns'] = []
    result_dict['transforms'] = {}
    result_dict['csv_defaults'] = {}
    result_dict['vocab_stats'] = {}
    # get key column (required; identified by its 'key' transform).
    for name, trans_config in six.iteritems(transforms):
        if trans_config.get('transform', None) == 'key':
            result_dict['key_column'] = name
            break
    if result_dict['key_column'] is None:
        raise ValueError('Key transform missing form transfroms file.')
    # get target column. Defaults to the first schema column when no
    # explicit 'target' transform is present.
    result_dict['target_column'] = schema[0]['name']
    for name, trans_config in six.iteritems(transforms):
        if trans_config.get('transform', None) == 'target':
            result_dict['target_column'] = name
            break
    if result_dict['target_column'] is None:
        raise ValueError('Target transform missing from transforms file.')
    # Get the numerical/categorical columns (key column is neither).
    for col_schema in schema:
        col_name = col_schema['name']
        col_type = col_schema['type'].lower()
        if col_name == result_dict['key_column']:
            continue
        if col_type == 'string':
            result_dict['categorical_columns'].append(col_name)
        elif col_type == 'integer' or col_type == 'float':
            result_dict['numerical_columns'].append(col_name)
        else:
            raise ValueError('Unsupported schema type %s' % col_type)
    # Get the transforms (key and target transforms are excluded).
    for name, trans_config in six.iteritems(transforms):
        if name != result_dict['target_column'] and name != result_dict['key_column']:
            result_dict['transforms'][name] = trans_config
    # Get the vocab_stats
    for name in result_dict['categorical_columns']:
        if name == result_dict['key_column']:
            continue
        label_values = get_vocabulary(preprocess_output_dir, name)
        # Non-target columns get an extra '' label to stand for missing values.
        if name != result_dict['target_column'] and '' not in label_values:
            label_values.append('')  # append a 'missing' label.
        n_classes = len(label_values)
        result_dict['vocab_stats'][name] = {'n_classes': n_classes,
                                            'labels': label_values}
    # Get the csv_defaults
    for col_schema in schema:
        name = col_schema['name']
        col_type = col_schema['type'].lower()
        default = transforms.get(name, {}).get('default', None)
        if name == result_dict['target_column']:
            if name in result_dict['numerical_columns']:
                default = float(default or 0.0)
            else:
                default = default or ''
        elif name == result_dict['key_column']:
            if col_type == 'string':
                default = str(default or '')
            elif col_type == 'float':
                default = float(default or 0.0)
            else:
                default = int(default or 0)
        else:
            if col_type == 'string':
                default = str(default or '')
                if default not in result_dict['vocab_stats'][name]['labels']:
                    raise ValueError('Default %s is not in the vocab for %s' %
                                     (default, name))
            else:
                # Numerical columns fall back to the column mean from the
                # numerical analysis when no default was supplied.
                default = float(default or numerical_anlysis[name]['mean'])
        result_dict['csv_defaults'][name] = default
    validate_metadata(result_dict)
    return result_dict
def get_user(request):
    """
    Returns the user model instance associated with the given request session.
    If no user is retrieved an instance of `MojAnonymousUser` is returned.
    """
    user = None
    try:
        user_id = request.session[SESSION_KEY]
        token = request.session[AUTH_TOKEN_SESSION_KEY]
        user_data = request.session[USER_DATA_SESSION_KEY]
        backend_path = request.session[BACKEND_SESSION_KEY]
    except KeyError:
        # Incomplete auth data in the session: fall through to anonymous.
        pass
    else:
        # Only trust backends that are still configured.
        if backend_path in settings.AUTHENTICATION_BACKENDS:
            backend = load_backend(backend_path)
            user = backend.get_user(user_id, token, user_data)
            # Verify the session
            if hasattr(user, 'get_session_auth_hash'):
                session_hash = request.session.get(HASH_SESSION_KEY)
                # constant_time_compare avoids leaking hash information
                # through comparison timing.
                session_hash_verified = session_hash and constant_time_compare(
                    session_hash,
                    user.get_session_auth_hash()
                )
                if not session_hash_verified:
                    # Stale or tampered session: wipe it and fall back to
                    # an anonymous user.
                    request.session.flush()
                    user = None
    return user or MojAnonymousUser()
import pprint
def main(database):
    """Create the blog's articles and category tables.

    :type database: db.BlogDB
    :return: True when at least one CREATE TABLE statement succeeded.
    """
    sql_create_articles = 'CREATE TABLE {} '.format(database.table_name['articles']) + \
        '(id INTEGER PRIMARY KEY AUTOINCREMENT, slug CHAR(100) NOT NULL UNIQUE, cat_id INT,' + \
        'title NCHAR(100) NOT NULL, md_content TEXT NOT NULL, html_content TEXT NOT NULL, ' + \
        'author NCHAR(30) NOT NULL, time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP)'
    sql_create_cat = 'CREATE TABLE {} '.format(database.table_name['category']) + \
        '(id INTEGER PRIMARY KEY AUTOINCREMENT, slug CHAR(100) UNIQUE, name NCHAR(100) NOT NULL)'
    statements = [sql_create_articles, sql_create_cat]
    print('INIT...')
    pprint.pprint(statements)
    succeeded = False
    conn = database.connect()
    for statement in statements:
        try:
            conn.execute(statement)
            succeeded = True
        except Exception as exc:
            # A failed statement is rolled back but does not stop the rest.
            print(exc)
            conn.rollback()
    conn.commit()
    conn.close()
    return succeeded
def fav_rate(y_pred_group):
    """
    Gets rate of favorable outcome.

    :param y_pred_group: Model-predicted labels of test set for privileged/unprivileged group
    :type y_pred_group: `np.array`
    :return: rate of favorable outcome (0 for an empty group)
    :rtype: `float`
    """
    # len() handles both lists and ndarrays. The previous `== []` check
    # relied on deprecated NumPy list-comparison semantics and, for an
    # empty ndarray, evaluated falsy and fell through to a divide-by-zero.
    if len(y_pred_group) == 0:
        return 0
    return num_pos(y_pred_group) / y_pred_group.shape[0]
def parse_date(datestring):
    """Parse a date/time string into a tuple of integers.

    Supported formats are ``YYYY-MM-DD`` and ``YYYY-MM-DD HH:MM:SS``.
    Missing month/day components default to 1 and missing time components
    default to 0.

    :param datestring: The date/time string to parse.
    :returns: A tuple ``(year, month, day, hour, minute, second)`` of integers.
    :raises: :exc:`InvalidDate` when the date cannot be parsed.

    Examples:

    >>> parse_date('2013-06-17')
    (2013, 6, 17, 0, 0, 0)
    >>> parse_date('2013-06-17 02:47:42')
    (2013, 6, 17, 2, 47, 42)
    """
    try:
        parts = datestring.split()
        if len(parts) >= 2:
            # Separate date and time fields, padding with defaults.
            year, month, day = ([int(f) for f in parts[0].split('-')] + [1, 1])[:3]
            hour, minute, second = ([int(f) for f in parts[1].split(':')] + [0, 0, 0])[:3]
            return (year, month, day, hour, minute, second)
        year, month, day = ([int(f) for f in datestring.split('-')] + [1, 1])[:3]
        return (year, month, day, 0, 0, 0)
    except Exception:
        msg = "Invalid date! (expected 'YYYY-MM-DD' or 'YYYY-MM-DD HH:MM:SS' but got: %r)"
        raise InvalidDate(format(msg, datestring))
def read_datafiles(filepath):
    """Read a data file and return its entire contents as a string.

    Example data-reading helper; adjust for the specific format needed
    by the project.

    Parameters
    ----------
    filepath : str
        Path of the file to read.

    Returns
    -------
    str
        The full text content of the file.
    """
    with open(filepath) as data_file:
        return data_file.read()
def get_outputs(var_target_total, var_target_primary, reg, layer):
    """Construct standard `vlne` output layers.

    Appends a one-unit Dense head on top of `layer` for each requested
    target; passing None for a target variable skips that head.  The
    'target_total' head (when present) always precedes 'target_primary'.
    """
    heads = []
    for target_var, head_name in (
        (var_target_total, 'target_total'),
        (var_target_primary, 'target_primary'),
    ):
        if target_var is not None:
            heads.append(
                Dense(1, name = head_name, kernel_regularizer = reg)(layer)
            )
    return heads
import os
def getJsonPath(name, moduleFile):
    """Locate a JSON configuration file.

    Looks in the current working directory first; if the file is not found
    there, falls back to the directory containing *moduleFile*.  The chosen
    path is recorded in the module-level ``jsonPathDict`` cache either way.
    """
    cwdCandidate = os.path.join(os.getcwd(), name)
    if os.path.isfile(cwdCandidate):
        jsonPathDict[name] = cwdCandidate
        return cwdCandidate
    moduleDir = os.path.abspath(os.path.dirname(moduleFile))
    moduleCandidate = os.path.join(moduleDir, '.', name)
    jsonPathDict[name] = moduleCandidate
    return moduleCandidate
import functools
import operator
def flatten_list(l):
    """Flatten a list of lists into a single flat list."""
    return [item for sublist in l for item in sublist]
import os
def js_to_python(in_file, out_file=None, use_qgis=True, github_repo=None):
    """Converts an Earth Engine JavaScript to Python script.

    Args:
        in_file (str): File path of the input JavaScript.
        out_file (str, optional): File path of the output Python script. Defaults to None.
        use_qgis (bool, optional): Whether to add "from ee_plugin import Map \n" to the output script. Defaults to True.
        github_repo (str, optional): GitHub repo url. Defaults to None.

    Returns:
        str : The converted Python script text (also written to out_file).
    """
    in_file = os.path.abspath(in_file)
    if out_file is None:
        out_file = in_file.replace(".js", ".py")
    # Fall back to paths relative to this module's directory.
    root_dir = os.path.dirname(os.path.abspath(__file__))
    if not os.path.isfile(in_file):
        in_file = os.path.join(root_dir, in_file)
    if not os.path.isfile(out_file):
        out_file = os.path.join(root_dir, out_file)
    is_python = False
    add_github_url = False
    qgis_import_str = ''
    if use_qgis:
        qgis_import_str = "from ee_plugin import Map \n"
    github_url = ""
    if github_repo is not None:
        github_url = "# GitHub URL: " + github_repo + in_file + "\n\n"
    math_import = False
    math_import_str = ""
    lines = []
    # First pass: detect whether the input is already Python ('import ee')
    # and whether a math import will be needed (use_math is a sibling helper).
    with open(in_file) as f:
        lines = f.readlines()
        math_import = use_math(lines)
        for line in lines:
            line = line.strip()
            if line == 'import ee':
                is_python = True
    if math_import:
        math_import_str = "import math\n"
    output = ""
    if is_python:  # only update the GitHub URL if it is already a GEE Python script
        output = github_url + ''.join(map(str, lines))
    else:  # deal with JavaScript
        header = github_url + "import ee \n" + qgis_import_str + math_import_str
        function_defs = []
        output = header + "\n"
        with open(in_file) as f:
            lines = f.readlines()
            # print('Processing {}'.format(in_file))
            lines = check_map_functions(lines)
            for index, line in enumerate(lines):
                # Strip inline /* color ... */ style hints.
                if ('/* color' in line) and ('*/' in line):
                    line = line[:line.index('/*')].lstrip() + line[(line.index('*/')+2):]
                # Convert JS function declarations to Python 'def' lines,
                # removing the opening brace and its matching closer.
                if ("= function" in line) or ("=function" in line) or line.strip().startswith("function"):
                    bracket_index = line.index("{")
                    matching_line_index, matching_char_index = find_matching_bracket(
                        lines, index, bracket_index)
                    line = line[:bracket_index] + line[bracket_index+1:]
                    if matching_line_index == index:
                        line = line[:matching_char_index] + \
                            line[matching_char_index+1:]
                    else:
                        # Closing brace is on a later line: drop it in place.
                        tmp_line = lines[matching_line_index]
                        lines[matching_line_index] = tmp_line[:matching_char_index] + \
                            tmp_line[matching_char_index+1:]
                    line = line.replace(" = function", "").replace(
                        "=function", '').replace("function ", '')
                    line = " " * (len(line) - len(line.lstrip())) + "def " + line.strip() + ":"
                elif "{" in line:
                    bracket_index = line.index("{")
                    matching_line_index, matching_char_index = find_matching_bracket(
                        lines, index, bracket_index)
                    # Brace pair on one line containing ':' is a JS object
                    # literal -- leave it alone.
                    if (matching_line_index == index) and (':' in line):
                        pass
                    elif ('for (' in line) or ('for(' in line):
                        # Rewrite JS for-loops via the sibling helper, then
                        # strip the (possibly relocated) brace pair.
                        line = convert_for_loop(line)
                        lines[index] = line
                        bracket_index = line.index("{")
                        matching_line_index, matching_char_index = find_matching_bracket(lines, index, bracket_index)
                        tmp_line = lines[matching_line_index]
                        lines[matching_line_index] = tmp_line[:matching_char_index] + tmp_line[matching_char_index+1:]
                        line = line.replace('{', '')
                if line is None:
                    line = ''
                # Token-level JS -> Python substitutions.
                line = line.replace("//", "#")
                line = line.replace("var ", "", 1)
                line = line.replace("/*", '#')
                line = line.replace("*/", '#')
                line = line.replace("true", "True").replace("false", "False")
                line = line.replace("null", "{}")
                line = line.replace(".or", ".Or")
                line = line.replace(".and", '.And')
                line = line.replace(".not", '.Not')
                line = line.replace('visualize({', 'visualize(**{')
                line = line.replace('Math.PI', 'math.pi')
                line = line.replace('Math.', 'math.')
                line = line.replace('= new', '=')
                line = line.rstrip()
                if line.endswith("+"):
                    # Continue string concatenation across lines.
                    line = line + " \\"
                elif line.endswith(";"):
                    line = line[:-1]
                if line.lstrip().startswith('*'):
                    # Block-comment continuation lines become Python comments.
                    line = line.replace('*', '#')
                if (":" in line) and (not line.strip().startswith("#")) and (not line.strip().startswith('def')) and (not line.strip().startswith(".")):
                    line = format_params(line)
                # Drop a comment line that would break a following chained call.
                if index < (len(lines) - 1) and line.lstrip().startswith("#") and lines[index+1].lstrip().startswith("."):
                    line = ''
                if line.lstrip().startswith("."):
                    # Method-chain continuation: splice onto the previous line
                    # with a backslash continuation.
                    if "#" in line:
                        line = line[:line.index("#")]
                    output = output.rstrip() + " " + "\\" + "\n" + line + "\n"
                else:
                    output += line + "\n"
    out_dir = os.path.dirname(out_file)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    with open(out_file, 'w') as f:
        f.write(output)
    return output
def getPutDeltas(delta, optType):
    """Convert option deltas to out-of-the-money put-style deltas.

    delta: array or list of deltas
    optType: array or list of option types, "C" (call) or "P" (put)
    :return: list where calls map to 1 - delta and puts to |delta|
    """
    return [
        1 - delta[i] if optType[i] == "C" else abs(delta[i])
        for i in range(len(delta))
    ]
def test_folders_has_path_long(list_folder_path, max_path=260):
    """Check a series of folders for files whose path exceeds max_path.

    Args:
        list_folder_path (list): folder paths to be tested
        max_path (int, optional): maximum permitted file-path length.
            Defaults to 260.

    Returns:
        dict: {'approved': [{'folder_path': ..., 'list_file_path_long': []}],
               'rejected': [{'folder_path': ..., 'list_file_path_long': [...]}]}
    """
    approved = []
    rejected = []
    for folder_path in list_folder_path:
        outcome = test_folder_has_file_path_long(folder_path, max_path)
        entry = {
            'folder_path': folder_path,
            'list_file_path_long': outcome['list_file_path_long'],
        }
        # A folder passes only when the helper reports no over-long paths.
        (approved if outcome['result'] else rejected).append(entry)
    return {'approved': approved, 'rejected': rejected}
def get_positive_empirical_prob(labels: tf.Tensor) -> float:
    """Compute the empirical probability of a positive binary label.

    Args:
        labels: tf.Tensor, batch of binary labels

    Returns:
        proportion of non-zero labels in the batch
    """
    positives = tf.math.count_nonzero(labels)
    batch_size = labels.get_shape()[0]
    return positives / batch_size
def networks_get_by_id(context, network_id):
    """Get a given network by its id.

    Thin wrapper that delegates to the active database implementation
    (module-level ``IMPL``, defined elsewhere in this module).

    :param context: request context passed through to the DB layer
    :param network_id: identifier of the network to fetch
    :returns: whatever ``IMPL.networks_get_by_id`` returns for the id
    """
    return IMPL.networks_get_by_id(context, network_id)
def get_particles(range_diff, image, clustering_settings):
    """Get the detections using Gary's original method

    Returns a list of particles and their properties

    Parameters
    ----------
    range_diff:
        output from background subtraction
    image:
        original image frame for intensity calculation
        may have a different shape than range_diff
    clustering_settings:
        hyperparameters for the clustering algorithm; must contain
        ['dbscan']['epsilon_px'], ['dbscan']['min_weight'] and
        ['filters']['min_px']

    Returns
    -------
    list of dicts with keys:
        pos: (y, x) coordinates of particle
        size: number of pixels in cluster
        bbox: ((top, left), (height, width))
        max_intensity: max intensity of pixels (list)
    """
    # select points above a threshold, and get their weights
    idx = (range_diff > 0)
    points = np.column_stack(np.nonzero(idx))
    weights = range_diff[idx].ravel().astype(float)
    # empty list to store particles
    particles = []
    if len(points) > 0:
        # use DBSCAN to cluster the points, weighting each point by its
        # background-subtraction magnitude
        dbscan = DBSCAN(eps=clustering_settings['dbscan']['epsilon_px'],
                        min_samples=clustering_settings['dbscan']['min_weight'])
        labels = dbscan.fit_predict(points, sample_weight=weights)
        # labels are 0..n_clusters-1; noise points get label -1 and are skipped
        n_clusters = int(np.max(labels)) + 1
        for l in range(n_clusters):
            idx = (labels == l)
            # must have specified minimum number of points
            # keep track of clusters that fall below this thresh
            if np.sum(idx) < clustering_settings['filters']['min_px']:
                continue
            relevant = points[idx]
            # Build particle properties
            particle = {}
            # center of particle (mean of member pixel coordinates)
            particle['pos'] = [round(i, 1) for i in np.average(relevant, axis=0).tolist()]
            # number of pixels in particle
            particle['size'] = int(np.sum(idx))
            # bounding box top left anchor
            # bounding box calculations
            bbox_y, bbox_x = int(np.min(relevant[:,0])), int(np.min(relevant[:,1]))
            bbox_h, bbox_w = int(np.max(relevant[:,0]) - np.min(relevant[:,0])), \
                int(np.max(relevant[:,1]) - np.min(relevant[:,1]))
            particle['bbox'] = ((bbox_y, bbox_x), (bbox_h, bbox_w))
            # convert bounding box indices to original resolution
            # (image may be larger than the background-subtracted frame)
            yres_ratio = image.shape[0] / range_diff.shape[0]
            xres_ratio = image.shape[1] / range_diff.shape[1]
            bbox_y_ores = int(bbox_y * yres_ratio)
            bbox_h_ores = int(bbox_h * yres_ratio)
            bbox_x_ores = int(bbox_x * xres_ratio)
            bbox_w_ores = int(bbox_w * xres_ratio)
            # max intensity for each channel
            if len(image.shape) == 2:
                # grayscale original image, single channel
                particle['max_intensity'] = [int(np.amax(image[bbox_y_ores:bbox_y_ores+bbox_h_ores+1,
                    bbox_x_ores:bbox_x_ores+bbox_w_ores+1]))]
            else:
                # RGB original image, max per channel
                particle['max_intensity'] = np.amax(image[bbox_y_ores:bbox_y_ores+bbox_h_ores+1,
                    bbox_x_ores:bbox_x_ores+bbox_w_ores+1],
                    axis=(0,1)).tolist()
            particles.append(particle)
    return particles
def validate_file(file):
    """Validate that the file exists and is a proper puzzle file.

    Preemptively perform all the checks that are done in the input loop of sudoku_solver.py.

    :param file: name of file to validate
    :return True if the file passes all checks, False if it fails
    """
    try:
        # Use a context manager so the handle is closed even on error
        # (the original never closed the file).
        with open(file) as open_file:
            file_contents = open_file.read()
        # Keep only puzzle characters: digits and '.' placeholders.
        puzzle_string = ''.join(
            char for char in file_contents if char.isdigit() or char == '.')
        if len(puzzle_string) != 81:
            print('{} in incorrect format.\nSee README.md for accepted puzzle formats.'.format(file))
            return False
        # A clue is any filled cell (neither '.' nor '0').
        num_clues = sum(1 for char in puzzle_string if char != '.' and char != '0')
        if num_clues >= 17:
            return True
        print('{} is an unsolvable puzzle. It has {} clues.\n'
              'There are no valid sudoku puzzles with fewer than 17 clues.'.format(file, num_clues))
        return False
    except OSError:
        print('File {} not found.'.format(file))
        return False
def poly(coeffs, n):
    """Evaluate a polynomial at n.

    coeffs[i] is the coefficient of n**i (lowest order first).
    """
    return sum(c * n ** i for i, c in enumerate(coeffs))
from typing import Iterable
from typing import Dict
from typing import Hashable
from typing import Tuple
def group_by_vals(
    features: Iterable[feature]
) -> Dict[Hashable, Tuple[feature, ...]]:
    """
    Construct a dict grouping features by their values.

    Returns a dict where each value is mapped to a tuple of features that have
    that value. Does not check for duplicate features.

    Delegates to the module-level ``group_by`` helper, keyed on each
    feature's ``val`` property.

    :param features: An iterable of features to be grouped by value.
    :return: Mapping from feature value to the tuple of features carrying it.
    """
    # Ignore type of key due to mypy false alarm. - Can
    # Use the unbound property getter so each feature's .val is read lazily.
    key = feature.val.fget  # type: ignore
    return group_by(iterable=features, key=key)
def coords2cells(coords, meds, threshold=15):
    """Match holographic target coordinates to cell medoids, plane by plane.

    Planes are paired by sorting the unique z-values on each side, which
    avoids optotune <-> holography mismatch weirdness.  `threshold` is the
    maximum in-plane distance in pixels for a match.

    Returns
    -------
    locs : (m, 3) array of matched medoid locations
    distances : (m,) array of match distances
    ismatched : (n_targets,) boolean mask of which targets matched
    """
    matched_meds = []
    matched_dists = []
    matched_mask = []
    for holo_z, opto_z in zip(np.unique(coords[:, 2]), np.unique(meds[:, 2])):
        plane_meds = meds[meds[:, 2] == opto_z]
        plane_targs = coords[coords[:, 2] == holo_z]
        tree = KDTree(plane_meds[:, :2])
        dists, idxs = tree.query(plane_targs[:, :2])
        dists = dists.squeeze()
        keep = dists < threshold
        idxs = idxs.squeeze()[keep]
        matched_meds.append(plane_meds[idxs, :])
        matched_dists.append(dists[keep])
        matched_mask.append(keep)
    return np.vstack(matched_meds), np.concatenate(matched_dists), np.concatenate(matched_mask)
def how_many(num):
    """Count the number of digits of `num`."""
    digits = 1
    while num >= 10:
        num //= 10
        digits += 1
    return digits
def line_length_check(line):
    """Return True if the line exceeds the 79-character limit."""
    return len(line) > 79
def load_image_into_numpy_array(path):
    """Load an image from file into a numpy array.

    Puts image into numpy array to feed into tensorflow graph.
    Note that by convention we put it into a numpy array with shape
    (height, width, channels), where channels=3 for RGB.

    Args:
        path: a file path.

    Returns:
        uint8 numpy array with shape (img_height, img_width, 3)
    """
    raw_bytes = tf.io.gfile.GFile(path, 'rb').read()
    pil_image = Image.open(BytesIO(raw_bytes))
    (width, height) = pil_image.size
    flat_pixels = np.array(pil_image.getdata())
    return flat_pixels.reshape((height, width, 3)).astype(np.uint8)
def possibly_intersecting(dataframebounds, geometry, buffer=0):
    """
    Fast bounding-box pre-filter for profile/branch intersection tests.

    Finding intersecting profiles for each branch is slow on large datasets,
    so first select profiles whose (optionally buffered) bounding boxes
    overlap a box around the geometry; the exact intersection test can then
    be run on that much smaller selection.

    Parameters
    ----------
    dataframebounds : numpy.array
        Rows [xmin, ymin, xmax, ymax] of the candidate bounds.
    geometry : shapely.geometry.Polygon
    buffer : float, optional
        Margin added to the candidate bounds on every side.
    """
    geo_xmin, geo_ymin, geo_xmax, geo_ymax = geometry.bounds
    overlaps_x = (dataframebounds[0] - buffer < geo_xmax) & (
        dataframebounds[2] + buffer > geo_xmin)
    overlaps_y = (dataframebounds[1] - buffer < geo_ymax) & (
        dataframebounds[3] + buffer > geo_ymin)
    # Get intersecting profiles
    return overlaps_x & overlaps_y
def compute_complexity(L, k=5, from_evalues=False, from_gram=False):
    """
    Compute a variety of internal-representation complexity metrics at once.

    Parameters
    ----------
    L : numpy.ndarray, shape (n_samples, ...)
        internal representation matrix or precomputed eigenvalues
    k : int, default=5
        number of eigenvalues for KF and Schatten methods
    from_evalues : boolean, default=False
        If True, then L is assumed to be the precomputed eigenvalues
    from_gram : boolean, default=False
        If True, then L is assumed to be a square kernel (Gram) matrix.
        Otherwise an svd will be performed on L where the Gram matrix is LL^T
        which improves computational efficiency.

    Returns
    -------
    complexity_dict : dict
        dictionary of (metric_name, metric_value) pairs for L
    """
    # Obtain the eigenvalue spectrum once and share it across all metrics.
    if from_evalues:
        evalues = L
    elif from_gram:
        evalues = np.linalg.svd(L, compute_uv=False, hermitian=True)
    else:
        # Singular values of L squared are the eigenvalues of LL^T;
        # pad with zeros up to n_samples.
        singular_values = np.linalg.svd(L, compute_uv=False)
        evalues = np.zeros(L.shape[0])
        evalues[:len(singular_values)] = singular_values ** 2
    KF_norms, KF_ratios, KF_kers, Schattens = get_KF_Schatten_norms(
        evalues, k, from_evalues=True)
    h_star, h_argmin = get_local_rad_bound(
        evalues, normalize=True, from_evalues=True)
    return {
        'KF-raw': KF_norms,
        'KF-ratio': KF_ratios,
        'KF-kernel': KF_kers,
        'Schatten': Schattens,
        'h*': h_star,
        'h_argmin': h_argmin,
    }
import os
import io
import logging
import traceback
def write_html_report(xml_root, work_dir, html_dir, html_name = 'index.html'):
    """ Write the HTML report in a given directory

    :param xml_root: parsed JUnit-style XML root whose 'testcase' elements
        are rendered; may be falsy, in which case only header/footer are
        written
    :param work_dir: working directory passed through to html_testcase
    :param html_dir: output directory, created if missing
    :param html_name: output file name inside html_dir
    """
    if not os.path.isdir(html_dir):
        os.makedirs(html_dir)
    html_path = os.path.join(html_dir, html_name)
    with io.open(html_path, 'w', encoding='utf8') as html_file:
        # html_header / parse_testcase_xml / html_testcase are sibling
        # helpers defined elsewhere in this module.
        header = html_header(xml_root)
        html_file.write(header)
        if xml_root:
            parsed_testcases = []
            for testcase in xml_root.iter('testcase'):
                try:
                    tc = parse_testcase_xml(testcase)
                    parsed_testcases.append(tc)
                except Exception as err:
                    # Best-effort: a malformed testcase is logged and skipped
                    # so the rest of the report still renders.
                    logging.warning('Unexpected error parsing testcase XML {}'.format(testcase))
                    print(traceback.format_exc())
            # sort test cases so failed first, then sim, the by name
            def sort_key(tc):
                # Build a composite string key: failed < passed, then
                # 'sim' cases < others, then alphabetical by name.
                key = ('0' if 'sim' in tc['name'] else '1')
                key = ('0' if tc['failed'] else '1') + key
                return key + tc['name']
            for tc in sorted(parsed_testcases, key=sort_key):
                if not tc['skipped']:
                    tc_body = html_testcase(tc, work_dir, html_dir)
                    html_file.write(tc_body)
        html_file.write('</body>\n</html>\n')
def settings(request, username, args={}, tab="change_password"):
    """
    Show a page which allows the user to change his settings.

    Renders an error page when the visitor is not logged in or is viewing
    another user's settings; otherwise dispatches to the handler for the
    requested tab.  Unknown tabs fall through and return None.
    """
    if not request.user.is_authenticated():
        return render(request, "users/settings/settings_error.html", {"reason": "not_logged_in"})
    profile_user = User.objects.get(username=username)
    if request.user.id != profile_user.id:
        return render(request, "users/settings/settings_error.html", {"reason": "incorrect_user"})
    tab_handlers = {
        "change_preferences": change_preferences,
        "change_password": change_password,
        "delete_account": delete_account,
    }
    handler = tab_handlers.get(tab)
    if handler is not None:
        return handler(request, args)
import hashlib
def check_contents(md5, filepath, ignore):
    """
    Verify a file's contents against a previously computed MD5.

    md5 - the md5 sum calculated last time the data was validated as correct
    filepath - the location/file where the new data is, this is to be validated
    ignore - list of substrings; any line containing one is excluded from the
             comparison (e.g. dates that would otherwise break the md5)
    """
    with open(filepath, "r", encoding='utf-8') as file_to_check:
        kept_lines = []
        for line in file_to_check.readlines():
            if not any(pattern in line for pattern in ignore):
                kept_lines.append(line + "\n")
        data = "".join(kept_lines)
    md5_returned = hashlib.md5(data.encode('utf-8')).hexdigest()
    print("Checking Contents Via Hash:")
    print("Original: " + md5)
    print("Calculated: " + md5_returned)
    if md5 == md5_returned:
        return True  # md5 verified
    else:
        return False
def create_id(prefix=None):
    """Create an ID using the module-level ID Generator.

    When `prefix` is truthy it is forwarded to the generator.
    """
    generator = _get_generator()
    if prefix:
        return generator.create_id(prefix)
    return generator.create_id()
# NOTE(review): importing `A` from `re` gives the re.A (ASCII) flag, yet it is
# used below as a system matrix -- looks accidental; confirm A is meant to come
# from the surrounding model definition instead.
from re import A
def evaluate_policy(theta, F):
    """
    Given theta (scalar, dtype=float) and policy F (array_like), returns the
    value associated with that policy under the worst case path for {w_t}, as
    well as the entropy level.

    Relies on module-level model objects Q, R, B, C, beta and the `qe`
    (QuantEcon) and `np` imports defined elsewhere in this module.
    """
    rlq = qe.robustlq.RBLQ(Q, R, A, B, C, beta, theta)
    # Evaluate the fixed policy F: worst-case value matrix P_F and
    # entropy matrix O_F (plus constants d_F, o_F); K_F is unused here.
    K_F, P_F, d_F, O_F, o_F = rlq.evaluate_F(F)
    # Initial state: presumably [1, 0, 0]^T with a leading constant -- confirm.
    x0 = np.array([[1.], [0.], [0.]])
    value = - x0.T.dot(P_F.dot(x0)) - d_F
    entropy = x0.T.dot(O_F.dot(x0)) + o_F
    return list(map(float, (value, entropy)))
def arspec(x, order, nfft=None, fs=1):
    """Compute the spectral density using an AR model.

    An AR model of the signal is estimated through the Yule-Walker equations;
    the estimated AR coefficient are then used to compute the spectrum, which
    can be computed explicitely for AR models.

    Parameters
    ----------
    x : array-like
        input signal
    order : int
        Order of the LPC computation.
    nfft : int
        size of the fft to compute the periodogram. If None (default), the
        length of the signal is used. if nfft > n, the signal is 0 padded.
    fs : float
        Sampling rate. By default, is 1 (normalized frequency. e.g. 0.5 is the
        Nyquist limit).

    Returns
    -------
    pxx : array-like
        The psd estimate.
    fx : array-like
        Frequency grid over which the periodogram was estimated.
    """
    x = np.atleast_1d(x)
    n = x.size
    if x.ndim > 1:
        raise ValueError("Only rank 1 input supported for now.")
    if not np.isrealobj(x):
        raise ValueError("Only real input supported for now.")
    if not nfft:
        nfft = n
    if nfft < n:
        raise ValueError("nfft < signal size not supported yet")
    a, e, k = lpc(x, order)
    # This is not enough to deal correctly with even/odd size
    # Use integer division: `nfft / 2 + 1` is a float on Python 3 and would
    # raise TypeError when used as a slice index below.
    if nfft % 2 == 0:
        pn = nfft // 2 + 1
    else:
        pn = (nfft + 1) // 2
    # PSD of an AR model: 1 / |A(e^{jw})|^2, scaled by the prediction error.
    px = 1 / np.fft.fft(a, nfft)[:pn]
    pxx = np.real(np.conj(px) * px)
    pxx /= fs / e
    fx = np.linspace(0, fs * 0.5, pxx.size)
    return pxx, fx
import warnings
def T_asymmetry(x, beta):
    """Transform the input to break the symmetry of symmetric functions.

    Args:
        x (np.array): An array holding the input to be transformed.
        beta (float): Exponential value used to produce the asymmetry.

    Returns:
        The transformed input: positive entries are raised to a
        dimension-dependent power, non-positive entries pass through.
    """
    n_dims = x.shape[0]
    dim_fractions = np.arange(n_dims) / (n_dims - 1)
    with warnings.catch_warnings():
        # np.where evaluates both branches, so sqrt/power of negative entries
        # would emit warnings; silence the sqrt one as the original does.
        warnings.filterwarnings('ignore', r'invalid value encountered in sqrt')
        exponents = 1 + beta * dim_fractions * np.sqrt(x)
        return np.where(x > 0, x ** exponents, x)
def fit_improved_B2AC_numpy(points):
"""Ellipse fitting in Python with improved B2AC algorithm as described in
this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.
This version of the fitting simply applies NumPy:s methods for calculating
the conic section, modelled after the Matlab code in the paper:
.. code-block::
function a = fit_ellipse(x, y)
D1 = [x .ˆ 2, x .* y, y .ˆ 2]; % quadratic part of the design matrix
D2 = [x, y, ones(size(x))]; % linear part of the design matrix
S1 = D1’ * D1; % quadratic part of the scatter matrix
S2 = D1’ * D2; % combined part of the scatter matrix
S3 = D2’ * D2; % linear part of the scatter matrix
T = - inv(S3) * S2’; % for getting a2 from a1
M = S1 + S2 * T; % reduced scatter matrix
M = [M(3, :) ./ 2; - M(2, :); M(1, :) ./ 2]; % premultiply by inv(C1)
[evec, eval] = eig(M); % solve eigensystem
cond = 4 * evec(1, :) .* evec(3, :) - evec(2, :) .ˆ 2; % evaluate a’Ca
a1 = evec(:, find(cond > 0)); % eigenvector for min. pos. eigenvalue
a = [a1; T * a1]; % ellipse coefficients
:param points: The [Nx2] array of points to fit ellipse to.
:type points: :py:class:`numpy.ndarray`
:return: The conic section array defining the fitted ellipse.
:rtype: :py:class:`numpy.ndarray`
"""
x = points[:, 0]
y = points[:, 1]
D1 = np.vstack([x ** 2, x * y, y ** 2]).T
D2 = np.vstack([x, y, np.ones((len(x), ), dtype=x.dtype)]).T
S1 = D1.T.dot(D1)
S2 = D1.T.dot(D2)
S3 = D2.T.dot(D2)
T = -np.linalg.inv(S3).dot(S2.T)
M = S1 + S2.dot(T)
M = np.array([M[2, :] / 2, -M[1, :], M[0, :] / 2])
eval, evec = np.linalg.eig(M)
cond = (4 * evec[:, 0] * evec[:, 2]) - (evec[:, 1] ** 2)
I = np.where(cond > 0)[0]
a1 = evec[:, I[np.argmin(cond[I])]]
return np.concatenate([a1, T.dot(a1)]) | 85544ec311edce8eebdd6c143a68e5fc40b0a882 | 26,295 |
import copy
def generate_complex_topologies_and_positions(ligand_filename, protein_pdb_filename):
    """
    Generate the topologies and positions for complex phase simulations, given an input ligand file (in supported openeye
    format) and protein pdb file. Note that the input ligand file should have coordinates placing the ligand in the binding
    site.

    Parameters
    ----------
    ligand_filename : str
        Name of the file containing ligands
    protein_pdb_filename : str
        Name of the protein pdb file

    Returns
    -------
    complex_topologies_dict : dict of smiles: md.topology
        Dictionary of topologies for various complex systems
    complex_positions_dict : dict of smiles: [n, 3] array of Quantity
        Positions for corresponding complexes
    """
    ifs = oechem.oemolistream()
    ifs.open(ligand_filename)
    # get the list of molecules
    mol_list = [oechem.OEMol(mol) for mol in ifs.GetOEMols()]
    # Give each ligand a unique title and canonical Tripos atom names.
    for idx, mol in enumerate(mol_list):
        mol.SetTitle("MOL{}".format(idx))
        oechem.OETriposAtomNames(mol)
    # Key every ligand (and its OpenMM topology) by canonical SMILES.
    mol_dict = {oechem.OEMolToSmiles(mol) : mol for mol in mol_list}
    ligand_topology_dict = {smiles : forcefield_generators.generateTopologyFromOEMol(mol) for smiles, mol in mol_dict.items()}
    protein_pdbfile = open(protein_pdb_filename, 'r')
    pdb_file = app.PDBFile(protein_pdbfile)
    protein_pdbfile.close()
    receptor_positions = pdb_file.positions
    receptor_topology = pdb_file.topology
    receptor_md_topology = md.Topology.from_openmm(receptor_topology)
    n_receptor_atoms = receptor_md_topology.n_atoms
    complex_topologies = {}
    complex_positions_dict = {}
    # Build one receptor+ligand complex per ligand: receptor atoms first,
    # ligand atoms appended after.
    for smiles, ligand_topology in ligand_topology_dict.items():
        ligand_md_topology = md.Topology.from_openmm(ligand_topology)
        n_complex_atoms = ligand_md_topology.n_atoms + n_receptor_atoms
        # Deep-copy so joining does not mutate the shared receptor topology.
        copy_receptor_md_topology = copy.deepcopy(receptor_md_topology)
        complex_positions = unit.Quantity(np.zeros([n_complex_atoms, 3]), unit=unit.nanometers)
        complex_topology = copy_receptor_md_topology.join(ligand_md_topology)
        complex_topologies[smiles] = complex_topology
        ligand_positions = extractPositionsFromOEMol(mol_dict[smiles])
        complex_positions[:n_receptor_atoms, :] = receptor_positions
        complex_positions[n_receptor_atoms:, :] = ligand_positions
        complex_positions_dict[smiles] = complex_positions
    return complex_topologies, complex_positions_dict
def aten_append(mapper, graph, node):
    """ Construct a PaddleLayer that appends an element to a list.

    TorchScript example:
        %90 : int[] = aten::append(%_output_size.1, %v.1)
    Argument meanings:
        %90 (list): output, the list after appending.
        %_output_size.1 (list): the list to append to.
        %v.1 (-): the element to append.
    """
    scope_name = mapper.normalize_scope_name(node)
    layer_inputs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    layer_outputs = [inputs_name[0]]
    # Outputs of the current node: the list after the append.
    current_outputs = [inputs_name[0]]
    # Process input 0, i.e. _output_size.1 (the target list).
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
    layer_inputs["list"] = inputs_name[0]
    # Process input 1, i.e. v.1 (the element to append).
    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs, scope_name)
    layer_inputs["element"] = inputs_name[1]
    # Inputs of the current node.
    current_inputs = list(layer_inputs.values())
    graph.add_layer("prim.append", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
    return current_inputs, current_outputs
def res_from_obseravtion_data(observation_data):
    """create a generic residual dataframe filled with np.nan for
    missing information

    Parameters
    ----------
    observation_data : pandas.DataFrame
        pyemu.Pst.observation_data

    Returns
    -------
    res_df : pandas.DataFrame
        copy of `observation_data` with columns renamed
        (obsnme -> name, obsval -> measured, obgnme -> group) and
        `modelled`/`residual` columns added, filled with NaN until a
        model run supplies values.
    """
    res_df = observation_data.copy()
    # pop() removes the original column and returns it, so each line below
    # renames a column in place.
    res_df.loc[:, "name"] = res_df.pop("obsnme")
    res_df.loc[:, "measured"] = res_df.pop("obsval")
    res_df.loc[:, "group"] = res_df.pop("obgnme")
    # Use np.nan: the np.NaN alias was removed in NumPy 2.0.
    res_df.loc[:, "modelled"] = np.nan
    res_df.loc[:, "residual"] = np.nan
    return res_df
import re
def dtype_ripper(the_dtype, min_years, max_years):
    """Extract the range of years from the dtype of a structured array.

    Args:
        the_dtype (list): A list of tuples with each tuple containing two
            entries, a column heading string, and a string defining the
            data type for that column. Formatted as a numpy dtype list.
        min_years (list): The earliest years found in the imported data.
        max_years (list): The latest years found in the imported data.

    Returns:
        Updated copies of the minimum and maximum year lists, each
        extended with the minimum/maximum year (as int) found in the
        column headings of the dtype definition.
    """
    year_pattern = re.compile('^[1|2][0-9]{3}$')
    # Column headings are the first element of each dtype tuple.
    year_names = [name for name, _ in the_dtype if year_pattern.search(name)]
    min_yr = int(min(year_names))
    max_yr = int(max(year_names))
    # Return new lists rather than mutating the ones passed in.
    return min_years + [min_yr], max_years + [max_yr]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.