content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def step(x, b):
    """
    The step function for ideal quantization function in test stage.

    Returns a tensor with the same shape and dtype as ``x`` holding 1
    where ``x > b`` and 0 elsewhere (a Heaviside step at threshold ``b``).
    """
    # torch.gt yields a boolean mask; casting back to x's dtype matches
    # what filling a zeros_like tensor with 1.0 produced in-place.
    return torch.gt(x - b, 0.0).to(x.dtype)
def encode_intended_validator(
        validator_address: Union[Address, str],
        primitive: Union[bytes, None] = None,
        *,
        hexstr: Union[str, None] = None,
        text: Union[str, None] = None) -> SignableMessage:
    """
    Encode a message using the "intended validator" approach (ie~ version 0)
    defined in EIP-191_.
    Supply the message as exactly one of these three arguments:
    bytes as a primitive, a hex string, or a unicode string.
    .. WARNING:: Note that this code has not gone through an external audit.
        Also, watch for updates to the format, as the EIP is still in DRAFT.
    :param validator_address: which on-chain contract is capable of validating this message,
        provided as a checksummed address or in native bytes.
    :param primitive: the binary message to be signed
    :type primitive: bytes or int
    :param str hexstr: the message encoded as hex
    :param str text: the message as a series of unicode characters (a normal Py3 str)
    :returns: The EIP-191 encoded message, ready for signing
    :raises ValidationError: if ``validator_address`` is neither a checksum
        address nor an address in bytes form
    .. _EIP-191: https://eips.ethereum.org/EIPS/eip-191
    """
    # Annotation fix: these parameters default to None, so they must be
    # typed as optional (PEP 484 implicit-Optional is deprecated).
    if not is_valid_address(validator_address):
        raise ValidationError(
            f"Cannot encode message with 'Validator Address': {validator_address}. "
            "It must be a checksum address, or an address converted to bytes."
        )
    # The validator_address is a str or Address (which is a subtype of bytes). Both of
    # these are AnyStr, which includes str and bytes. Not sure why mypy complains here...
    canonical_address = to_canonical_address(validator_address)  # type: ignore
    message_bytes = to_bytes(primitive, hexstr=hexstr, text=text)
    return SignableMessage(
        HexBytes(b'\x00'),  # version 0, as defined in EIP-191
        canonical_address,
        message_bytes,
    )
def _ip_desc_from_proto(proto):
    """
    Convert protobuf to an IP descriptor.
    Args:
        proto (protos.keyval_pb2.IPDesc): protobuf of an IP descriptor
    Returns:
        desc (magma.mobilityd.IPDesc): IP descriptor from :proto:
    """
    ip = ip_address(proto.ip.address)
    # Rebuild the owning block in CIDR form from its network address and
    # prefix length; .exploded yields the full, unabbreviated address text.
    ip_block_addr = ip_address(proto.ip_block.net_address).exploded
    ip_block = ip_network(
        '{}/{}'.format(
            ip_block_addr, proto.ip_block.prefix_len,
        ),
    )
    # Translate the proto enum values into the string forms IPDesc expects.
    state = _desc_state_proto_to_str(proto.state)
    sid = proto.sid.id
    ip_type = _desc_type_proto_to_str(proto.type)
    return ip_descriptor.IPDesc(
        ip=ip, ip_block=ip_block, state=state,
        sid=sid, ip_type=ip_type, vlan_id=proto.vlan_id,
    )
def inheritdocstrings(cls):
    """A class decorator for inheriting method docstrings.

    >>> class A(object):
    ...     class_attr = True
    ...     def method(self):
    ...         '''Method docstring.'''
    >>> @inheritdocstrings
    ... class B(A):
    ...     def method(self):
    ...         pass
    >>> B.method.__doc__
    'Method docstring.'
    """
    for name, cls_attr in inspect.getmembers(cls, callable):
        if not name.startswith('_') and not cls_attr.__doc__:
            # Walk the MRO (skipping cls itself) for the first base class
            # that defines a documented version of this attribute.
            for c in cls.mro():
                if c is cls:
                    continue
                attr = c.__dict__.get(name)
                if attr and attr.__doc__:
                    try:
                        cls_attr.__func__.__doc__ = attr.__doc__
                    except AttributeError:
                        # Bugfix: on Python 3 methods retrieved from a class
                        # are plain functions with no __func__, so the
                        # original silently never copied the docstring.
                        # Set it on the function object itself instead.
                        try:
                            cls_attr.__doc__ = attr.__doc__
                        except (AttributeError, TypeError):
                            # Probably a read-only attribute, swallow it.
                            pass
                    except TypeError:
                        # Read-only __doc__ (e.g. builtins), swallow it.
                        pass
                    break
    return cls
def get_totd_text():
    """
    Get the text for the Top of the Day post.

    Builds two sections -- the five most upvoted submissions on r/all for
    the day, and the five most upvoted top-level comments drawn from those
    submissions -- then renders them into the main post template.
    :return: The body for the post.
    """
    sections = []
    # Most Upvoted Posts
    top_submissions = sorted([submission for submission in get_reddit().subreddit("all").top("day", limit=5)], key=lambda x: x.score, reverse=True)
    items = [format_item(i, item) for i, item in enumerate(top_submissions)]
    sections.append(get_templates()["section_template"].format(
        section_title="Most Upvoted Posts of the Day",
        section_note="",
        title_body="Title",
        items="\n".join(items),
    ))
    # Most Upvoted Comments
    # Only top-level Comment instances are considered (MoreComments
    # placeholders are filtered out by the isinstance check).
    comments = []
    for submission in top_submissions:
        comments.extend([[comment, comment.score] for comment in submission.comments if isinstance(comment, Comment)])
    top_comments = sorted(comments, key=lambda x: x[1], reverse=True)
    items = [format_item(i, item) for i, item in enumerate([comment_info[0] for comment_info in top_comments[:5]])]
    sections.append(get_templates()["section_template"].format(
        section_title="Most Upvoted Comments of the Day",
        section_note="\n\n^(Note: These may not be entirely accurate. Currently these are out of the comments taken from the top 5 submissions.)",
        title_body="Body",
        items="\n".join(items),
    ))
    # Stitch the sections into the main template with today's date.
    submission_text = get_templates()["main"].format(date=current_date(), sections="\n\n".join(sections))
    return submission_text
def check_checkpoints(store_path):
    """
    Inputs
        1) store_path: The path where the checkpoint file will be searched at
    Outputs
        1) checkpoint_file: The latest checkpoint file if one exists, else ""
        2) flag: True if the directory exists at the path, False otherwise
    Function: Takes in the store_path and checks if a prior directory exists
    for the task already. If it doesn't, the flag is set to False and an
    empty string is returned. If the directory exists, the checkpoint file
    (ending in 'pth.tr') with the highest leading epoch number is returned,
    or "" when no checkpoint file is present.
    """
    # If the directory does not exist, return an empty string and False.
    if not os.path.isdir(store_path):
        return ["", False]
    onlyfiles = [f for f in os.listdir(store_path)
                 if os.path.isfile(os.path.join(store_path, f))]
    checkpoint_file = ""
    max_epoch = -1
    # Find the checkpoint with the highest epoch. The epoch is encoded as
    # the leading digits of the file name. The original code compared a
    # one-character string against an int (TypeError on Python 3) and
    # updated the wrong variable, so the maximum was never tracked.
    for fname in onlyfiles:
        if not fname.endswith('pth.tr'):
            continue
        digits = 0
        while digits < len(fname) and fname[digits].isdigit():
            digits += 1
        if digits == 0:
            continue  # no leading epoch number in this file name
        epoch = int(fname[:digits])
        if epoch > max_epoch:
            max_epoch = epoch
            checkpoint_file = fname
    # checkpoint_file stays "" when no valid checkpoint exists.
    return [checkpoint_file, True]
def dump_ensure_space(file, value, fun_err=None):
    """
    Only dump value if there is enough space on disk.

    If there is not enough space, it retries (with a linearly growing
    delay, capped at one hour) until the dump succeeds.
    Note: this method is less efficient and slower than a simple dump.
    :param file: file where dump
    :param value: value to dump
    :param fun_err: event previous to sleep if error, with params:
        times_waiting: times retrying until now
        time_to_retry: time to next retry in seconds
        err: msg error
    :return: None
    """
    if fun_err is None:
        def fun_err(_, __, ___):
            return None
    attempts = 0
    while True:
        try:
            pickle.dump(value, file, pickle.HIGHEST_PROTOCOL)
            return
        except IOError as err:
            # Anything other than a full disk is a real error: re-raise.
            if "No space left on device" not in str(err):
                raise
            attempts += 1
            # Back off 0.1s per attempt, never more than an hour.
            delay = min(0.1 * attempts, 3600)
            fun_err(attempts, delay, err)
            time.sleep(delay)
def store_images(input, predicts, target, dataset='promise12'):
    """
    store the test or valid image in tensorboardX images container
    :param input: NxCxHxW
    :param predicts: NxCxHxW
    :param target: NxHxW
    :param dataset: palette name forwarded to get_mask_pallete
    :return: one grid-image tensor of (input, prediction, ground-truth)
             triplets, normalized per image
    """
    N = input.shape[0]
    grid_image_list = []
    for i in range(N):
        channel = input[i].shape[0]
        # Per-pixel class prediction: argmax over the channel dimension.
        pred = torch.max(predicts[i], 0)[1].cpu().numpy()
        mask2s = get_mask_pallete(pred, dataset, channel=channel)
        if channel == 3:  # rgb
            # HWC palette image -> CHW float tensor for make_grid.
            mask2s = torch.from_numpy(np.array(mask2s).transpose([2,0,1])).float()
        else:  # gray
            # Add a leading channel axis so the tensor is 1xHxW.
            mask2s = torch.from_numpy(np.expand_dims(np.array(mask2s),axis=0)).float()
        gt = target[i].cpu().numpy()
        target2s = get_mask_pallete(gt, dataset, channel=channel)
        if channel == 3:
            target2s = torch.from_numpy(np.array(target2s).transpose([2,0,1])).float()
        else:
            target2s = torch.from_numpy(np.expand_dims(np.array(target2s), axis=0)).float()
        # Each sample contributes three tiles: input, prediction, target.
        grid_image_list += [input[i].cpu(), mask2s, target2s]
    grid_image = make_grid(grid_image_list, normalize=True, scale_each=True)
    return grid_image
def snmptable(ipaddress: str, oid: str, community: str = 'public',
              port: OneOf[str, int] = 161, timeout: int = 3,
              sortkey: Optional[str] = None
              ) -> OneOf[List[Dict[str, str]], Dict[str, Dict[str, str]]]:
    """
    Runs Net-SNMP's 'snmptable' command on a given OID, converts the results
    into a list of dictionaries, and optionally sorts the list by a given key.
    :param community: the snmpv2 community string
    :param ipaddress: the IP address of the target SNMP server
    :param oid: the Object IDentifier to request from the target SNMP server
    :param port: the port on which SNMP is running on the target server
    :param sortkey: the key within each dict upon which to sort the list of
        results
    :param timeout: the number of seconds to wait for a response from the
        SNMP server
    :return: a list of dicts, one for each row of the table. The keys of the
        dicts correspond to the column names of the table.
    :raises `~snmp_cmds.exceptions.SNMPTimeout`: if the target SNMP server
        fails to respond
    :raises `~snmp_cmds.exceptions.SNMPInvalidAddress`: if the hostname or
        IP address supplied is not valid or cannot be resolved
    :raises `~snmp_cmds.exceptions.SNMPError`: if the underlying
        Net-SNMP command produces an unknown or unhandled error
    :raises `~snmp_cmds.exceptions.SNMPTableError`: if the requested OID
        is not a valid table
    """
    # We want our delimiter to be something that would never show up in the
    # wild, so we'll use the non-printable ascii character RS (Record Separator)
    delimiter = '\x1E'
    ipaddress = validate_ip_address(ipaddress)
    host = '{}:{}'.format(ipaddress, port)
    cmdargs = [
        'snmptable', '-m', 'ALL', '-Pe', '-t', str(timeout), '-r', '0', '-v',
        '2c', '-Cif', delimiter, '-c', community, host, oid
    ]
    cmd = run(cmdargs, stdout=PIPE, stderr=PIPE)
    # Handle any errors that came up.
    # Bugfix: was `cmd.returncode is not 0` -- identity comparison against
    # an int literal is implementation-dependent and raises a SyntaxWarning
    # on Python 3.8+; use an equality check.
    if cmd.returncode != 0:
        check_for_timeout(cmd.stderr, host)
        if b'Was that a table?' in cmd.stderr:
            raise SNMPTableError(oid)
        else:
            handle_unknown_error(' '.join(cmdargs), cmd.stderr)
    # Process results
    else:
        # subprocess returns stdout from completed command as a single bytes
        # string. we'll split it into a list of bytes strings, and convert
        # each into a standard python string which the csv reader can handle
        cmdoutput = cmd.stdout.splitlines()
        cmdoutput = [item.decode('utf-8') for item in cmdoutput]
        # Strip the table name and the blank line following it from the output,
        # so all that remains is the table itself
        cmdoutput = cmdoutput[2:]
        table_parser = csv.DictReader(cmdoutput, delimiter=delimiter)
        results = [element for element in table_parser]
        if sortkey:
            results.sort(key=lambda i: i[sortkey])
        return results
def GetFile(message=None, title=None, directory=None, fileName=None, allowsMultipleSelection=False, fileTypes=None):
    """Ask the user to select a file.
    Some of these arguments are not supported:
    title, directory, fileName, allowsMultipleSelection and fileTypes are here for compatibility reasons.

    Returns the selection converted to the type negotiated by
    _process_Nav_args (FSRef, FSSpec, str or unicode path), or None when
    the user cancels the dialog or makes no valid selection.
    """
    # kNavSupportPackages lets the user descend into package directories;
    # 0x56 supplies the remaining default dialog options.
    default_flags = 0x56 | kNavSupportPackages
    args, tpwanted = _process_Nav_args(default_flags, message=message)
    _interact()
    try:
        rr = Nav.NavChooseFile(args)
        good = 1
    except Nav.error, arg:
        # -128 is userCancelledErr: a cancel is not an error, just no file.
        if arg[0] != -128: # userCancelledErr
            raise Nav.error, arg
        return None
    if not rr.validRecord or not rr.selection:
        return None
    # Convert the Navigation Services reply into the requested return type.
    if issubclass(tpwanted, Carbon.File.FSRef):
        return tpwanted(rr.selection_fsr[0])
    if issubclass(tpwanted, Carbon.File.FSSpec):
        return tpwanted(rr.selection[0])
    if issubclass(tpwanted, str):
        return tpwanted(rr.selection_fsr[0].as_pathname())
    if issubclass(tpwanted, unicode):
        return tpwanted(rr.selection_fsr[0].as_pathname(), 'utf8')
    raise TypeError, "Unknown value for argument 'wanted': %s" % repr(tpwanted)
def lda_recommend(context_list):
    """ With multiprocessing using Dask"""
    # NOTE(review): relies on module-level globals (id2word_dictionary,
    # ldamallet, malletindex, docid_to_magid) -- confirm they are loaded
    # before this is called from a Dask worker.
    print("Recommending")
    topn = 500
    sleep(0.2)
    # Convert the token list into bag-of-words form.
    vec_bow = id2word_dictionary.doc2bow(context_list)
    # This line takes a LONG time: it has to map to each of the 300 topics
    vec_ldamallet = ldamallet[vec_bow]
    # Convert the query to LDA space
    sims = malletindex[vec_ldamallet]
    # Highest similarity first, truncated to the topn best matches.
    sims = sorted(enumerate(sims), key=lambda item: -item[1])[:topn]
    # sims is a list of tuples of (docid -- line num in original training file, probability)
    return [docid_to_magid.get(docid) for docid, prob in sims]
def grids_skf_lr(data_x, data_y, grid_params, weight_classes=None, scv_folds=5):
    """
    Grid-search a logistic-regression model with stratified K-fold CV.

    :param data_x: feature matrix
    :param data_y: target labels
    :param grid_params: parameter grid forwarded to GridSearchCV
    :param weight_classes: class-weight mapping; defaults to {0: 1, 1: 1}
    :param scv_folds: number of stratified CV folds
    :return: the best fitted estimator found by the grid search
    """
    weights = {0: 1, 1: 1} if weight_classes is None else weight_classes
    base_model = LogisticRegression(
        class_weight=weights,
        random_state=0,
        multi_class='ovr',
        n_jobs=-1,
    )
    searcher = GridSearchCV(
        base_model,
        grid_params,
        n_jobs=-1,
        cv=StratifiedKFold(n_splits=scv_folds, shuffle=True),
        scoring=ml_metric,
        refit=True,
        verbose=1,
    )
    # Scale features to [0, 1] before fitting, as in the original pipeline.
    normalized = MinMaxScaler().fit_transform(data_x)
    searcher.fit(normalized, data_y)
    print(searcher.best_estimator_)
    return searcher.best_estimator_
def zeros(rows, cols, fortran=True):
    """Return the zero matrix with the given shape.

    The backing CuPy array is complex128, Fortran-ordered by default.
    """
    layout = "F" if fortran else "C"
    return CuPyDense._raw_cupy_constructor(
        cp.zeros(shape=(rows, cols), dtype=cp.complex128, order=layout)
    )
def check_if_dataframe(obj):
    """Raise TypeError unless the given object is a pandas DataFrame."""
    if isinstance(obj, pd.DataFrame):
        return
    raise TypeError(
        f"Provided object ({type(obj).__name__}) is not a pandas.DataFrame"
    )
def runCmd(cmdString, verbose=False):
    """Thin wrapper around subprocess.run() for clean error reporting.

    Runs *cmdString* through the shell. On a non-zero exit status, prints
    a diagnostic to stderr and terminates the process with exit code 1.
    :param cmdString: the shell command line to execute
    :param verbose: when True, echo the command to stderr before running
    """
    try:
        if verbose:
            sys.stderr.write("Running command: %s\n" % cmdString)
            sys.stderr.flush()
        # Bugfix: check=True is required for CalledProcessError to be
        # raised at all; without it the except block was unreachable.
        subprocess.run(cmdString, shell=True, check=True)
    except subprocess.CalledProcessError as e:
        sys.stderr.write("ERROR: error running command: %s\n" % cmdString)
        # Bugfix: CalledProcessError has no .msg attribute (that raised
        # AttributeError); str(e) carries the command and exit status.
        sys.stderr.write(str(e) + '\n')
        sys.exit(1)
def compute_trapezoidal_approx(bm, t0, y0, dt, sqrt_dt, dt1_div_dt=10, dt1_min=0.01):
    """Estimate int_{t0}^{t0+dt} int_{t0}^{s} dW(u) ds with trapezoidal rule.
    Slower compared to using the Gaussian with analytically derived mean and standard deviation, but ensures
    true determinism, since this rids the random number generation in the solver, i.e. all randomness comes from `bm`.
    The loop is from using the Trapezoidal rule to estimate int_0^1 v(s) ds with step size `dt1`.
    """
    dt, sqrt_dt = float(dt), float(sqrt_dt)
    # Quadrature step for the unit-interval integral, clamped to [dt1_min, 1].
    dt1 = max(min(1.0, dt1_div_dt * dt), dt1_min)
    # Rescaled Brownian path on [0, 1]; bm(t) presumably returns one tensor
    # per Brownian-motion component -- TODO confirm against the bm interface.
    v = lambda s: [bmi / sqrt_dt for bmi in bm(s * dt + t0)]  # noqa
    # Estimate int_0^1 v(s) ds by Trapezoidal rule.
    # Based on Section 1.4 of Stochastic Numerics for Mathematical Physics.
    # Endpoints get weight 1, interior points weight 2: start with
    # -v(0)-v(1) and add 2*v(t) for every grid point (incl. the endpoints).
    int_v_01 = [- v0 - v1 for v0, v1 in zip(v(0.0), v(1.0))]
    for t in np.arange(0, 1 + 1e-7, dt1, dtype=float):
        int_v_01 = [a + 2. * b for a, b in zip(int_v_01, v(t))]
    int_v_01 = [a * dt1 / 2. for a in int_v_01]
    # Undo the time rescaling and cast to y0's dtype/device.
    return [(dt ** (3 / 2) * a - dt * b).to(y0[0]) for a, b in zip(int_v_01, bm(t0))]
def test_adjacency_k_brute_connect():
    """
    Adjacency Matrix k-Nearest Neighbors test
    """
    # import data
    data = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    # set default parameters
    n_neighbors = 5
    algorithm = 'brute'
    method = 'knn'
    # sklearn adjacency matrix
    nbrs = NearestNeighbors(n_neighbors=n_neighbors,
                            algorithm=algorithm).fit(data)
    sklearn_mat = nbrs.kneighbors_graph(data)
    sklearn_mat = sklearn_mat.toarray()
    # my routine
    my_mat = adjacency(data,
                       n_neighbors=n_neighbors,
                       algorithm=algorithm,
                       method=method)
    my_mat = my_mat.toarray()
    # assert adjacency matrices are equal
    # Bugfix: comparing sklearn_mat.all() to my_mat.all() only compared two
    # booleans, so the test passed even when the matrices differed.
    # Compare element-wise equality of the full matrices instead.
    msg = 'Distance values comparison.'
    assert_equal((sklearn_mat == my_mat).all(), True, msg=msg)
def test_ssm_provider_find_parameter(boto_fs):
    """
    Should find the specific entry in the provider
    """
    config_file = 'config.yaml'
    # Reset the shared session so the provider uses default credentials
    # and a fixed region for the fake backend.
    session.aws_profile = None
    session.aws_region = 'us-east-1'
    # boto_fs is a fake-filesystem fixture: the config exists only in memory.
    boto_fs.create_file(config_file, contents=f"""
kms:
  arn: {KEY_ARN}
parameters:
  - name: 'ssm-param'
    type: 'String'
    value: 'ABC'
""")
    config = ConfigReader(config_file)
    provider = SSMProvider(config)
    assert isinstance(provider.find('ssm-param'), SSMParameterEntry)
def vavrycuk_psencik_hti(vp1, vs1, p1, d1, e1, y1,
                         vp2, vs2, p2, d2, e2, y2,
                         phi, theta1):
    """
    Reflectivity for arbitrarily oriented HTI media, using the formulation
    derived by Vavrycuk and Psencik [1998], "PP-wave reflection coefficients
    in weakly anisotropic elastic media".

    Layer 1 is the upper medium, layer 2 the lower; (vp, vs, p) are
    velocities and density, (d, e, y) are the Thomsen parameters delta,
    epsilon and gamma. ``phi`` (azimuth) and ``theta1`` (incidence angle)
    are given in degrees. Returns the PP reflection coefficient Rpp.
    """
    theta1 = np.radians(theta1)
    phi = np.radians(phi)
    theta2, thetas1, thetas2, p = snell(vp1, vp2, vs1, vs2, theta1)
    # NOTE(review): the original computed the average angle
    # (theta1 + theta2) / 2 and immediately overwrote it; the incidence
    # angle is what is actually used throughout.
    theta = theta1
    G1 = p1 * (vs1 ** 2)
    G2 = p2 * (vs2 ** 2)
    Z1 = p1 * vp1
    Z2 = p2 * vp2
    a = (vp1 + vp2) / 2        # average P velocity
    vs_avg = (vs1 + vs2) / 2   # average S velocity (was shadowed as `B`)
    Z = (Z1 + Z2) / 2
    G = (G1 + G2) / 2
    dZ = Z2 - Z1
    da = vp2 - vp1
    dG = G2 - G1
    dd = d2 - d1
    de = e2 - e1
    dy = y2 - y1
    # Intercept term.
    A = (1/2 * (dZ / Z) +
         1/2 * (da / a) * np.tan(theta) ** 2 -
         2 * ((vs_avg / a) ** 2) * (dG / G) * np.sin(theta) ** 2)
    # Gradient term. Bugfix: np.sin(phi)*2 was a typo for np.sin(phi)**2
    # in the shear-anisotropy (gamma) contribution.
    B = 1/2 * (dd * (np.cos(phi) ** 2) -
               8 * ((vs_avg / a) ** 2) * dy * (np.sin(phi) ** 2))
    # Curvature term.
    C = 1/2 * (de * (np.cos(phi) ** 4) +
               dd * (np.cos(phi) ** 2) * (np.sin(phi) ** 2))
    Rpp = A + B * np.sin(theta) ** 2 + C * np.sin(theta) ** 2 * np.tan(theta) ** 2
    return(Rpp)
def test_config_set_min(ursadb: UrsadbTestContext) -> None:
    """ Sanity check that minimal values are enforced """
    # 0 is below query_max_edge's minimum, so the server must answer
    # with an error response rather than accept the value.
    assert "error" in ursadb.request('config set "query_max_edge" 0;')
def request_set_endorser(handle: RequestHandle, endorser_did: str):
    """Set the endorser on a prepared request.

    :param handle: handle of the prepared ledger request to modify
    :param endorser_did: DID of the endorser; encoded for the FFI call
    """
    endorser_p = encode_str(endorser_did)
    # Delegates to the native indy-vdr library; mutates the request in place.
    do_call("indy_vdr_request_set_endorser", handle, endorser_p)
def test_spawn_args(spawn, shell):
    """
    Test that the arguments for Pexpect spawn are correct.
    """
    # Default shell: dumb terminal, echo disabled.
    shell.connect()
    spawn.assert_called_with(
        'test connection command',
        env={'TERM': 'dumb'},
        echo=False,
        use_poll=True,
    )
    # Custom spawn_args must override the defaults passed to spawn().
    shell = Shell(
        '', spawn_args={'env': {'TERM': 'smart'}, 'echo': True}
    )
    shell.connect()
    spawn.assert_called_with(
        'test connection command',
        env={'TERM': 'smart'},
        echo=True,
        use_poll=True,
    )
def concatenate_time_series(time_series_seq):
    """Concatenates a sequence of time-series objects in time.
    The input can be any iterable of time-series objects; metadata, sampling
    rates and other attributes are kept from the last one in the sequence.
    This one requires that all the time-series in the list have the same
    sampling rate and that all the data have the same number of items in all
    dimensions, except the time dimension"""
    # Extract the data pointer for each and build a common data block
    data = []
    metadata = {}
    for ts in time_series_seq:
        data.append(ts.data)
        # Later entries overwrite earlier ones on key collisions.
        metadata.update(ts.metadata)
    # Sampling interval is read from the last one: the loop variable `ts`
    # deliberately leaks out of the loop. NOTE(review): an empty input
    # sequence raises NameError here -- presumably never empty; confirm
    # with callers.
    tseries = TimeSeries(np.concatenate(data,-1),
                         sampling_interval=ts.sampling_interval,
                         metadata=metadata)
    return tseries
def test_Device_unit():
    """Test unit setter and getter of Device class."""
    dev = Device('dev1')
    # A freshly constructed device has no unit assigned.
    assert dev.unit is None
    dev.unit = 'mV/Pa'
    assert dev.unit == 'mV/Pa'
def setup_database_node(*args):
    """Provisions database services in one or list of nodes. USAGE: fab setup_database_node:user@1.1.1.1,user@2.2.2.2"""
    for host_string in args:
        # Frame the command line to provision database
        cmd = frame_vnc_database_cmd(host_string)
        # Execute the provision database script
        with settings(host_string=host_string):
            with settings(warn_only=True):
                if detect_ostype() == 'ubuntu':
                    if not is_xenial_or_above():
                        # Pre-Xenial Ubuntu: remove the upstart override so
                        # supervisor-database is allowed to start.
                        sudo('rm /etc/init/supervisor-database.override')
            with cd(INSTALLER_DIR):
                sudo(cmd)
def deeplink_url_patterns(
        url_base_pattern=r'^init/%s/$',
        login_init_func=login_init,
        ):
    """
    Returns new deeplink URLs based on 'links' from settings.SAML2IDP_REMOTES.

    One URL pattern is produced per deeplink resource, each dispatching to
    ``login_init_func`` with the resource name as an extra kwarg.
    Parameters:
    - url_base_pattern - Specify this if you need non-standard deeplink URLs.
        NOTE: This will probably closely match the 'login_init' URL.
    """
    return [
        url(url_base_pattern % resource,
            login_init_func,
            {'resource': resource})
        for resource in get_deeplink_resources()
    ]
def read_words():
    """
    Returns an array of all whitespace-separated words in words.txt.
    """
    words = []
    for line in read_file('resources/words.txt'):
        # Bugfix: split(' ') kept the trailing newline attached to the last
        # word of every line and produced empty strings for runs of spaces;
        # split() with no argument splits on any whitespace and drops both.
        words.extend(line.split())
    return words
def lynotename(midinote):
    """Find the LilyPond/Pently name of a MIDI note number.
    For example, given 60 (which means middle C), return "c'".
    """
    octave, degree = divmod(midinote, 12)
    base = notenames[degree]
    # Octave 4 is the unmarked octave: lower octaves take commas,
    # higher octaves take apostrophes.
    marks = octave - 4
    if marks < 0:
        return base + "," * (-marks)
    return base + "'" * marks
def check_object(o):
    """ Test that o is a valid object identifier.

    A valid RDF object term is a URIRef, Literal, or BNode instance;
    anything else raises ObjectTypeError.
    """
    # isinstance accepts a tuple of types; clearer than chained `or`s.
    if not isinstance(o, (URIRef, Literal, BNode)):
        raise ObjectTypeError(o)
def json_serial(obj):
    """
    Fallback serializer for json. Serializes datetime and date objects to
    ISO-8601 format; everything else is coerced with str().
    :param obj: an object to serialize.
    :returns: a serialized string.
    """
    # datetime.datetime is a subclass of datetime.date, so this single
    # check covers both of the types the original tuple matched.
    if isinstance(obj, datetime.date):
        return obj.isoformat()
    return str(obj)
def main():
    """
    loads the resources and launches the app
    :return: None
    """
    app = QtWidgets.QApplication(sys.argv)
    load_resources()
    controller = NoiceController()
    controller.show()
    # Hand control to the Qt event loop; exit with its return code.
    sys.exit(app.exec_())
def deepgreen_deposit(packaging, file_handle, acc, deposit_record):
    """
    Deposit the binary package content to the target repository
    :param packaging: the package format identifier
    :param file_handle: the file handle on the binary content to deliver
    :param acc: the account we are working as
    :param deposit_record: provenance object for recording actions during this deposit process
    :raises DepositException: when the SWORD deposit fails or the server
        returns an error document
    """
    app.logger.info(u"Depositing DeepGreen Package Format:{y} for Account:{x}".format(x=acc.id, y=packaging))
    # create a connection object
    conn = sword2.Connection(user_name=acc.sword_username, user_pass=acc.sword_password, error_response_raises_exceptions=False, http_impl=client_http.OctopusHttpLayer())
    ##if acc.repository_software in ["eprints"]:
    ##    # this one adds the package as a new file to the item
    ##    try:
    ##        ur = conn.add_file_to_resource(receipt.edit_media, file_handle, "deposit.zip", "application/zip", packaging)
    ##    except Exception as e:
    ##        msg = u"Received Error:{a} attempting to deposit file in repository for Account:{x} - raising DepositException".format(a=e.message, x=acc.id)
    ##        app.logger.error(msg)
    ##        raise DepositException(msg)
    ##else:
    ##    # this one would replace all the binary files
    ##    try:
    ##        ur = conn.update_files_for_resource(file_handle, "deposit.zip", mimetype="application/zip", packaging=packaging, dr=receipt)
    ##    except Exception as e:
    ##        msg = u"Received Error:{a} attempting to deposit file in repository for Account:{x} - raising DepositException".format(a=e.message, x=acc.id)
    ##        app.logger.error(msg)
    ##        raise DepositException(msg)
    #
    # this one would create an collection item as the package's file(s)
    try:
        ur = conn.create(col_iri=acc.sword_collection, payload=file_handle, filename="deposit.zip", mimetype="application/zip", packaging=packaging)
    except Exception as e:
        msg = u"Received Error:{a} attempting to deposit file in repository for Account:{x} - raising DepositException".format(a=e.message, x=acc.id)
        app.logger.error(msg)
        raise DepositException(msg)
    # storage manager instance
    sm = store.StoreFactory.get()
    # find out if this was an error document, and throw an error if so
    # (recording deposited/failed on the deposit_record along the way)
    if isinstance(ur, sword2.Error_Document):
        deposit_record.content_status = "failed"
        msg = "Content deposit failed with status {x} (error_href={y})".format(x=ur.code,y=ur.error_href)
        # 2020-01-09 TD : check for special cases for 'InvalidXml' in the error document
        #ehref = ur.error_href
        # 2020-01-13 TD : safety check, e.g. if ur.code == 500 (INTERNAL SERVER ERROR),
        #                 then ur.error_href is None
        if not ur.error_href is None:
            if "opus-repository" in ur.error_href and "InvalidXml" in ur.error_href:
                deposit_record.metadata_status = "invalidxml"
            # 2020-01-13 TD : check for special cases for 'PayloadToLarge' in the error document
            #                 (note the typo here in OPUS4 ...)
            if "opus-repository" in ur.error_href and "PayloadToLarge" in ur.error_href:
                deposit_record.metadata_status = "payloadtoolarge"
        #
        if app.config.get("STORE_RESPONSE_DATA", False):
            # Keep the failure message for later provenance inspection.
            sm.store(deposit_record.id, "content_deposit.txt", source_stream=StringIO(msg))
        app.logger.debug(u"Received error document depositing Package Format:{y} for Account:{x} - raising DepositException".format(x=acc.id, y=packaging))
        raise DepositException(msg)
    else:
        if app.config.get("STORE_RESPONSE_DATA", False):
            msg = "Content deposit was successful"
            sm.store(deposit_record.id, "content_deposit.txt", source_stream=StringIO(msg))
        deposit_record.content_status = "deposited"
        app.logger.debug(u"Successfully deposited Package Format:{y} for Account:{x}".format(x=acc.id, y=packaging))
    app.logger.info("DeepGreen Package deposit")
    return
def _scrub_participant_table(path_to_data):
    """Scrub PII from the given participant table.

    Rewrites participant.csv in place: the worker_id column is replaced by
    the row's internal id, and unique_id is rebuilt as
    "<id>:<assignment_id>", removing external worker identifiers.
    """
    path = os.path.join(path_to_data, "participant.csv")
    with open_for_csv(path, 'r') as input, open("{}.0".format(path), 'w') as output:
        reader = csv.reader(input)
        writer = csv.writer(output)
        headers = next(reader)
        writer.writerow(headers)
        for i, row in enumerate(reader):
            # Substitute the PII-bearing columns with internal identifiers.
            row[headers.index("worker_id")] = row[headers.index("id")]
            row[headers.index("unique_id")] = "{}:{}".format(
                row[headers.index("id")],
                row[headers.index("assignment_id")]
            )
            writer.writerow(row)
    # Swap the scrubbed copy over the original file.
    os.rename("{}.0".format(path), path)
def get_library_file(instrument, detector, filt, pupil, wfe, wfe_group,
                     library_path, wings=False, segment_id=None):
    """Given an instrument and filter name along with the path of
    the PSF library, find the appropriate library file to load.
    Parameters
    -----------
    instrument : str
        Name of instrument the PSFs are from
    detector : str
        Name of the detector within ```instrument```
    filt : str
        Name of filter used for PSF library creation
    pupil : str
        Name of pupil wheel element used for PSF library creation
    wfe : str
        Wavefront error. Can be 'predicted' or 'requirements'
    wfe_group : int
        Wavefront error realization group. Must be an integer from 0 - 9.
    library_path : str
        Path pointing to the location of the PSF library
    wings : bool, optional
        Must the library file contain PSF wings or PSF cores? Default is False.
    segment_id : int or None, optional
        If specified, returns a segment PSF library file and denotes the ID
        of the mirror segment
    Returns
    --------
    matches : str
        Name of the PSF library file for the instrument and filter name
    """
    logger = logging.getLogger('mirage.psf.psf_selection.get_library_file')
    psf_files = glob(os.path.join(library_path, '*.fits'))
    # Determine if the PSF path is default or not
    mirage_dir = expand_environment_variable('MIRAGE_DATA')
    gridded_dir = os.path.join(mirage_dir, '{}/gridded_psf_library'.format(instrument.lower()))
    if wings:
        gridded_dir = os.path.join(gridded_dir, 'psf_wings')
    default_psf = library_path == gridded_dir
    # Create a dictionary of header information for all PSF library files
    matches = []
    # Normalize the user inputs to the case conventions used in the headers.
    instrument = instrument.upper()
    detector = detector.upper()
    filt = filt.upper()
    pupil = pupil.upper()
    wfe = wfe.lower()
    # set default
    file_wfe = ''
    # handle the NIRISS NRM case
    if pupil == 'NRM':
        pupil = 'MASK_NRM'
    # Handle the DHS for Coarse Phasing - this is a workaround for webbpsf not
    # implementing this. We're going to load an ITM image in any case in this mode
    # so the PSF is entirely unused, but we need to load something or else MIRAGE errors.
    if pupil == 'GDHS0' or pupil == 'GDHS60':
        pupil = 'CLEAR'
    for filename in psf_files:
        try:
            header = fits.getheader(filename)
            # Determine if it is an ITM file
            itm_sim = header.get('ORIGIN', '') == 'ITM'
            # Compare the header entries to the user input
            file_inst = header['INSTRUME'].upper()
            try:
                file_det = header['DETECTOR'].upper()
            except KeyError:
                # Older files use DET_NAME instead of DETECTOR.
                file_det = header['DET_NAME'].upper()
            file_filt = header['FILTER'].upper()
            try:
                file_pupil = header['PUPIL'].upper()
            except KeyError:
                # If no pupil mask value is present, then assume the CLEAR is
                # being used
                if file_inst.upper() == 'NIRCAM':
                    file_pupil = 'CLEAR'
                elif file_inst.upper() == 'NIRISS':
                    try:
                        file_pupil = header['PUPIL'].upper()  # can be 'MASK_NRM'
                    except KeyError:
                        file_pupil = 'CLEARP'
            # NIRISS has many filters in the pupil wheel. WebbPSF does
            # not make a distinction, but Mirage does. Adjust the info
            # to match Mirage's expectations
            if file_inst.upper() == 'NIRISS' and file_filt in NIRISS_PUPIL_WHEEL_FILTERS:
                save_filt = copy(file_filt)
                if file_pupil == 'CLEARP':
                    file_filt = 'CLEAR'
                else:
                    raise ValueError(('Pupil value is something other than '
                                      'CLEARP, but the filter being used is '
                                      'in the pupil wheel.'))
                file_pupil = save_filt
            # Same for NIRCam
            if file_inst.upper() == 'NIRCAM' and file_filt in NIRCAM_PUPIL_WHEEL_FILTERS:
                save_filt = copy(file_filt)
                if file_pupil == 'CLEAR':
                    # Map pupil-wheel filters back to the companion
                    # filter-wheel element implied by their wavelength range.
                    if save_filt[0:2] == 'F4':
                        file_filt = 'F444W'
                    elif save_filt[0:2] == 'F3':
                        file_filt = 'F322W2'
                    elif save_filt[0:2] == 'F1':
                        file_filt = 'F150W2'
                else:
                    raise ValueError(('Pupil value is something other than '
                                      'CLEAR, but the filter being used is '
                                      'in the pupil wheel.'))
                file_pupil = save_filt
            if segment_id is None and not itm_sim:
                # Derive the WFE flavor from the OPD file name recorded
                # in the header.
                opd = header['OPD_FILE']
                if 'requirements' in opd:
                    file_wfe = 'requirements'
                elif 'predicted' in opd:
                    file_wfe = 'predicted'
                file_wfe_grp = header['OPDSLICE']
            if segment_id is not None:
                segment_id = int(segment_id)
                file_segment_id = int(header['SEGID'])
            if segment_id is None and itm_sim:
                # If we have an ITM library, then wfe is
                # meaningless, so force it to match
                file_wfe = 'predicted'
                wfe = 'predicted'
            # allow check below to pass for FGS
            if instrument.lower() == 'fgs':
                file_filt = 'N/A'
                filt = 'N/A'
                file_pupil = 'N/A'
                pupil = 'N/A'
            # Evaluate if the file matches the given parameters
            match = (file_inst == instrument
                     and file_det == detector
                     and file_filt == filt
                     and file_pupil == pupil
                     and file_wfe == wfe)
            if not wings and segment_id is None and not itm_sim and default_psf:
                match = match and file_wfe_grp == wfe_group
            if segment_id is not None:
                match = match and file_segment_id == segment_id
            elif not itm_sim and default_psf:
                match = match and file_wfe == wfe
            # If so, add to the list of all matches
            if match:
                matches.append(filename)
        except KeyError as e:
            # A malformed or foreign FITS file: report and skip it.
            warnings.warn('While searching for PSF file, error raised when examining {}:\n{}\nContinuing.'.format(os.path.basename(filename), e))
            continue
    # Find files matching the requested inputs
    if len(matches) == 1:
        return matches[0]
    elif len(matches) == 0:
        logger.info('Requested parameters:\ninstrument {}\ndetector {}\nfilt {}\npupil {}\nwfe {}\n'
                    'wfe_group {}\nlibrary_path {}\n'.format(instrument, detector, filt, pupil, wfe,
                                                             wfe_group, library_path))
        raise ValueError("No PSF library file found matching requested parameters.")
    elif len(matches) > 1:
        raise ValueError("More than one PSF library file matches requested parameters: {}".format(matches))
def html_mail(sender='me@mail.com', recipients=['them@mail.com'],
html_content='<p>Hi</p>', subject='Hello!',
mailserver='localhost'):
""" html_mail takes input html, sender, recipents and
emails it in a mime type that will show as text or html on
the recipients mail reader.
depends on email, html2text
"""
# Create an email message container - uses multipart/alternative.
# sets up basic email components
message = MIMEMultipart('alternative')
message['Subject'] = subject
message['From'] = sender
message['To'] = recipients
# create a markdown text version of the supplied html content
text = html2text.html2text(html_content)
# setup the MIME types for both email content parts, text - plain & text - html.
email_part1 = MIMEText(text, 'plain')
email_part2 = MIMEText(html_content, 'html')
"""
complete the message composition -
attach the email parts into our message container.
make the order such that the text email is seen by the recipient, in case
they are not reading with a mime & html compatible reader.
"""
message.attach(email_part1)
message.attach(email_part2)
# Send the message via our SMTP server.
conn = smtplib.SMTP(mailserver)
# sendmail function takes 3 arguments: sender's address, recipient's address
# and message to send - here it is sent as one string.
try:
return_value = conn.sendmail(sender, recipients, message.as_string())
conn.quit()
except Exception as e:
print "error sending email: html_mail.html_mail %s" % (e,)
return (return_value)
if return_value:
return return_value
else:
return None | 5,324,934 |
def gotu(input_path: str) -> biom.Table:
    """Generate a gOTU table based on sequence alignments.

    Runs the classification workflow with no mapping and converts the
    resulting "none"-keyed profile into a BIOM table.
    """
    profiles = workflow(input_path, None)
    return profile_to_biom(profiles['none'])
def read_data_megset(beamf_type):
    """Read and prepare beamformer benchmark data for plotting.

    Loads the per-subject CSV results for the requested beamformer type,
    harmonizes columns and missing values, and averages the metrics over
    subjects (one row per parameter setting).

    :param beamf_type: either 'lcmv' or 'dics'
    :return: aggregated pandas DataFrame
    :raises ValueError: for an unknown beamformer type
    """
    if beamf_type == 'lcmv':
        settings = config.lcmv_settings
        settings_columns = ['reg', 'sensor_type', 'pick_ori', 'inversion',
                            'weight_norm', 'normalize_fwd', 'use_noise_cov',
                            'reduce_rank']
        data_fname = config.fname.lcmv_megset_results
        frames = []
        for subject in [1, 2, 4, 5, 6, 7]:
            frame = pd.read_csv(data_fname(subject=subject), index_col=0)
            frame['subject'] = subject
            frames.append(frame.rename(columns={'focs': 'focality'}))
        data = pd.concat(frames, ignore_index=True)
    elif beamf_type == 'dics':
        settings = config.dics_settings
        settings_columns = ['reg', 'sensor_type', 'pick_ori', 'inversion',
                            'weight_norm', 'normalize_fwd', 'real_filter',
                            'use_noise_cov', 'reduce_rank']
        data_fname = config.fname.dics_megset_results
        frames = []
        for subject in [1, 4, 5, 6, 7]:
            frame = pd.read_csv(data_fname(subject=subject), index_col=0)
            frame['focality'] = frame['focality'].abs()
            frame['subject'] = subject
            frames.append(frame)
        data = pd.concat(frames, ignore_index=True)
    else:
        raise ValueError('Unknown beamformer type "%s".' % beamf_type)
    # Normalize missing values and units before aggregating.
    data['weight_norm'] = data['weight_norm'].fillna('none')
    data['pick_ori'] = data['pick_ori'].fillna('none')
    data['dist'] *= 1000  # measure distance in mm
    # Average across the subjects; expect one row per settings combination.
    data = data.groupby(settings_columns).agg('mean').reset_index()
    assert len(data) == len(settings)
    return data
def collect_user_name():
    """Return the username as provided by the OS, or the fallback
    constant ``UNKNOWN_UNAME`` if the lookup fails for any reason.
    """
    try:
        return getpass.getuser()
    except Exception as e:
        log = logging.getLogger(__name__)
        log.warning(
            "Failed to collect the user name: error was {}.".format(e))
        return UNKNOWN_UNAME
def get_dataframe() -> pd.DataFrame:
    """Return a small dummy DataFrame with ``quantity`` and ``price``
    columns (three rows), for use in examples/tests.
    """
    # BUG FIX: the return annotation was ``pd.DataFrame()`` — an *instance*
    # constructed at definition time — rather than the class itself.
    data = [
        {"quantity": 1, "price": 2},
        {"quantity": 3, "price": 5},
        {"quantity": 4, "price": 8},
    ]
    return pd.DataFrame(data)
def get_node_mirna(mirna_name, taxid, psi_mi_to_sql_object):
    """Build (or fetch) the node dict for a miRNA.

    The SQLite helper is queried first so that a node already present in
    the database is reused instead of being inserted a second time.
    """
    existing = psi_mi_to_sql_object.get_node(mirna_name, node_tax_id=taxid)
    if existing:
        return existing
    # Not in the database yet: assemble a fresh node dict.
    return {
        "name": 'HPRD:' + mirna_name,
        "tax_id": taxid,
        "alt_accession": None,
        'pathways': None,
        "aliases": None,
        "topology": None
    }
def generateEndOfQuiz(filename):
    """Append '++' — the marker IS.MUNI's ROPOT parser recognizes as the
    end of a template — to the very end of the given file.

    Args:
        filename (string): Name of the file (inside ``target``) to
            terminate with the end-of-template marker.
    """
    out_path = os.path.join(target, filename)
    with codecs.open(out_path, 'a', "utf-8") as template_file:
        template_file.write("++")
def Web(website, system_id, address, page, params=None):
    """Routine for connecting to the website that is hosting the database.

    POSTs ``params`` (plus the system id) to http://<website>/<page> as a
    form-encoded body and returns the response body.

    :param website: host name of the database server
    :param system_id: value sent as the 'systemid' form field
    :param address: unused here; kept for interface compatibility
    :param page: URL path component to POST to
    :param params: optional dict of extra form fields
    :return: response body, or the string 'error' if the request failed
    """
    # BUG FIX: the default was a shared mutable dict ({}) that this
    # function mutated on every call; also copy a caller-supplied dict so
    # the caller's data is not clobbered.
    params = dict(params) if params else {}
    data = ''
    params['systemid'] = system_id
    body = urllib.urlencode(params)
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "text/plain"}
    try:
        conn = httplib.HTTPConnection(website + ':80')
        conn.request("POST", '/' + page, body, headers)
        response = conn.getresponse()
        data = response.read()
        conn.close()
    except Exception:
        # BUG FIX: narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.
        Debug("Unable to connect to website " + str(sys.exc_info()[0]))
        return 'error'
    else:
        return data
def generate_output_file_name(input_file_name):
    """Derive the generated-notebook file name from an input file name
    by appending the ``.gen.ipynb`` suffix.

    :type input_file_name: str
    """
    assert isinstance(input_file_name, str)
    return input_file_name + ".gen.ipynb"
def prefixed_field_map(name: str) -> Mapper:
    """Build a field map for ``name`` whose converters add the signed
    prefix going API -> Python and strip it again going Python -> API.

    Arguments
    ---------
    name : str
        Name of the property.

    Returns
    -------
    Mapper
        Field map.

    See Also
    --------
    field_map
    """
    converters = {
        'api_to_python': add_signed_prefix_as_needed,
        'python_to_api': remove_signed_prefix_as_needed,
    }
    return field_map(name, **converters)
def a_star(grid, start, end):
    """A-star pathfinding over ``grid`` from ``start`` to ``end``.

    Recolors each expanded cell via pygame while running, so the search
    can be watched live, and services the pygame event queue so the
    window stays responsive (quitting the process on a QUIT event).

    :param grid: grid object exposing ``blocks`` (2-D cells with
        ``color``/``set_color``/``draw``), ``r`` rows and ``c`` columns
    :param start: (row, col) start position
    :param end: (row, col) goal position
    :return: list of (row, col) steps ending at the goal (the start cell
        itself is excluded), or None (implicitly) when no path exists
    """
    # open and closed nodes
    open_nodes = []
    closed_nodes = []
    # Create a start node and an goal node
    start_node = Node(start, None)
    goal_node = Node(end, None)
    # Add the start node
    open_nodes.append(start_node)
    # Loop until the open list is empty
    while len(open_nodes) > 0:
        for event in pygame.event.get():
            # Checks for quit event
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
        # Sort the open list to get the node with the lowest cost first
        open_nodes.sort()
        # Get the node with the lowest cost
        current_node = open_nodes.pop(0)
        # Add the current node to the closed list (and paint it as visited)
        (i, j) = current_node.position
        grid.blocks[i][j].set_color(visited)
        pygame.display.update(grid.blocks[i][j].draw())
        closed_nodes.append(current_node)
        # Check if we have reached the goal, return the path
        if current_node == goal_node:
            path = []
            while current_node != start_node:
                path.append(current_node.position)
                current_node = current_node.parent
            # Return reversed path
            return path[::-1]
        # Unzip the current node position
        (x, y) = current_node.position
        # Get neighbors (4-connected, clipped to the grid bounds)
        neighbors = []
        if x - 1 >= 0:
            neighbors.append((x - 1, y))
        if x + 1 < grid.r:
            neighbors.append((x + 1, y))
        if y - 1 >= 0:
            neighbors.append((x, y - 1))
        if y + 1 < grid.c:
            neighbors.append((x, y + 1))
        # Loop neighbors
        for next in neighbors:
            # Get value from grid
            grid_value = grid.blocks[next[0]][next[1]]
            # Check if the node is a wall
            if grid_value.color == wall:
                continue
            # Create a neighbor node
            neighbor = Node(next, current_node)
            # Check if the neighbor is in the closed list
            if neighbor in closed_nodes:
                continue
            # Generate heuristics (Manhattan distance)
            # NOTE(review): ``g`` here is the Manhattan distance from the
            # start rather than the accumulated path cost, which deviates
            # from textbook A* — confirm this is intended.
            neighbor.g = abs(neighbor.position[0] - start_node.position[0]) + abs(
                neighbor.position[1] - start_node.position[1]
            )
            neighbor.h = abs(neighbor.position[0] - goal_node.position[0]) + abs(
                neighbor.position[1] - goal_node.position[1]
            )
            neighbor.f = neighbor.g + neighbor.h
            # Check if neighbor is in open list and if it has a lower f value
            if add_to_open(open_nodes, neighbor) == True:
                open_nodes.append(neighbor)
def update_wf_library(pulses, path):
    """
    Update the waveform library in-place.

    Groups the given pulses by the AWG (physical-channel instrument) that
    plays them, loads the per-AWG offsets file saved next to the sequence
    files, and asks each AWG's driver to rewrite its waveform file.

    Parameters
    ------------
    pulses : iterable of pulse object to update
        e.g. [X90(q1), X(q1), Y90(q1), Y(q1), X90(q2), X(q2), Y90(q2), Y(q2), ZX90_CR(q1, q2)]
    path : path to base name of files to update e.g. /path/to/GST/GST will update files such as
        /path/to/GST/GST-APSII1.h5 and /path/to/GST/GST-APSII2.h5
    """
    #Look through the pulses and figure out what pulses are associated with which APS
    awg_pulses = collections.defaultdict(dict)
    translators = {}
    def flatten_pulses():
        # Expand CompositePulse objects into their constituent pulses.
        for p in flatten(pulses):
            if isinstance(p, CompositePulse):
                for sub_p in p.pulses:
                    yield sub_p
            else:
                yield p
    pulse_list = list(flatten_pulses())
    for ct, pulse in enumerate(pulse_list):
        awg = pulse.channel.phys_chan.instrument
        if awg not in translators:
            # Resolve the driver module named by the physical channel once
            # per AWG.
            translators[awg] = getattr(QGL.drivers,
                                       pulse.channel.phys_chan.translator)
        if pulse.label not in awg_pulses[awg]:
            awg_pulses[awg][pulse.label] = pulse_list[ct]
    for awg, ps in awg_pulses.items():
        #load the offset dictionary for this AWG
        try:
            with open(path + "-" + awg + ".offsets", "rb") as FID:
                offsets = pickle.load(FID)
        except IOError:
            # Without the offsets we cannot locate waveforms inside the
            # file; skip this AWG's pulses rather than failing outright.
            print("Offset file not found for {}, skipping pulses {}".format(
                awg, [str(p) for p in ps.values()]))
            continue
        print("Updating pulses for {}".format(awg))
        translators[awg].update_wf_library(path + "-" + awg + ".aps", ps,
                                           offsets)
def generate_charts_and_data() -> None:
    """Generate and print the data used in the cool extras section, and
    save the relevant charts in the cool extras folder.
    """
    graph = get_dataset_data.get_song_graph_from_decades(DECADES)
    similar = most_similar_continuous_attr(graph, ignore={'year', 'explicit'})
    print('Most similar attributes:', similar)
    _generate_acousticness_energy_chart(graph)
    for decade in DECADES:
        _generate_data_and_charts_by_decade(graph, decade)
def is_water(residue):
    """Check whether a residue is a water molecule.

    Parameters
    ----------
    residue : a residue from a protein structure object made with PDBParser().

    Returns
    -------
    Boolean
        True if residue is water, False otherwise.
    """
    # The hetero-field is the first element of the residue id; water
    # residues carry a 'W' marker there.
    hetfield = residue.get_id()[0]
    return hetfield[0] == 'W'
def _step(context, url):
"""
:type context: HackedContext
"""
context.driver.get(url) | 5,324,948 |
def import_all_item_types():
    """
    Imports all eve item types. This should only be called once when the
    database is initially set up.

    Downloads every page of type ids from ESI, fetches each type's
    details, and upserts one ObjectType row per item.
    """
    client = EsiClient()
    page_count = client.get_page_count("/v1/universe/types/")
    logger.info("{} pages of items to download".format(page_count))
    data = client.get_multiple("/v1/universe/types/?page={}",
                               [p + 1 for p in range(page_count)])
    logger.info("all pages downloaded")
    item_ids = []
    for page_data in data.values():
        item_ids.extend(page_data)
    item_data = client.get_multiple("/v3/universe/types/{}/", item_ids)
    logger.info("all item data downloaded")
    for item in item_data.values():
        # BUG FIX: update_or_create() returns an (object, created) tuple
        # and already persists the row, so the original code collected
        # tuples and then bulk_create()'d rows that were already saved.
        ObjectType.objects.update_or_create(
            ccp_id=item["type_id"],
            defaults={
                "name": item["name"],
                "volume": item.get("volume"),
                "packaged_volume": item.get("packaged_volume"),
                "group_id": item["group_id"],
                # Fall back to the type id when no dedicated icon exists.
                "icon_id": item.get("icon_id") if item.get("icon_id") else item["type_id"],
                "market_group": MarketGroup.get_object(item.get("market_group_id")),
                "published": item["published"]
            }
        )
    logger.info("all item data committed")
def gelu(input_tensor):
    """Gaussian Error Linear Unit.

    A smoother alternative to the RELU activation.
    Original paper: https://arxiv.org/abs/1606.08415

    Args:
      input_tensor: float Tensor to perform activation.

    Returns:
      `input_tensor` with the GELU activation applied.
    """
    # Gaussian CDF evaluated at the input, expressed via erf.
    gaussian_cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
    return input_tensor * gaussian_cdf
def index():
    """Route for signing the policy document or REST headers."""
    request_payload = request.get_json()
    if request_payload.get('headers'):
        response_data = sign_headers(request_payload['headers'])
    else:
        # Find the condition dict that carries the AWS credential.
        credential_condition = next(
            c for c in request_payload['conditions'] if 'x-amz-credential' in c)
        # BUG FIX: dict views are not indexable on Python 3, so the
        # original ``.values()[0]`` raised TypeError; take the first value
        # via an iterator instead (works on Python 2 as well).
        credential = next(iter(credential_condition.values()))
        response_data = sign_policy(request.data, credential)
    return jsonify(response_data)
def create_template_bridge(self):
    """Return the template bridge configured."""
    if not self.config.template_bridge:
        # No custom bridge configured: fall back to Sphinx's builtin loader.
        from sphinx.jinja2glue import BuiltinTemplateLoader
        return BuiltinTemplateLoader()
    bridge_cls = self.app.import_object(
        self.config.template_bridge, 'template_bridge setting')
    return bridge_cls()
def scan(basedir, scanner=SCANNER):
    """Scan ``basedir`` and yield Fileset models, logging a warning for
    each broken set instead of aborting."""
    logger = getLogger(__name__)
    detected = detect_filesets(basedir, scanner.values())
    for filesetinfo in detected:
        try:
            fileset = make_fileset(filesetinfo)
        except BrokenFileset as e:
            logger.warn(e)
            continue
        yield fileset
def getIcons(xml):
    """
    Print out all icons found in xml.

    Writes two files:
      * download_icons.txt — full repo URLs, one per icon
      * icons.txt          — bare icon names (required for grepping during
                             the re-dump/refresh script)
    """
    doc = minidom.parse(xml)
    fdroidNode = doc.firstChild
    # BUG FIX: iterate the elements directly instead of indexing with a
    # manual counter; the original also had a no-op ``list(set(...))``
    # whose result was discarded (dead code), removed here.
    iconsnames = []
    for iconlocation in fdroidNode.getElementsByTagName("icon"):
        iconsnames.append(iconlocation.firstChild.data.encode('ascii', 'ignore'))
    ## Magical Regex to create downloadable url's
    add_fdroid_url = [re.sub(regex_line_start, (repo_url + 'icons/'), string)
                      for string in iconsnames]
    ## TIP: open with mode 'a' instead if you want all url's in a single file.
    with open('download_icons.txt', 'w') as f:
        f.write("\n".join(add_fdroid_url))
    with open('icons.txt', 'w') as f:
        f.write("\n".join(iconsnames))
def test_one_no_min_limit_correct_number():
    """Parse a limited number."""
    args = CommandArgs()
    number_arg = args.add_argument("number")
    number_arg.max_limit = None
    parsed = args.parse(None, "120")
    assert bool(parsed)
    assert parsed.number == 120
def make_answer(rdtype, answers=None, additionals=None, authorities=None):
    """For mocking an answer. We make an answer without any message (what would
    normally come over the network, to be parsed). We instead make a blank
    object for the sake of test complexity, and later attach the appropriate
    rrsets to the answer.
    This may cause some tests to fail that test attributes that are assigned
    during the creation of an Answer (flags?).
    The answers, additionals, and authorities should be lists of strings, with
    data fields space-separated. Each string representing one RR. See RFC for
    order of field per type.
    ex: MX would be '<Preference> <Mail Exchanger>'
    """
    answer = dns.resolver.Answer(
        dns.name.from_text(TEST_DOMAIN),
        getattr(dns.rdatatype, rdtype.upper()),
        dns.rdataclass.IN,
        dns.message.from_text(''),
        raise_on_no_answer=False
    )
    if answers:
        rrset = dns.rrset.from_text(TEST_DOMAIN, 60, 'IN', rdtype, *answers)
        answer.response.answer = [rrset]
    if additionals:
        rrset = dns.rrset.from_text(TEST_DOMAIN, 60, 'IN', rdtype, *additionals)
        # BUG FIX: additionals were previously assigned to the *answer*
        # section, clobbering any answer rrset set just above; they belong
        # in the additional section.
        answer.response.additional = [rrset]
    if authorities:
        rrset = dns.rrset.from_text(TEST_DOMAIN, 60, 'IN', rdtype, *authorities)
        answer.response.authority = [rrset]
    return answer
def consolidate_payoff_results(period, reporter_configuration, simulation_output, score_map, priority_based):
    """
    Gather per-run payoff metrics according to a simulation result.

    Annotates every consolidated reporter entry with the period and its
    payoff score.

    :param period: Description of the period.
    :param reporter_configuration: List of reporter configurations.
    :param simulation_output: Simulation output object providing
        get_consolidated_output().
    :param score_map: Mapping from priority constant to payoff score.
    :param priority_based: Whether the payoff function is priority-based.
    :return: Consolidated metrics in a list; each entry gains 'period'
        and 'payoff_score' keys.
    """
    simulation_results = simulation_output.get_consolidated_output(reporter_configuration)
    logger.info(
        "Payoff function parameters: Priority-based " + str(priority_based) + " Severe Score: " + str(
            score_map[simdata.SEVERE_PRIORITY]) + " Non-Severe Score " + str(score_map[simdata.NON_SEVERE_PRIORITY]))
    for reporter_info in simulation_results:
        # Mutates the entries in place; the same list is then returned.
        reporter_info["period"] = period
        payoff_score = get_payoff_score(reporter_info=reporter_info, score_map=score_map, priority_based=priority_based)
        reporter_info["payoff_score"] = payoff_score
    return simulation_results
def write_sentence_tabs(t, f, fmt=3):
    """Write sentence ``t`` to ``f`` in export (tab-separated) format,
    and does NOT write the #EOS marker.

    Writes one line per terminal first, then one line per nonterminal
    node (sorted by id, prefixed with '#').

    :param t: tree object exposing ``terminals`` and ``node_table``
    :param f: writable text file
    :param fmt: export format version; 4 adds a lemma column
    """
    # Terminal lines: word, [lemma], cat, morph, edge label, parent id,
    # plus any secondary edges/comment as extra columns.
    for n in t.terminals:
        if n.parent:
            parent_id = n.parent.id
        else:
            parent_id = '0'
        extra = ''
        if hasattr(n, 'secedge') and n.secedge is not None:
            # Secondary edges: (label, target) pairs appended as columns.
            for secedge in n.secedge:
                tgt = secedge[1]
                if tgt.isTerminal():
                    tgt_id = tgt.start
                else:
                    tgt_id = tgt.id
                extra += '\t%s\t%s' % (secedge[0], tgt_id)
        if hasattr(n, 'comment') and n.comment:
            if extra:
                extra += ' '
            else:
                extra = '\t'
            extra += '%% ' + n.comment
        if fmt == 4:
            lem = getattr(n, 'lemma', None)
            if lem is None:
                lem = '--'
            lemma_column = pad_with_tabs(lem, 3)
        else:
            lemma_column = ''
        n_word = n.word
        f.write('%s%s%s%s%s%s%s\n'%(pad_with_tabs(n_word, 3),
                                    lemma_column,
                                    pad_with_tabs(n.cat, 1),
                                    pad_with_tabs(n.morph, 2),
                                    pad_with_tabs(n.edge_label, 1),
                                    parent_id, extra))
    all_nodes = list(t.node_table.values())
    all_nodes.sort(key=lambda n: n.id)
    if fmt == 4:
        # Nonterminals carry no lemma; use the placeholder column.
        lemma_column = pad_with_tabs('--', 3)
    else:
        lemma_column = ''
    for n in all_nodes:
        if n is not t.node_table[n.id]:
            print("%s: node %s may be duplicate"%(
                getattr(t, 'sent_no'), n.id), file=sys.stderr)
            assert False
        if n.parent:
            parent_id = n.parent.id
        else:
            parent_id = '0'
        extra = ''
        if hasattr(n, 'secedge') and n.secedge is not None:
            for secedge in n.secedge:
                tgt = secedge[1]
                if tgt.isTerminal():
                    tgt_id = tgt.start
                else:
                    tgt_id = tgt.id
                extra += '\t%s\t%s' % (secedge[0], tgt_id)
        if hasattr(n, 'comment') and n.comment is not None:
            if extra:
                extra += ' '
            else:
                # NOTE(review): the terminal loop seeds this with '\t'
                # while a plain space is used here — confirm the
                # asymmetry is intended.
                extra = ' '
            extra += '%% ' + n.comment
        f.write('%s%s%s%s%s%s%s\n' % (pad_with_tabs('#%s' % (n.id,), 3),
                                      lemma_column,
                                      pad_with_tabs(n.cat, 1),
                                      pad_with_tabs(n.attr, 2),
                                      pad_with_tabs(n.edge_label, 1),
                                      parent_id, extra))
def artPuttyCtx(*args, **kwargs):
    """
    Context command that sets the flags on the artAttrContext, which is
    the base context for attribute painting operations.

    Returns: None
    """
    return None
def single_ray_belief_propagation(
    S,
    ray_voxel_indices,
    ray_to_occupancy_accumulated_pon,
    ray_to_occupancy_messages_pon,
    output_size
):
    """Run the sum product belief propagation for a single ray

    Arguments
    ---------
    S: tensor (M,) dtype=float32
        The depth probability distribution for that ray
    ray_voxel_indices: tensor (M, 3), dtype=int32
        The voxel indices in the voxel grid per ray
    ray_to_occupancy_accumulated_pon: tensor (D1, D2, D3), dtype=float32
        Accumulator used to hold the quotient of the positive ray to occupancy
        message with the negative ray to occupancy message (in logspace)
    ray_to_occupancy_messages_pon: tensor (M, 1), dtype=float32
        Ray to occupancy messages (in logspace)
    output_size: int
        Pad the output with 0 until its size is output_size

    Returns
    -------
    tensor (output_size,), dtype=float32
        Updated ray-to-occupancy log-odds messages, zero-padded from the
        ray's M voxels up to output_size.
    """
    # Occupancy-to-ray messages for the positive (occupied) case.
    occupancy_to_ray = extract_occupancy_to_ray_pos(
        ray_voxel_indices,
        ray_to_occupancy_accumulated_pon,
        ray_to_occupancy_messages_pon
    )
    # Compute the cumulative products in linear time (see eq. 13, 14 Ulusoy
    # 3DV)
    # For the computation of the cumulative product we need
    # the occupancy-to-ray messages for the negative case.
    # We append 1 at the top because for the o_1 voxel this term is equal to 1
    occupancy_to_ray_neg_cumprod = K.tf.cumprod(
        1.0 - occupancy_to_ray,
        exclusive=True
    )
    # Compute the part of the messages that is the same for positive and
    # negative messages
    common_part = occupancy_to_ray_neg_cumprod * S
    ray_to_occupancy_new_common = K.tf.cumsum(
        occupancy_to_ray * common_part,
        exclusive=True,
    )
    # Finalize the positive messages
    ray_to_occupancy_new_positive = common_part + ray_to_occupancy_new_common
    # Finalize the negative messages (adding 2nd part of eq. 14 Ulusoy 3DV)
    # The summations we want to calculate are as follows:
    # i=1, \sum_{i=2}^N(\cdot)
    # i=2, \sum_{i=3}^N(\cdot)
    # ...
    # i=N-2, \sum_{i=N-1}^N(\cdot)
    # lets assume that we have [a, b, c, d, e]. We first inverse the array,
    # thus resulting in [e, d, c, b, a] and then we compute the cumulative sum
    # on this array. The output is [e, e+d, e+d+c, e+d+c+b, e+d+c+b+a]. However
    # we want them in the inverse order, thus we inverse the output once again
    # and we have [e+d+c+b+a, e+d+c+b, e+d+c, e+d, e]
    # Finally we also divide with the incoming message for the negative case
    t1 = K.tf.cumsum(
        occupancy_to_ray * common_part,
        reverse=True,
        exclusive=True
    )
    t2 = t1 / (1.0 - occupancy_to_ray)
    ray_to_occupancy_new_negative = ray_to_occupancy_new_common + t2
    # Normalize the positive ray_to_occupancy message so that the
    # positive/negative pair sums to one.
    ray_to_occupancy_new_pos =\
        ray_to_occupancy_new_positive / (ray_to_occupancy_new_positive + ray_to_occupancy_new_negative)
    # Convert the normalized probability back to log-odds form.
    ray_to_occupancy_pon = K.log(ray_to_occupancy_new_pos) - K.log(1.0 - ray_to_occupancy_new_pos)
    # Make the size equal to the output_size by appending 0s
    M = K.shape(ray_to_occupancy_pon)[0]
    ray_to_occupancy_pon = K.concatenate([
        ray_to_occupancy_pon,
        K.tf.zeros((output_size-M,))
    ])
    return ray_to_occupancy_pon
def cse_postprocess(cse_output):
    """ Perform CSE Postprocessing

    :arg: output from SymPy CSE with tuple format: (list of ordered pairs that
        contain substituted symbols and their replaced expressions, reduced SymPy expression)
    :return: output from SymPy CSE where postprocessing, such as back-substitution of addition/product
        of symbols, has been applied to the replaced/reduced expression(s)

    >>> from sympy.abc import x, y
    >>> from sympy import cse, cos, sin
    >>> cse_out = cse(3 + x + cos(3 + x))
    >>> cse_postprocess(cse_out)
    ([], [x + cos(x + 3) + 3])
    >>> cse_out = cse(3 + x + y + cos(3 + x + y))
    >>> cse_postprocess(cse_out)
    ([(x0, x + y + 3)], [x0 + cos(x0)])
    >>> cse_out = cse(3*x + cos(3*x))
    >>> cse_postprocess(cse_out)
    ([], [3*x + cos(3*x)])
    >>> cse_out = cse(3*x*y + cos(3*x*y))
    >>> cse_postprocess(cse_out)
    ([(x0, 3*x*y)], [x0 + cos(x0)])
    >>> cse_out = cse(x**2 + cos(x**2))
    >>> cse_postprocess(cse_out)
    ([], [x**2 + cos(x**2)])
    >>> cse_out = cse(x**3 + cos(x**3))
    >>> cse_postprocess(cse_out)
    ([(x0, x**3)], [x0 + cos(x0)])
    >>> cse_out = cse(x*y + cos(x*y) + sin(x*y))
    >>> cse_postprocess(cse_out)
    ([(x0, x*y)], [x0 + sin(x0) + cos(x0)])
    >>> from sympy import exp, log
    >>> expr = -x + exp(-x) + log(-x)
    >>> cse_pre = cse_preprocess(expr, declare=True)
    >>> cse_out = cse(cse_pre[0])
    >>> cse_postprocess(cse_out)
    ([], [_NegativeOne_*x + exp(_NegativeOne_*x) + log(_NegativeOne_*x)])
    """
    replaced, reduced = cse_output
    # Work on shallow copies so the caller's cse output is left untouched.
    replaced, reduced = replaced[:], reduced[:]
    i = 0
    while i < len(replaced):
        sym, expr = replaced[i]; args = expr.args
        # Search through replaced expressions for negative symbols
        if (expr.func == sp.Mul and len(expr.args) == 2 and any(a1.func == sp.Symbol and \
                (a2 == sp.S.NegativeOne or '_NegativeOne_' in str(a2)) for a1, a2 in [args, reversed(args)])):
            # Back-substitute the negated symbol everywhere it appears.
            for k in range(i + 1, len(replaced)):
                if sym in replaced[k][1].free_symbols:
                    replaced[k] = (replaced[k][0], replaced[k][1].subs(sym, expr))
            for k in range(len(reduced)):
                if sym in reduced[k].free_symbols:
                    reduced[k] = reduced[k].subs(sym, expr)
            # Remove the replaced expression from the list
            replaced.pop(i)
            if i != 0: i -= 1
        # Search through replaced expressions for addition/product of 2 or less symbols
        if ((expr.func == sp.Add or expr.func == sp.Mul) and 0 < len(expr.args) < 3 and \
                all((arg.func == sp.Symbol or arg.is_integer or arg.is_rational) for arg in expr.args)) or \
                (expr.func == sp.Pow and expr.args[0].func == sp.Symbol and expr.args[1] == 2):
            sym_count = 0 # Count the number of occurrences of the substituted symbol
            for k in range(len(replaced) - i):
                # Check if the substituted symbol appears in the replaced expressions
                if sym in replaced[i + k][1].free_symbols:
                    for arg in sp.preorder_traversal(replaced[i + k][1]):
                        if arg.func == sp.Symbol and str(arg) == str(sym):
                            sym_count += 1
            for k in range(len(reduced)):
                # Check if the substituted symbol appears in the reduced expression
                if sym in reduced[k].free_symbols:
                    for arg in sp.preorder_traversal(reduced[k]):
                        if arg.func == sp.Symbol and str(arg) == str(sym):
                            sym_count += 1
            # If the number of occurrences of the substituted symbol is 2 or less, back-substitute
            # (a symbol used at most twice is cheaper to inline than to keep).
            if 0 < sym_count < 3:
                for k in range(i + 1, len(replaced)):
                    if sym in replaced[k][1].free_symbols:
                        replaced[k] = (replaced[k][0], replaced[k][1].subs(sym, expr))
                for k in range(len(reduced)):
                    if sym in reduced[k].free_symbols:
                        reduced[k] = reduced[k].subs(sym, expr)
                # Remove the replaced expression from the list
                replaced.pop(i); i -= 1
        i += 1
    return replaced, reduced
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Configure the sensor platform for home assistant."""
    scan_interval = hass.data[DATA_KEY]
    _LOGGER.info("scan interval= %s", scan_interval)
    # Register one energy sensor per reading kind.
    add_devices([
        LightwaveEnergy("CURRENT_USAGE", scan_interval),
        LightwaveEnergy("TODAY_USAGE", scan_interval),
    ])
def lap(j, s, alpha):
    """Laplace coefficient b_s^(j)(alpha), evaluated by numerical
    quadrature of its integral definition over [0, 2*pi]."""
    def integrand(x):
        return np.cos(j * x) / (1. - (2. * alpha * np.cos(x)) + alpha**2.)**s
    value, _error = integrate.quad(integrand, 0., 2. * np.pi)
    return 1. / np.pi * value
def calendar_add():
    """Adds a calendar to the database according to the infos in POST data.

    Reads ``calendar_name``, ``std_email`` and ``google_calendar_id`` from
    the POST form. If the supplied google id passes validation, the
    calendar is only recorded locally (assumed to already exist at
    Google); otherwise a new Google calendar is created via the API first
    and its id stored. Always redirects back to the calendar list view.
    """
    calendar_name = request.form["calendar_name"]
    std_email = request.form["std_email"]
    google_calendar_id = request.form["google_calendar_id"]
    if check_google_calendar_id(google_calendar_id):
        # Add the google calendar directly to the local DB (Assume that Calendar has been already created)
        cal_obj = Calendar(summary=calendar_name,
                           std_email=std_email,
                           calendar_id_google=google_calendar_id)
        try:
            db.session.add(cal_obj)
            db.session.commit()
        except Exception:
            # NOTE(review): the failed session is not rolled back here
            # before redirecting — confirm whether a rollback is needed.
            flash(('Could not add calendar {} to google calendar'.format(
                calendar_name)),
                category="error")
            return redirect(url_for("get_calendars"))
        return redirect(url_for("get_calendars"))
    else:
        # Creating a google calendar and receiving the gcal ID from Google
        cal_record = Calendar.query.filter_by(summary=calendar_name).first()
        if cal_record is None:
            # NOTE(review): the time zone is hard-coded — confirm this is
            # intended for all created calendars.
            calendar__ = {
                'summary': calendar_name,
                'timeZone': 'Africa/Algiers'
            }
            resp = google.post("/calendar/v3/calendars", json=calendar__)
            if resp.status_code == 200:
                if "id" in resp.json().keys():
                    calendar_id = resp.json()["id"]
                    calendar_obj = Calendar(calendar_id_google=calendar_id,
                                            summary=calendar_name,
                                            std_email=std_email)
                    db.session.add(calendar_obj)
                    db.session.commit()
                    flash(('Added calendar {} to google calendar'.format(
                        calendar_name)),
                        category="success")
                    return redirect(url_for("get_calendars"))
                else:
                    flash(("Invalid response from calendar api"),
                          category="danger")
                    return redirect(url_for('get_calendars')), 302
            else:
                flash(("Calendar API returned a non 200 response"),
                      category="danger")
                return redirect(url_for('get_calendars')), 302
        else:
            # A calendar with this summary already exists locally.
            flash(("Calendar {} already found in application database".format(
                calendar_name)),
                category="info")
            return redirect(url_for('get_calendars')), 302
def proportional_allocation_by_location_and_activity(df, sectorcolumn):
    """
    Creates a proportional allocation within each aggregated sector within a location.

    The denominator is built from the rows whose sector code has the
    shortest length present in ``df`` (i.e. the most aggregated level),
    summed per location/activity group; each row's 'HelperFlow' is then
    divided by that denominator to yield a 'FlowAmountRatio' column.

    :param df: flow-by-sector DataFrame; assumes a 'HelperFlow' column is
        present — confirm upstream.
    :param sectorcolumn: name of the sector column to allocate on
    :return: DataFrame with an added 'FlowAmountRatio' column
    """
    # tmp replace NoneTypes with empty cells
    df = replace_NoneType_with_empty_cells(df)
    # denominator summed from highest level of sector grouped by location
    short_length = min(df[sectorcolumn].apply(lambda x: len(str(x))).unique())
    # want to create denominator based on short_length
    denom_df = df.loc[df[sectorcolumn].apply(lambda x: len(x) == short_length)].reset_index(drop=True)
    grouping_cols = [e for e in ['FlowName', 'Location', 'Activity', 'ActivityConsumedBy', 'ActivityProducedBy']
                     if e in denom_df.columns.values.tolist()]
    denom_df.loc[:, 'Denominator'] = denom_df.groupby(grouping_cols)['HelperFlow'].transform('sum')
    # list of column headers, that if exist in df, should be aggregated using the weighted avg fxn
    possible_column_headers = ('Location', 'LocationSystem', 'Year', 'Activity', 'ActivityConsumedBy', 'ActivityProducedBy')
    # list of column headers that do exist in the df being aggregated
    column_headers = [e for e in possible_column_headers if e in denom_df.columns.values.tolist()]
    merge_headers = column_headers.copy()
    column_headers.append('Denominator')
    # create subset of denominator values based on Locations and Activities
    denom_df_2 = denom_df[column_headers].drop_duplicates().reset_index(drop=True)
    # merge the denominator column with fba_w_sector df
    allocation_df = df.merge(denom_df_2,
                             how='left',
                             left_on=merge_headers,
                             right_on=merge_headers)
    # calculate ratio
    allocation_df.loc[:, 'FlowAmountRatio'] = allocation_df['HelperFlow'] / allocation_df['Denominator']
    allocation_df = allocation_df.drop(columns=['Denominator']).reset_index(drop=True)
    # fill empty cols with NoneType
    allocation_df = replace_strings_with_NoneType(allocation_df)
    # fill na values with 0
    allocation_df['HelperFlow'] = allocation_df['HelperFlow'].fillna(0)
    return allocation_df
def Console():
    """Factory returning the Console implementation that matches the
    current OS environment (POSIX or NT)."""
    if os.name == "posix":
        return POSIXConsole()
    if os.name == "nt":
        return NTConsole()
    raise NotImplementedError(
        "Console support not implemented for OS '{}'.".format(os.name))
def handle_set_ms(msg, value):
    """
    Handles an incoming 'set_<on/off>_ms' MQTT message.

    Sets the named output to ``value`` immediately and schedules a task
    that flips it back after the requested number of milliseconds.

    :param msg: The incoming MQTT message
    :type msg: paho.mqtt.client.MQTTMessage
    :param value: The value to set the output to
    :type value: bool
    :return: None
    :rtype: NoneType
    """
    try:
        duration_ms = int(msg.payload)
    except ValueError:
        raise InvalidPayload(
            "Could not parse ms value %r to an integer." % msg.payload)
    suffix = SET_ON_MS_TOPIC if value else SET_OFF_MS_TOPIC
    output_name = output_name_from_topic(msg.topic, topic_prefix, suffix)
    output_config = output_by_name(output_name)
    if output_config is None:
        return
    set_state(output_config, value)
    # Schedule the reversal once the requested duration has elapsed.
    revert_task = Task(
        time() + duration_ms / 1000.0,
        set_state,
        output_config,
        not value
    )
    scheduler.add_task(revert_task)
    _LOG.info(
        "Scheduled output %r to change back to %r after %r ms.",
        output_config["name"],
        not value,
        duration_ms
    )
def tests():
    """
    Run Python unit tests (creating the test database first, tolerating
    the case where it already exists).
    """
    with settings(warn_only=True):
        local('createdb elections14test')
        local('nosetests')
def prepare_target():
    """
    Creates an example target face (a single triangle).

    :return: tuple of (list of RFTargetVertex, list of face index lists)
    """
    # NOTE: a large commented-out alternative (a two-triangle quad target)
    # was removed here as dead code; the docstring also previously claimed
    # only the vertex list was returned, while a (vertices, faces) tuple is.
    size = 2
    target = [
        rfsm.RFTargetVertex(0, 0, 0, -size, 1 - size),
        rfsm.RFTargetVertex(0, 1, 1, -size, size),
        rfsm.RFTargetVertex(1, 0.5, 0.5, size, 0.45),
    ]
    target_faces = [[0, 1, 2]]
    return target, target_faces
def data_fixture():
    """Fixture data."""
    raw = json.loads(load_fixture("data.json", "evil_genius_labs"))
    return {entry["name"]: entry for entry in raw}
def vectorize(sentence, idf_weight, vocab, convey='idf'):
    """
    Turn a token sequence into a dense, vocab-sized float32 vector.

    idf_weight: {word: weight}
    vocab: {word: index}
    convey: 'idf' accumulates idf weights per occurrence,
            'count' accumulates raw occurrence counts.
    Tokens absent from the vocabulary are ignored.
    """
    vec = np.zeros(len(vocab), dtype=np.float32)
    for token in sentence:
        index = vocab.get(token)
        if index is None:
            continue
        if convey == 'idf':
            vec[index] += idf_weight[token]
        elif convey == 'count':
            vec[index] += 1
        else:
            raise NotImplementedError
    return vec
def ln_s(orig, new, verbose=False, force=False):
    """Create symbolic link.

    Parameters:
    -----------
    orig: string
        Name of original file
    new: string
        Name of new, to be created, link
    verbose: bool
        Verbose output
    force: bool
        If True, link creation is forced even if file exists

    Returns:
    --------
    None
    """
    # Guard clause: nothing to do when the original does not exist.
    if not (os.path.isfile(orig) or os.path.isdir(orig)):
        if verbose:
            print('Original file \'{}\' does not exist, skipping...'.format(orig))
        return
    # NOTE(review): a *broken* symlink at ``new`` passes neither isfile
    # nor isdir, so os.symlink below would raise FileExistsError;
    # consider os.path.lexists if that case matters.
    if os.path.isfile(new) or os.path.isdir(new):
        # IDIOM FIX: was ``if force == False`` — compare booleans directly.
        if not force:
            if verbose:
                print('File \'{}\' exists, skipping link creation...'.format(new))
            return
        if verbose:
            print('File \'{}\' exists, deleting file and create new link...'.format(new))
        os.remove(new)
        os.symlink(orig, new)
    else:
        if verbose:
            print('Creating link \'{}\' <- \'{}\''.format(orig, new))
        os.symlink(orig, new)
async def director_v2_service_mock(
    aioresponses_mocker: AioResponsesMock,
) -> AioResponsesMock:
    """mocks responses of director-v2

    Registers mocked aiohttp responses for the director-v2 computation and
    cluster endpoints so tests can run without a live service. Returns the
    same mocker with all routes registered.
    """
    # computations
    # URL patterns for the computation lifecycle endpoints.
    create_computation_pattern = re.compile(
        r"^http://[a-z\-_]*director-v2:[0-9]+/v2/computations$"
    )
    get_computation_pattern = re.compile(
        r"^http://[a-z\-_]*director-v2:[0-9]+/v2/computations/.*$"
    )
    stop_computation_pattern = re.compile(
        r"^http://[a-z\-_]*director-v2:[0-9]+/v2/computations/.*:stop$"
    )
    # DELETE uses the same per-computation URL shape as GET.
    delete_computation_pattern = get_computation_pattern
    projects_networks_pattern = re.compile(
        r"^http://[a-z\-_]*director-v2:[0-9]+/v2/dynamic_services/projects/.*/-/networks$"
    )
    # Computation routes: create/stop/get/delete plus project networks patch.
    aioresponses_mocker.post(
        create_computation_pattern,
        callback=create_computation_cb,
        status=web.HTTPCreated.status_code,
        repeat=True,
    )
    aioresponses_mocker.post(
        stop_computation_pattern,
        status=web.HTTPAccepted.status_code,
        repeat=True,
    )
    aioresponses_mocker.get(
        get_computation_pattern,
        status=web.HTTPAccepted.status_code,
        callback=get_computation_cb,
        repeat=True,
    )
    aioresponses_mocker.delete(delete_computation_pattern, status=204, repeat=True)
    aioresponses_mocker.patch(projects_networks_pattern, status=204, repeat=True)
    # clusters
    # Cluster routes; the query-string part of the regex allows arbitrary
    # `?key=value&...` parameters.
    cluster_route_pattern = re.compile(
        r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)?\?(\w+(?:=\w+)?\&?){1,}$"
    )
    aioresponses_mocker.post(
        re.compile(
            r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters\?(\w+(?:=\w+)?\&?){1,}$"
        ),
        callback=create_cluster_cb,
        status=web.HTTPCreated.status_code,
        repeat=True,
    )
    aioresponses_mocker.get(
        re.compile(
            r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters\?(\w+(?:=\w+)?\&?){1,}$"
        ),
        callback=list_clusters_cb,
        status=web.HTTPCreated.status_code,
        repeat=True,
    )
    aioresponses_mocker.get(
        re.compile(
            r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)\?(\w+(?:=\w+)?\&?){1,}$"
        ),
        callback=get_cluster_cb,
        status=web.HTTPCreated.status_code,
        repeat=True,
    )
    aioresponses_mocker.get(
        re.compile(
            r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters/[0-9]+/details\?(\w+(?:=\w+)?\&?){1,}$"
        ),
        callback=get_cluster_details_cb,
        status=web.HTTPCreated.status_code,
        repeat=True,
    )
    aioresponses_mocker.patch(
        re.compile(
            r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)\?(\w+(?:=\w+)?\&?){1,}$"
        ),
        callback=patch_cluster_cb,
        status=web.HTTPCreated.status_code,
        repeat=True,
    )
    aioresponses_mocker.delete(
        re.compile(
            r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)\?(\w+(?:=\w+)?\&?){1,}$"
        ),
        status=web.HTTPNoContent.status_code,
        repeat=True,
    )
    # Cluster ping endpoints: global ping and per-cluster ping.
    aioresponses_mocker.post(
        re.compile(r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters:ping$"),
        status=web.HTTPNoContent.status_code,
        repeat=True,
    )
    aioresponses_mocker.post(
        re.compile(
            r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+):ping\?(\w+(?:=\w+)?\&?){1,}$"
        ),
        status=web.HTTPNoContent.status_code,
        repeat=True,
    )
    return aioresponses_mocker
def test_validation_unset_type_hints():
    """Test that unset type hints are handled correctly (and treated as Any)."""
    @my_registry.optimizers("test_optimizer.v2")
    def test_optimizer_v2(rate, steps: int = 10) -> None:
        # ``rate`` deliberately has no annotation; validation should treat
        # it as Any rather than rejecting the config value.
        return None
    config = {"test": {"@optimizers": "test_optimizer.v2", "rate": 0.1, "steps": 20}}
    # resolve() must not raise despite the unannotated ``rate`` parameter.
    my_registry.resolve(config)
def get_online_featurestore_connector(featurestore=None):
    """
    Gets a JDBC connector for the online feature store

    Args:
        :featurestore: the feature store name; defaults to the project's
            feature store when None

    Returns:
        a DTO object of the JDBC connector for the online feature store
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:  # try with metadata cache
        if update_cache_default:
            core._get_featurestore_metadata(featurestore, update_cache=True)
        return core._do_get_online_featurestore_connector(
            featurestore,
            core._get_featurestore_metadata(featurestore,
                                            update_cache=update_cache_default))
    # A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit;
    # catch Exception so only genuine errors trigger the retry with a
    # freshly updated metadata cache.
    except Exception:  # retry with updated metadata
        return core._do_get_online_featurestore_connector(
            featurestore,
            core._get_featurestore_metadata(featurestore, update_cache=True))
def detect_iterative(scales,
                     counts,
                     background,
                     exposure,
                     output_fits,
                     output_regions,
                     debug_output_folder,
                     overwrite):
    """Run an iterative multi-scale source detection.

    Reads the counts/background/exposure maps, converts the requested
    scales from degrees to pixels, runs IterativeSourceDetector and saves
    the resulting catalog (FITS) and region file.
    """
    from collections import OrderedDict
    import numpy as np
    from astropy.io import fits
    from ..detect import IterativeSourceDetector
    # Load the three input maps, replacing each filename by its pixel data.
    input_maps = OrderedDict([('counts', counts),
                              ('background', background),
                              ('exposure', exposure)])
    for name in input_maps:
        log.info('Reading {0} map: {1}'.format(name, input_maps[name]))
        input_maps[name] = fits.getdata(input_maps[name])
    # Convert scales from degrees to pixel units using the counts map WCS.
    deg_per_pix = np.abs(fits.getval(counts, 'CDELT1'))
    scales_deg = scales
    scales_pix = np.array(scales_deg) / deg_per_pix
    log.info('Number of scales: {0}'.format(len(scales_deg)))
    log.info('DEG_PER_PIX: {0}'.format(deg_per_pix))
    log.info('Scales in deg: {0}'.format(scales_deg))
    log.info('Scales in pix: {0}'.format(scales_pix))
    # Run the iterative source detection.
    detector = IterativeSourceDetector(maps=input_maps,
                                       scales=scales_pix,
                                       debug_output_folder=debug_output_folder,
                                       overwrite=overwrite)
    detector.run()
    # Persist the detection results.
    log.info('Writing {}'.format(output_fits))
    detector.save_fits(output_fits)
    log.info('Writing {}'.format(output_regions))
    detector.save_regions(output_regions)
def run():
    """requirement for 1F: print the count and sorted names of stations
    whose typical range data is inconsistent."""
    stations = build_station_list()
    inconsistent_list = inconsistent_typical_range_stations(stations)
    print(len(inconsistent_list))
    # Collect station names with a generator expression instead of the
    # original index loop, and sort them alphabetically.
    station_names = sorted(station.name for station in inconsistent_list)
    print(station_names)
def create_multipoint_geometry(u, v, osr_spref):
    """
    wrapper; creates multipoint geometry in given projection

    Parameters
    ----------
    u : list of numbers
        input coordinates ("Rechtswert")
    v : list of numbers
        input coordinates ("Hochwert")
    osr_spref : OGRSpatialReference
        spatial reference of given coordinates

    Returns
    -------
    OGRGeometry
        a geometry holding all points defined by (u, v)
    """
    multipoint = ogr.Geometry(ogr.wkbMultiPoint)
    multipoint.AssignSpatialReference(osr_spref)
    # One wkbPoint per (u, v) pair; z coordinate fixed at 0.
    for idx, easting in enumerate(u):
        vertex = ogr.Geometry(ogr.wkbPoint)
        vertex.AddPoint(easting, v[idx], 0)
        multipoint.AddGeometry(vertex)
    return multipoint
def validate_features_library(features_extraction_library_name):
    """Validate that the named features-extraction library can be imported.

    Delegates to ``validate_importing_features_extraction_library``.
    Returns None (the original docstring's claim of returning the injected
    library does not match the code).
    """
    validate_importing_features_extraction_library(features_extraction_library_name)
def _CheckChromeRevOption(_option, _opt_str, value, parser):
    """optparse callback: validate the chrome_rev option and store it."""
    chrome_rev = value.strip()
    # Reject anything outside the known-good revision identifiers.
    if chrome_rev not in constants.VALID_CHROME_REVISIONS:
        raise optparse.OptionValueError('Invalid chrome rev specified')
    parser.values.chrome_rev = chrome_rev
def extract_zip(fpath):
    """
    Extracts zip file into the containing directory.

    Shows a tqdm progress bar, one tick per archive member.
    """
    target_dir, fname = os.path.split(fpath)
    with ZipFile(fpath, 'r') as zip_ref:
        # Hoisted: the original called zip_ref.namelist() twice, scanning
        # the archive directory once for the iterable and once for total.
        members = zip_ref.namelist()
        for m in tqdm(
                iterable=members,
                total=len(members),
                desc='Extracting `{}`'.format(fname)):
            zip_ref.extract(member=m, path=target_dir)
def log_format_elk(app_name, port, ip, elk_sign="elk"):
    """
    Attach a logstash TCP handler to the root logger.

    :param app_name: name of the index to create (used as the logstash
        message type)
    :param port: logstash TCP port
    :param ip: logstash host address
    :param elk_sign: filter keyword passed to ContextFilter
    :return: None
    """
    # NOTE(review): the bare string below is a leftover second "docstring"
    # ("Obtain the Elastic Search Handler"); it is a no-op statement and is
    # kept byte-identical here.
    """
    获得Elastic Search的 Handler
    :return:
    """
    handler = logstash.TCPLogstashHandler(ip, port=port, version=1, message_type=app_name)
    # Add a filter so that only error and important records are forwarded.
    handler.addFilter(ContextFilter(elk_sign))
    logging.getLogger('').addHandler(handler)
    return
def open_tif_image(input_path):
    # type: (str) -> np.ndarray
    """Open a tif image with skimage's tifffile plugin.

    Parameters:
        input_path (string): path where the image file is located.
    Returns:
        np.array of the tif image.
    """
    return skimage.io.imread(input_path, plugin="tifffile")
def check_is_valid_torchvision_architecture(architecture: str):
    """Raises a ValueError if *architecture* is not an available torchvision model."""
    model_registry = torchvision.models.__dict__
    # Lower-case, non-dunder callables in torchvision.models are the
    # model-constructor functions.
    candidates = sorted(
        attr
        for attr in model_registry
        if attr.islower()
        and not attr.startswith("__")
        and callable(model_registry[attr])
    )
    if architecture not in candidates:
        raise ValueError(f"{architecture} not in {candidates}")
def get_icd(url: str) -> requests.Response:
    """Get an ICD API endpoint.

    :param url: full URL of the ICD API endpoint to fetch.
    :return: the raw ``requests.Response`` (status is not checked here).
    """
    return requests.get(url, headers=get_icd_api_headers())
def evaluate(words, labels_pred, labels):
    """
    Evaluate chunk-level precision/recall/F1 and token accuracy on a test set.

    labels_pred, labels, words: sentence-level lists,
    eg: words --> [[i love shanghai],[i love u],[i do not know]]
    Each inner element is a label index or word index.
    Returns (accuracy, f1, precision, recall).
    """
    token_hits = []
    correct_chunks, gold_total, pred_total = 0., 0., 0.
    for gold_seq, pred_seq, _sent in zip(labels, labels_pred, words):
        # Token-level accuracy over aligned positions.
        token_hits.extend(g == p for g, p in zip(gold_seq, pred_seq))
        # Chunk-level counts via set intersection.
        gold_set = set(get_chunks(gold_seq))
        pred_set = set(get_chunks(pred_seq))
        correct_chunks += len(gold_set & pred_set)
        pred_total += len(pred_set)
        gold_total += len(gold_set)
    p = correct_chunks / pred_total if correct_chunks > 0 else 0
    r = correct_chunks / gold_total if correct_chunks > 0 else 0
    f1 = 2 * p * r / (p + r) if correct_chunks > 0 else 0
    acc = np.mean(token_hits)
    return acc, f1, p, r
def getApproximateArialStringWidth(st: str) -> float:
    """Calculate rough width of a word in a variable width font.

    By https://stackoverflow.com/users/234270/speedplane

    Args:
        st (str): The string you need a width for

    Returns:
        float: The rough width in picas

    To make sure that the names will fit in the space, at the given font
    size etc., if the space can fit 13 M chars across, then
    getApproximateArialStringWidth("M"*13) gives 10.53, so the space is 10
    picas wide, and we can exclude wider names.
    """
    # Character classes with their approximate widths in milli-inches.
    width_classes = (
        ("lij|' ", 37),
        ("![]fI.,:;/\\t", 50),
        ('`-(){}r"', 60),
        ("*^zcsJkvxy", 85),
        ("aebdhnopqug#$L+<>=?_~FZT" + string.digits, 95),
        ("BSPEAKVXY&UwNRCHD", 112),
        ("QGOMm%W@", 135),
    )
    total = 0
    for ch in st:
        for chars, width in width_classes:
            if ch in chars:
                total += width
                break
        else:
            # Unknown character: use the medium fallback width.
            total += 50
    # Convert milli-inches to picas (6 picas per inch).
    return total * 6 / 1000.0
def prepare_text(input_string):
    """Lower-case *input_string* and split it on whitespace.

    Parameters
    ----------
    input_string : string
        String to convert to a list of strings.

    Returns
    -------
    out_list : list
        Whitespace-separated, lower-cased tokens of the input string.
    """
    return input_string.lower().split()
def update(ctx):
    """Update asset."""
    # Placeholder body; presumably a CLI group/command callback whose real
    # work lives elsewhere — TODO confirm decorator context.
    pass
def TDF_Tool_TagList(*args):
    """
    * Returns the entry of <aLabel> as list of integers in <aTagList>.

    :param aLabel:
    :type aLabel: TDF_Label &
    :param aTagList:
    :type aTagList: TColStd_ListOfInteger &
    :rtype: void
    * Returns the entry expressed by <anEntry> as list of integers in <aTagList>.

    :param anEntry:
    :type anEntry: TCollection_AsciiString &
    :param aTagList:
    :type aTagList: TColStd_ListOfInteger &
    :rtype: void
    """
    # NOTE(review): appears to be a SWIG-generated overload dispatcher for
    # the native _TDF extension; argument resolution happens in C++.
    return _TDF.TDF_Tool_TagList(*args)
def img(header, body=None):
    """Alternate to Markdown's image tag. See
    http://octopress.org/docs/plugins/image-tag/ for usage.

    Returns an ``<img .../>`` tag on success, or the usage/error string
    when the header cannot be parsed.
    """
    match = re.match(__img_re, header)
    if match:
        attrs = match.groupdict()
        title = attrs.get('title')
        # Guard: the original passed a possibly-None title straight into
        # re.match, raising TypeError.
        m = re.match(__img_re_title, title) if title is not None else None
        if m:
            attrs['title'] = m.groupdict()['title']
            attrs['alt'] = m.groupdict()['alt']
        elif title is not None:
            # NOTE(review): this replace is a no-op as written; it was
            # probably meant to escape double quotes (e.g. to '&#34;').
            attrs['alt'] = attrs['title'].replace('"', '"')
        if 'class' in attrs and attrs['class'] is not None:
            attrs['class'] = attrs['class'].replace('"', '')
        if attrs:
            return '<img ' + ' '.join('%s="%s"' % (k, v) for k, v in iteritems(attrs) if v) + ' />'
    # Unparsable header: previously this crashed with AttributeError on
    # ``None.groupdict()``; now it falls through to the usage message.
    return ("Error processing input, expected syntax: "
            "{% img [class name(s)] [http[s]:/]/path/to/image [width [height]] "
            "[title text | \"title text\" [\"alt text\"]] %}")
def conexao_bd():
    """
    Connect to a MySQL database and return a cursor.
    """
    # Password prompt in case the DB access requires one;
    # otherwise just press ENTER.
    # NOTE(review): ``senha`` is not defined in this function — presumably a
    # module-level global; verify it is set before calling.
    # NOTE(review): the connection object itself is not returned, so the
    # caller has no handle to close it.
    conexao = sql.connect(
        host='localhost',
        user='root',
        password=senha
    )
    cursor = conexao.cursor()
    return cursor
def test_map_class_pass_4(example_results, tmpdir, read_image):
    """ Make a map with reasonable inputs, --before, --after, --qa,
    --predict-proba switches
    """
    image = tmpdir.join('classmap.gtif').strpath
    runner = CliRunner()
    # Invoke the `map class` CLI with all optional band switches enabled.
    result = runner.invoke(
        cli,
        ['-v', 'map',
         '--root', example_results['root'],
         '--result', example_results['results_dir_classified'],
         '--image', example_results['example_img'],
         '--after', '--before', '--qa', '--predict-proba',
         'class', '2005-06-01', image
         ])
    assert result.exit_code == 0
    img = read_image(image)
    # Band 1: class map, band 2: class probabilities, band 3: QA flags.
    np.testing.assert_equal(img[0, ...], classmap)
    np.testing.assert_equal(img[1, ...], classmap_proba)
    np.testing.assert_equal(img[2, ...], classmap_qa)
def fill_bin_content(ax, sens, energy_bin, gb, tb):
    """Annotate each (theta2, gammaness) cell of *ax* with its sensitivity.

    Parameters
    --------
    ax: matplotlib axes to annotate
    sens: 3-D array-like of sensitivity values, indexed
        ``sens[energy_bin][gammaness_bin][theta2_bin]``
    energy_bin: index of the energy bin to display
    gb: number of gammaness bins
    tb: number of theta2 bins

    Returns
    --------
    ax: the annotated axes
    """
    for i in range(gb):
        for j in range(tb):
            # Bin-centre coordinates for the annotation; theta2 spans
            # [0.005, 0.05], gammaness spans [0, 1].
            theta2 = 0.005 + 0.005 / 2 + ((0.05 - 0.005) / tb) * j
            gammaness = 0.1 / 2 + (1 / gb) * i
            ax.text(theta2, gammaness, "%.2f %%" % sens[energy_bin][i][j],
                    ha="center", va="center", color="w", size=8)
    return ax
def main(args=None):
    """
    Console script for grumpy_tools.

    The default command `grumpy run` will ran if no other selected.
    It mimics the CPython options, when possible and applicable.

    Please take a look on `grumpy run --help` for its implemented options.

    Example: all the following lines outputs Hello on the STDOUT\n
        $ python -c 'print("Hello")'\n
        $ grumpy -c 'print("Hello")'\n
        $ grumpy run -c 'print("Hello")'
    """
    # Body intentionally empty: presumably a Click group callback whose
    # subcommands do the actual work — TODO confirm decorator context.
def secure_account(account):
    """A utility function that secures accounts.

    Replaces ``account['password']`` in place with a PBKDF2-SHA256 hash
    (20000 rounds, 16-byte salt). Returns None.
    """
    # NOTE(review): passlib's ``encrypt`` is the legacy alias of ``hash`` —
    # confirm the installed passlib version still provides it.
    account['password'] = pbkdf2_sha256.encrypt(
        account['password'],
        rounds=20000,
        salt_size=16
    )
def main():
    """ Print out all of the options available to ForceBalance. """
    options = None
    tgt_opts = [None]
    # With exactly one CLI argument, parse it as an existing input file so
    # the printed template reflects its settings.
    if len(sys.argv) == 2:
        options, tgt_opts = parser.parse_inputs(sys.argv[1])
    lines = [
        "# ForceBalance input file generated by MakeInputFile.py",
        "# The octothorpe '#' is a comment symbol",
        "# There are two sections, the main options ($options) and the target options ($target)",
        "# A ForceBalance calculation will have one $options section and as one $target section per optimization target",
        "# The most important options are listed at the top; options are also roughly grouped by their application",
        "# Note: If the specified value is 'None' then the option will truly be set to None - not the string 'None'",
        "# Note: Section option types are more complicated and may require you to read the documentation",
        "# Note: Boolean option types require no value, the key being present implies 'True'",
        "# Note: List option types are specified using spaces as the delimiter - i.e. forcefield ff1.itp ff2.itp ; delete empty brackets before use [] ",
        "",
    ]
    lines += parser.printsection("$options", options, parser.gen_opts_types)
    for tgt_opt in tgt_opts:
        lines.append("\n")
        lines += parser.printsection("$target", tgt_opt, parser.tgt_opts_types)
    for line in lines:
        print(line)
def blackbody1d(temperature, radius, distance=10*u.pc,
                lambda_min=2000, lambda_max=10000, dlambda=1):
    """
    One dimensional blackbody spectrum.

    Parameters
    ----------
    temperature : float or `~astropy.units.Quantity`
        Blackbody temperature. If not a Quantity, assumed to be in Kelvin.
    radius : `~astropy.units.Quantity`
        Radius of the spherical blackbody. Must be a Quantity.
    distance : `~astropy.units.Quantity`
        Distance of the blackbody source. Must be a Quantity.
        Default is 10 pc so absolute and apparent magnitudes coincide.
    lambda_min : float
        Minimum wavelength for the spectrum (in Angstroms).
    lambda_max : float
        Maximum wavelength for the spectrum (in Angstroms).
    dlambda : float
        Wavelength interval for the spectrum (in Angstroms).

    Returns
    -------
    bb : `~starkit.fix_spectrum1d.SKSpectrum1D`
        Blackbody spectrum.
    """
    # Both length arguments must carry astropy units.
    if not hasattr(radius, 'unit'):
        raise ValueError("radius needs to be a quantity (e.g., 1 * u.cm)")
    if not hasattr(distance, 'unit'):
        raise ValueError("distance needs to be a quantity (e.g., 1 * u.pc)")
    grid = np.arange(lambda_min, lambda_max, dlambda) * u.AA
    # The factor of pi sr comes from the angular integral over the disk.
    spectral_flux = np.pi * u.sr * (radius / distance) ** 2 * blackbody_lambda(grid, temperature)
    # A theoretical quantity carries no uncertainty.
    errors = np.zeros_like(spectral_flux)
    return SKSpectrum1D.from_array(grid, spectral_flux, errors)
def expm1_op_tensor(x):
    """
    Tensor-method wrapper delegating to the Expm1 op.

    See :func:`oneflow.expm1`
    """
    return Expm1()(x)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.