content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def plot(*args, **kwargs):
    """Plot the csv file.
    Usage: ph plot
    ph plot --index=col
    ph plot --kind=bar
    ph plot --kind=scatter --x=col1 --y=col2
    ph plot --style=k--
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        exit("plot depends on matplotlib, install ph[plot]")
    df = pipein()
    index_col = kwargs.get("index")
    if index_col is not None:
        df = df.set_index(index_col)
    # Default pandas plot kind is "line"; style/x/y fall back to None.
    plot_opts = {
        "kind": kwargs.get("kind", "line"),
        "style": kwargs.get("style"),
        "x": kwargs.get("x"),
        "y": kwargs.get("y"),
    }
    df.plot(**plot_opts)
    plt.show()
    pipeout(df)
def strip_names(ttx_path):
    """Clear several nameIDs to prevent the font from being installable on desktop OSs.
    ttx_path: Path of the .ttx font to be modified.
    """
    # nameIDs to blank out: family, style, full name, typographic
    # family/style and CID font name.
    ids_to_clear = {1, 2, 4, 16, 17, 18}
    tree = parse(ttx_path)
    name_table = tree.getroot().find('name')
    for record in name_table:
        if int(record.attrib['nameID']) in ids_to_clear:
            record.text = ' '
    tree.write(ttx_path)
def pull_sls_hardware(sls_file=None):
    """Return SLS hardware records, from a file or from the live API gateway.

    If ``sls_file`` is given, the records are extracted from its ``Hardware``
    mapping and no network access happens. Otherwise the function obtains the
    ``admin-client`` secret from Kubernetes, exchanges it for a Keycloak
    bearer token, and fetches ``/apis/sls/v1/hardware`` from the API gateway.

    Args:
        sls_file: Parsed SLS JSON document (dict-like), or None to query live.

    Returns:
        A list of hardware records (file path) or the decoded JSON response
        from the SLS hardware endpoint (live path).

    NOTE(review): the original docstring claimed this returns an API token;
    it actually returns hardware data.
    """
    if sls_file:
        # Flatten the "Hardware" mapping into a list of its values.
        sls_hardware = [
            hardware[x] for hardware in [sls_file.get("Hardware", {})] for x in hardware
        ]
        return sls_hardware
    if not sls_file:
        # Live query path: the gateway uses a self-signed cert, so TLS
        # verification is disabled below and the warning is silenced here.
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        debug = False

    def on_debug(debug=False, message=None):
        # Lightweight debug printer; no-op unless debug is truthy.
        if debug:
            print("DEBUG: {}".format(message))

    #
    # Convenience wrapper around remote calls
    #
    def remote_request(
        remote_type,
        remote_url,
        headers=None,
        data=None,
        verify=False,
        debug=False,
    ):
        # Performs one HTTP request and returns the decoded JSON body.
        # Any failure aborts the program via SystemExit.
        remote_response = None
        while True:
            try:
                response = requests.request(
                    remote_type,
                    url=remote_url,
                    headers=headers,
                    data=data,
                    verify=verify,
                )
                on_debug(debug, "Request response: {}".format(response.text))
                response.raise_for_status()
                remote_response = json.dumps({})
                if response.text:
                    remote_response = response.json()
                break
            except Exception as err:
                message = "Error calling {}: {}".format(remote_url, err)
                raise SystemExit(message) from err
        return remote_response

    #
    # Get the admin client secret from Kubernetes
    #
    secret = None
    try:
        config.load_kube_config()
        v1 = client.CoreV1Api()
        secret_obj = v1.list_namespaced_secret(
            "default",
            field_selector="metadata.name=admin-client-auth",
        )
        secret_dict = secret_obj.to_dict()
        secret_base64_str = secret_dict["items"][0]["data"]["client-secret"]
        on_debug(
            debug,
            "base64 secret from Kubernetes is {}".format(secret_base64_str),
        )
        secret = base64.b64decode(secret_base64_str.encode("utf-8"))
        on_debug(debug, "secret from Kubernetes is {}".format(secret))
    except Exception as err:
        print("Error collecting secret from Kubernetes: {}".format(err))
        sys.exit(1)
    #
    # Get an auth token by using the secret
    #
    token = None
    try:
        token_url = "https://api-gw-service-nmn.local/keycloak/realms/shasta/protocol/openid-connect/token"
        token_data = {
            "grant_type": "client_credentials",
            "client_id": "admin-client",
            "client_secret": secret,
        }
        token_request = remote_request(
            "POST",
            token_url,
            data=token_data,
            debug=debug,
        )
        token = token_request["access_token"]
        on_debug(
            debug=debug,
            message="Auth Token from keycloak (first 50 char): {}".format(
                token[:50],
            ),
        )
    except Exception as err:
        print("Error obtaining keycloak token: {}".format(err))
        sys.exit(1)
    #
    # Get existing SLS data for comparison (used as a cache)
    #
    sls_cache = None
    sls_url = "https://api-gw-service-nmn.local/apis/sls/v1/hardware"
    auth_headers = {"Authorization": "Bearer {}".format(token)}
    try:
        sls_cache = remote_request(
            "GET",
            sls_url,
            headers=auth_headers,
            verify=False,
        )
        on_debug(
            debug=debug,
            message="SLS data has {} records".format(len(sls_cache)),
        )
    except Exception as err:
        print("Error requesting Networks from SLS: {}".format(err))
        sys.exit(1)
    on_debug(debug=debug, message="SLS records {}".format(sls_cache))
    return sls_cache
def create_virtual_service(clientToken=None, meshName=None, meshOwner=None, spec=None, tags=None, virtualServiceName=None):
    """
    Creates a virtual service within a service mesh.

    A virtual service is an abstraction of a real service provided either
    directly by a virtual node or indirectly by means of a virtual router.
    Dependent services call the virtual service by its ``virtualServiceName``,
    and those requests are routed to the virtual node or virtual router
    configured as the provider.

    See also: AWS AppMesh ``CreateVirtualService`` API documentation.

    :type clientToken: string
    :param clientToken: Unique, case-sensitive idempotency token (up to 36
        letters, numbers, hyphens, underscores). Auto-populated if omitted.
    :type meshName: string
    :param meshName: [REQUIRED] Name of the service mesh to create the
        virtual service in.
    :type meshOwner: string
    :param meshOwner: AWS IAM account ID of the mesh owner; required only
        when creating a resource in a mesh shared with your account.
    :type spec: dict
    :param spec: [REQUIRED] Virtual service specification. ``provider``
        contains either ``virtualNode.virtualNodeName`` or
        ``virtualRouter.virtualRouterName`` (each name is required when its
        provider kind is used).
    :type tags: list
    :param tags: Optional metadata tags; each is a dict with ``key``
        [REQUIRED, max 128 chars] and optional ``value`` [max 256 chars].
    :type virtualServiceName: string
    :param virtualServiceName: [REQUIRED] Name to use for the virtual service.
    :rtype: dict
    :return: ``{'virtualService': {'meshName', 'metadata' (arn, createdAt,
        lastUpdatedAt, meshOwner, resourceOwner, uid, version), 'spec',
        'status' ('ACTIVE'|'DELETED'|'INACTIVE'), 'virtualServiceName'}}``

    The real client may raise: BadRequestException, ConflictException,
    ForbiddenException, InternalServerErrorException, LimitExceededException,
    NotFoundException, ServiceUnavailableException, TooManyRequestsException.
    """
    # Documentation stub only — the actual implementation is generated by
    # botocore at runtime.
    pass
def copy(stream, credentials, direction, hdfsFile=None, hdfsFileAttrName=None, localFile=None, name=None):
    """Copy a Hadoop Distributed File to local and copy a local file to the HDFS.

    Args:
        stream(Stream): Input stream driving the copy operations.
        credentials(dict|str|file): The credentials of the IBM cloud Analytics Engine service in *JSON* (dict) or JSON string (str) or the path to the *configuration file* (``hdfs-site.xml`` or ``core-site.xml``). If the *configuration file* is specified, then this file will be copied to the 'etc' directory of the application bundle.
        direction(str): This mandatory parameter specifies the direction of copy. The parameter can be set with the following values. **'copyFromLocalFile'** Copy a file from local disk to the HDFS file system. **'copyToLocalFile'** Copy a file from HDFS file system to the local disk.
        hdfsFile(str): Name of the HDFS file or directory. A leading slash means an absolute HDFS path; otherwise the path is relative to /user/userid/hdfsFile.
        hdfsFileAttrName(str): Name of the input tuple attribute that carries the HDFS file name.
        localFile(str): Name of the local file to be copied. A leading slash means an absolute local path; otherwise the path is relative to the project data directory.
        name(str): Source name in the Streams context, defaults to a generated name.
    Returns:
        Output Stream containing the result message and the elapsed time with schema :py:const:`~streamsx.hdfs.FileCopySchema`.

    NOTE(review): the ``hdfsFile`` parameter is accepted but never forwarded
    to ``_HDFS2FileCopy`` below (only ``hdfsFileAttrName`` and ``localFile``
    are passed) — confirm whether that is intentional. The original docstring
    also documented ``topology`` and ``schema`` parameters that do not exist
    in this signature; they have been removed/adjusted here.
    """
    # Map the user-facing direction string onto the operator's enum value.
    Direction=_convert_copy_direction_string_to_enum(direction)
    # Resolve the various credential formats into concrete connection values.
    credentials, hdfsUri, hdfsUser, hdfsPassword, configPath = _setCredentials(credentials, stream.topology)
    _op = _HDFS2FileCopy(stream, configPath=configPath, credentials=credentials, hdfsUri=hdfsUri, hdfsUser=hdfsUser, hdfsPassword=hdfsPassword, direction=Direction, hdfsFileAttrName=hdfsFileAttrName, localFile=localFile , schema=FileCopySchema, name=name)
    return _op.outputs[0]
def normalize(value: str) -> str:
    """Normalize a string by removing '-' and capitalizing the following character.

    Example: "foo-bar" -> "fooBar". Characters other than the one directly
    after a '-' keep their original case.
    """
    first, *rest = value.split('-')
    # Uppercase only the first character of each segment that followed a '-'.
    return first + ''.join(seg[:1].upper() + seg[1:] for seg in rest)
def encryptpwd(ctx, encrypt_password, salt):
    """Set the encryption password on the account manager and persist it."""
    assert len(encrypt_password) > 0, "No encrypt-password"
    manager = ctx.obj.account_manager
    manager.set_encrypt_password(encrypt_password, salt)
    manager.save()
def save_data(userId, columns, tableName, searchTerm="", objType="", sortBy=None,
tableId=None, isDefaultOnDashboard=False, maxRows=0,
dashboard=None, clone=False, row=0, grid_col=0, sizex=0,
sizey=0):
"""
Saves the customized table in the dashboard. Called by save_search and
save_new_dashboard via ajax in views.py.
"""
try:
if searchTerm:
searchTerm = HTMLParser.HTMLParser().unescape(searchTerm)
#if user is editing a table
if tableId :
newSavedSearch = SavedSearch.objects(id=tableId).first()
if not newSavedSearch:
raise Exception("Cannot find Table")
elif clone:
clonedSavedSearch = cloneSavedSearch(newSavedSearch, dashboard.id)
else:
newSavedSearch = SavedSearch()
cols = []
for col in columns:
if "field" not in col or "caption" not in col:
continue
cols.append(col)
if not cols:
raise("There are no columns to save")
newSavedSearch.tableColumns = cols
newSavedSearch.name = tableName
oldDashId = None
if dashboard and newSavedSearch.dashboard != dashboard.id:
newSavedSearch.dashboard= dashboard.id
#if it is not a deault dashboard table, it must have a searchterm and objtype
if searchTerm:
newSavedSearch.searchTerm = searchTerm
if objType:
newSavedSearch.objType = objType
#this is to identify the default tables on every user dashboard
newSavedSearch.isDefaultOnDashboard = isDefaultOnDashboard
if sortBy:
newSavedSearch.sortBy = sortBy
if sizex:
newSavedSearch.sizex = sizex
elif not newSavedSearch.sizex:
newSavedSearch.sizex = 50
if sizey:
newSavedSearch.sizey = sizey
elif maxRows and maxRows != newSavedSearch.maxRows:
newSavedSearch.sizey = int(maxRows)+1
elif not newSavedSearch.sizey:
newSavedSearch.sizey = 7
if row:
newSavedSearch.row = row
elif not newSavedSearch.row:
newSavedSearch.row = 1
if grid_col:
newSavedSearch.col = grid_col
elif not newSavedSearch.col:
newSavedSearch.col = 1
if maxRows:
newSavedSearch.maxRows = maxRows;
newSavedSearch.save()
#if the old dashboard is empty, delete it
if oldDashId:
deleteDashboardIfEmpty(oldDashId)
except Exception as e:
print "ERROR: "
print e
return {'success': False,
'message': "An unexpected error occurred while saving table. Please refresh and try again"}
return {'success': True,'message': tableName+" Saved Successfully!"} | 5,328,607 |
def heatmap(plot,
            client_color=False,
            low=(255, 200, 200), high=(255, 0, 0),
            spread=0, transform="cbrt", **kwargs):
    """
    Produce a heatmap from a set of shapes.
    A heatmap is a scale of how often a single thing occurs.
    This is a convenience function that encodes a common configuration,
    and parameters to support the most common variations.
    plot -- Plot to convert into a heatmap
    client_color -- If True, use an identity shader and reserve a value so
                    the client performs the coloring itself.
    low -- Low color of the heatmap. Default is a light red
    high -- High color of the heatmap. Default is full saturation red.
    spread -- How far (if any) should values be spread. Default is 0.
    transform -- Apply a transformation before building a color ramp?
                 Understood values are 'cbrt', 'log', 'none' and None.
                 The default is 'cbrt', for cuberoot, an approximation of
                 perceptual correction on monochromatic scales.
    kwargs -- Further arguments passed on to replot for greater control.
    """
    # Normalize the transform selector once; None means "no transform".
    transform = transform.lower() if transform is not None else None
    if client_color:
        shader = Id()
        kwargs['reserve_val'] = kwargs.get('reserve_val', 0)
    else:
        shader = InterpolateColor(low=low, high=high)
    # NOTE(review): with structure-stripped source it is ambiguous whether
    # this transform chain was nested under the `else` above; it is placed at
    # function level here so validation always runs — confirm against the
    # original project.
    if transform == "cbrt":
        shader = Cuberoot() + shader
    elif transform == "log":
        shader = Log() + shader
    elif transform == "none" or transform is None:
        pass
    else:
        raise ValueError("Unrecognized transform '{0}'".format(transform))
    if spread > 0:
        # Spread each value over a circular neighborhood before shading.
        shader = Spread(factor=spread, shape="circle") + shader
    kwargs['points'] = kwargs.get('points', True)
    return replot(plot,
                  agg=Count(),
                  info=Const(val=1),
                  shader=shader,
                  **kwargs)
def add_comment_to_task(task_id, comment, status=None):
    """
    Adds comment to given task
    :param task_id: id of the task to comment on
    :param comment: comment text
    :param status: optional task-status name; defaults to the task's
        current status
    :return: the created comment, or None if the task/status is not found
    """
    task = get_task(task_id, as_dict=True)
    if not task:
        return
    # Resolve either the task's current status or the named one.
    resolved_status = (gazu.task.get_task_status(task)
                       if not status
                       else gazu.task.get_task_status_by_name(status))
    if not resolved_status:
        return
    return gazu.task.add_comment(task, resolved_status, comment)
def score(h, r, t):
    """Score triples as the inner product of h with the elementwise
    product r*t.

    :param h: (batch_size, dim) array of head embeddings
    :param r: (dim,) relation embedding
    :param t: (dim,) tail embedding
    :return: (batch_size,) array of scores
    """
    # r*t is 1-D, so the transpose in the original was a no-op; a plain
    # matrix-vector product is equivalent.
    return np.matmul(h, r * t)
def migrate_file(file_name, file_content):
    """Migrate file."""
    migrator = V4Migrator(file_name, file_content)
    return migrator.migrate()
def _GetOrganizedAnalysisResultBySuspectedCL(analysis_result):
"""Group tests it they have the same suspected CLs."""
organized_results = defaultdict(list)
if not analysis_result:
return organized_results
for step_failure in analysis_result.get('failures', []):
step_name = step_failure['step_name']
supported = step_failure.get('supported', True)
step_revisions_index = {}
organized_suspected_cls = organized_results[step_name]
is_flaky = step_failure.get('flaky', False)
if not step_failure.get('tests'):
# Non swarming, just group the whole step together.
shared_result = {
'first_failure': step_failure['first_failure'],
'last_pass': step_failure.get('last_pass'),
'supported': supported,
'tests': [],
'suspected_cls': step_failure['suspected_cls'],
'flaky': is_flaky,
}
organized_suspected_cls.append(shared_result)
continue
# Swarming tests.
for index, cl in enumerate(step_failure['suspected_cls']):
step_revisions_index[cl['revision']] = index
# Groups tests by suspected CLs' revision.
# Keys are the indices of each test in the test list.
# Format is as below:
# {
# 1: {
# 'tests': ['test1', 'test2'],
# 'revisions': ['rev1'],
# 'suspected_cls': [
# # suspected cl info for rev1 at step level.
# ]
# },
# 3: {
# 'tests': ['test3'],
# 'revisions': ['rev3', 'rev2'],
# 'suspected_cls': [
# # suspected cl info for rev2, rev3 at step level.
# ]
# }
# }
tests_group = defaultdict(list)
for index, test in enumerate(step_failure['tests']):
# Get all revisions for this test and check if there is
# any other test has the same culprit(represented by revision) set and
# are flaky or reliable at the same time.
test_name = test['test_name']
is_flaky = test.get('flaky', False)
found_group = False
revisions = set()
for cl in test['suspected_cls']:
revisions.add(cl['revision'])
for group in tests_group.values():
# Found tests that have the same culprit(represented by revision),
# add current test to group.
if revisions == set(group['revisions']) and is_flaky == group['flaky']:
group['tests'].append(test_name)
found_group = True
break
if not found_group:
# First test with that revision set, add a new group.
group_suspected_cls = []
for revision in revisions:
group_suspected_cls.append(
step_failure['suspected_cls'][step_revisions_index[revision]])
tests_group[index] = {
'tests': [test_name],
'revisions': list(revisions),
'suspected_cls': group_suspected_cls,
'flaky': is_flaky,
}
for index, group in tests_group.iteritems():
# Reorganize heuristic results by culprits.
test_result = step_failure['tests'][index]
shared_result = {
'first_failure': test_result['first_failure'],
'last_pass': test_result.get('last_pass'),
'supported': supported,
'tests': group['tests'],
'suspected_cls': group['suspected_cls'],
'flaky': group['flaky'],
}
organized_suspected_cls.append(shared_result)
return organized_results | 5,328,612 |
def volume_division(in_volume1, in_volume2):
    """Divide a volume by another one.

    Args:
        in_volume1 (nibabel volume): numerator; data is a [m,n,o] array
        in_volume2 (nibabel volume): denominator; data is a [m,n,o] array
    Returns:
        out_volume (nibabel volume): voxelwise in_volume1/in_volume2 after
        both inputs have been reoriented to canonical ('RAS') orientation.
        NaNs are treated as 0 and voxels where the denominator is 0 are
        set to 0 in the output.
    Raises:
        ValueError: if the two (reoriented) volumes differ in shape.
    """
    # Reorient both inputs to the canonical RAS orientation.
    vol1_ras = nib.as_closest_canonical(in_volume1)
    vol2_ras = nib.as_closest_canonical(in_volume2)
    numerator = vol1_ras.get_data()
    numerator_affine = vol1_ras.affine.copy()
    denominator = vol2_ras.get_data()
    # Sanity check: shapes must match voxel-for-voxel.
    if numerator.shape != denominator.shape:
        raise ValueError('the input volumes must have the same size')
    # Replace NaNs by 0 in both arrays (mutates in place, as the original did).
    numerator[np.isnan(numerator)] = 0
    denominator[np.isnan(denominator)] = 0
    # Avoid division by zero: remember zero voxels, substitute 1 there,
    # divide, then force those output voxels back to 0.
    zero_mask = denominator == 0
    denominator[zero_mask] = 1
    quotient = numerator / denominator
    quotient[zero_mask] = 0
    # Package the result with (a copy of) the numerator's affine.
    return nib.Nifti1Image(quotient, numerator_affine.copy())
def sparse_eye(num_rows,
               num_columns=None,
               dtype=dtypes.float32,
               name=None):
    """Creates a two-dimensional sparse tensor with ones along the diagonal.

    Args:
      num_rows: Non-negative integer or `int32` scalar `tensor` giving the
        number of rows in the resulting matrix.
      num_columns: Optional non-negative integer or `int32` scalar `tensor`
        giving the number of columns. Defaults to `num_rows`.
      dtype: The type of element in the resulting `Tensor`.
      name: A name for this `Op`. Defaults to "eye".

    Returns:
      A `SparseTensor` of shape [num_rows, num_columns] with ones along the
      diagonal.
    """
    with ops.name_scope(name, default_name="eye", values=[num_rows, num_columns]):
        rows = _make_int64_tensor(num_rows, "num_rows")
        if num_columns is None:
            cols = rows
        else:
            cols = _make_int64_tensor(num_columns, "num_columns")
        # The diagonal has min(rows, cols) entries, at indices (i, i).
        diag_size = math_ops.minimum(rows, cols)
        diag_range = math_ops.range(diag_size, dtype=dtypes.int64)
        diag_indices = array_ops.stack([diag_range, diag_range], axis=1)
        return sparse_tensor.SparseTensor(
            indices=diag_indices,
            values=array_ops.ones(diag_size, dtype=dtype),
            dense_shape=[rows, cols])
def count_num_sents_cluster(sents_vectors, sections_sents, n_clusters):
    """
    Cluster sentences and count the number of times that sentences from each
    section appear in each cluster.
    Ex: 4 sents from introduction and 3 sentences from conclusion in cluster x.

    Args:
        sents_vectors: sentence embeddings passed to ``cluster_sents``.
        sections_sents: per-sentence section label; 'highlights' sentences
            are excluded from the counts.
        n_clusters: number of clusters to form.

    Returns:
        (features, columns) where ``features[i]`` holds the per-section counts
        of the cluster sentence i belongs to.

    NOTE(review): this appears to be Python 2 code — ``n_sents.values()`` is
    appended directly and later fed to ``np.array``, which only works when
    ``values()`` returns a list. Also, the returned ``columns`` labels
    (intro, text, abst, conclu) do NOT match the dict insertion order
    (abstract, introduction, conclusion, text); the column names likely
    mislabel the data — confirm against callers before relying on them.
    """
    labels, centroids = cluster_sents(sents_vectors, n_clusters)
    sections = ['abstract', 'introduction', 'conclusion', 'text']
    sents_cluster_values = []
    n_sents_by_cluster = []
    for c in range(n_clusters):
        # Per-section counters for cluster c.
        n_sents = {}
        for sec in sections:
            n_sents[sec] = 0.0
        # Get indices in c cluster
        indices_cluster = np.where(labels == c)[0]
        for i in indices_cluster:
            if sections_sents[i] != 'highlights':
                n_sents[sections_sents[i]] += 1
        n_sents_by_cluster.append(n_sents)
    # Each sentence inherits the count vector of its cluster.
    for lbl in labels:
        sents_cluster_values.append(n_sents_by_cluster[lbl].values())
    columns = ['n_sents_intro', 'n_sents_text', 'n_sents_abst',
               'n_sents_conclu']
    return np.array(sents_cluster_values), columns
def add_kubelet_token(hostname):
    """
    Add a token for a node in the known tokens
    :param hostname: the name of the node
    :returns: the token added (or the existing one, if already present)
    """
    tokens_file = "{}/credentials/known_tokens.csv".format(snapdata_path)
    # Reuse an existing token for this node if one is already recorded.
    existing = get_token("system:node:{}".format(hostname))
    if existing:
        return existing.rstrip()
    rng = random.SystemRandom()
    alphabet = string.ascii_letters + string.digits
    token = "".join(rng.choice(alphabet) for _ in range(32))
    uid = "".join(rng.choice(string.digits) for _ in range(8))
    entry = '{},system:node:{},kubelet-{},"system:nodes"'.format(token, hostname, uid)
    with open(tokens_file, "a") as fp:
        # TODO double check this format. Why is userid unique?
        fp.write(entry + os.linesep)
    return token.rstrip()
def check_password_pwned(password, fast=False):
    """Check if a password is in the pwned-passwords list.

    :param password: The plaintext password
    :param fast: Whether the check should finish quickly, even if that may
                 indicate not being able to check the password. This should
                 be used during interactive requests
    :return: A bool indicating whether the password has been pwned or not,
             or `None` in case checking it was not possible.
    """
    # k-anonymity scheme: only the first five hex digits of the SHA-1 digest
    # are sent; the remote side returns all matching suffixes.
    digest = hashlib.sha1(password.encode()).hexdigest().upper()
    prefix, suffix = digest[:5], digest[5:]
    hashes = _get_pwned_hashes(prefix, 1 if fast else 3)
    if hashes is None:
        return None
    return suffix in hashes
def is_plugin_loaded(plugin_name):
    """
    Return whether given plugin is loaded or not
    :param plugin_name: str
    :return: bool
    """
    loaded = maya.cmds.pluginInfo(plugin_name, query=True, loaded=True)
    return loaded
def drydown_service(lat: float = Query(...), lon: float = Query(...)):
    """Babysteps.

    NOTE(review): ``handler`` is invoked as (lon, lat) — the argument order
    is swapped relative to this endpoint's (lat, lon) signature. Presumably
    ``handler`` expects (longitude, latitude); confirm against its definition.
    """
    return handler(lon, lat)
def service_vuln_iptable(hostfilter=None):
    """Returns a dict of services. Contains a list of IPs with (vuln, sev)
    '0/info': { 'host_id1': [ (ipv4, ipv6, hostname), ( (vuln1, 5), (vuln2, 10) ... ) ] },
    { 'host_id2': [ (ipv4, ipv6, hostname), ( (vuln1, 5) ) ] }

    Python 2 / web2py code: uses ``dict.iteritems()`` and web2py's DAL
    (``db``), ``cache`` and helpers defined elsewhere in the application.
    """
    service_dict = {}
    # go through each t_service_vulns identifier that is unique
    query = (db.t_service_vulns.id > 0) & (db.t_service_vulns.f_services_id == db.t_services.id)
    query = create_hostfilter_query(hostfilter, query, 't_services')
    for service in db(query).select(db.t_service_vulns.f_services_id, groupby=db.t_service_vulns.f_services_id):
        # find all the records with the service_id
        q = (db.t_service_vulns.f_services_id == service.f_services_id)
        q &= (db.t_service_vulns.f_vulndata_id == db.t_vulndata.id)
        ip_dict = {}
        # Accumulate (vulnid, severity, cvss) per host IP, de-duplicated by
        # vulnid. Results are cached for 120s in web2py's RAM cache.
        for row in db(q).select(cache=(cache.ram,120)):
            svc_rec = db.t_services(row.t_service_vulns.f_services_id)
            port_txt = "%s/%s" % (svc_rec.f_number, svc_rec.f_proto)
            host_rec = get_host_record(svc_rec.f_hosts_id)
            ip_info = ip_dict.setdefault(host_rec.f_ipaddr, [])
            if row.t_vulndata.f_vulnid not in map(lambda x: x[0], ip_info):
                ip_info.append((row.t_vulndata.f_vulnid, row.t_vulndata.f_severity, row.t_vulndata.f_cvss_score))
            ip_dict[host_rec.f_ipaddr] = ip_info
        # NOTE(review): port_txt here holds the value from the LAST row of
        # the loop above. All rows share the same service id (hence the same
        # port), so this works — but it silently assumes ip_dict is non-empty
        # whenever this loop runs, and would reuse a stale port if the inner
        # select ever returned rows for mixed services.
        for k, v in ip_dict.iteritems():
            service_dict.setdefault(port_txt, dict())
            service_dict[port_txt][k] = v
    return service_dict
def validate_tier_name(name):
    """
    Property: Tier.Name

    Validate that *name* is one of the recognized tier names and return it
    unchanged; raise ValueError otherwise.
    """
    allowed = (WebServer, Worker)
    if name in allowed:
        return name
    raise ValueError("Tier name needs to be one of %r" % list(allowed))
def version_get():  # noqa: E501
    """Version
    Version # noqa: E501
    :rtype: InlineResponse2006
    """
    # Guard clause: bail out early when no active version row exists.
    record = CoreApiVersion.query.filter_by(active=True).first()
    if not record:
        return cors_500(details='Unable to retrieve version information')
    version = Version()
    version.version = record.version
    version.reference = record.reference
    response = InlineResponse2006()
    response.data = [version]
    return cors_200(response_body=response)
def language_register(df):
    """
    Add 'training language', 'test language', 'training register' and
    'test register' columns to a dataframe.

    This assumes that:
        - the dataframe contains a 'training set' and 'test set' column
        - the sets mentioned in these columns are properly documented in
          the scone_phobia.metadata.corpora module. The only exception is
          if a set is called 'None'. In that case, the new columns will
          be set to 'None' as well.
    """
    # Column insertion order matches the original: both languages first,
    # then both registers.
    for prop, lookup in (('language', corpora.language),
                         ('register', corpora.register)):
        for split in ('training', 'test'):
            source = df['{} set'.format(split)]
            df['{} {}'.format(split, prop)] = [
                'None' if entry == 'None' else lookup(entry)
                for entry in source
            ]
    return df
def balances(cfg, wdir, plotpath, filena, name, model):
    """Plot everything related to energy and water mass budgets.

    This method provides climatological annual mean maps of TOA, atmospheric
    and surface energy budgets, time series of annual mean anomalies in the
    two hemispheres and meridional sections of meridional enthalpy
    transports. Scatter plots of oceanic vs. atmospheric meridional
    enthalpy transports are also provided.

    Arguments:
    - cfg: ESMValTool configuration dict (used for provenance logging);
    - wdir: the working directory;
    - plotpath: the path where the plot has to be saved;
    - filena: the files containing input fields (3 files -> energy budgets,
      2 files -> water mass / latent heat budgets);
    - name: the name of the variable associated with each input field;
    - model: the name of the model to be analysed;
    """
    cdo = Cdo()
    provlog = ProvenanceLogger(cfg)
    # nsub selects the mode: 3 = energy budgets, 2 = water/latent budgets.
    nsub = len(filena)
    pdir = plotpath
    plotentname = pdir + '/{}_heat_transp.png'.format(model)
    plotwmbname = pdir + '/{}_wmb_transp.png'.format(model)
    plotlatname = pdir + '/{}_latent_transp.png'.format(model)
    # timesery = np.zeros([nsub, 2])
    dims, ndims, tmean, zmean, timeser = global_averages(nsub, filena, name)
    transp_mean = np.zeros([nsub, ndims[1]])
    lat_maxm = np.zeros([nsub, 2, len(dims[3])])
    tr_maxm = np.zeros([nsub, 2, len(dims[3])])
    # Latitude band limits used when locating transport peaks per budget.
    lim = [55, 55, 25]
    for i_f in np.arange(nsub):
        # Derive meridional transports from zonal means and locate peaks.
        transp = transport(zmean[i_f, :, :], timeser[i_f, :, 0], dims[1])
        transp_mean[i_f, :], list_peak = transports_preproc(
            dims[1], ndims[3], lim[i_f], transp)
        lat_maxm[i_f, :, :] = list_peak[0]
        tr_maxm[i_f, :, :] = list_peak[1]
    if nsub == 3:
        # Energy budget mode: TOA / atmospheric / surface budgets.
        ext_name = [
            'TOA Energy Budget', 'Atmospheric Energy Budget',
            'Surface Energy Budget'
        ]
        transpty = (-6E15, 6E15)
        coords = [dims[0], dims[1]]
        plot_climap_eb(model, pdir, coords, tmean, ext_name)
        fig = plt.figure()
        strings = ['Meridional heat transports', 'Latitude [deg]', '[W]']
        lats = dims[1]
        for i in np.arange(nsub):
            filename = filena[i] + '.nc'
            # Map the variable name onto the output transport label.
            if name[i] == 'toab':
                nameout = 'total'
            elif name[i] == 'atmb':
                nameout = 'atmos'
            elif name[i] == 'surb':
                nameout = 'ocean'
            nc_f = wdir + '/{}_transp_mean_{}.nc'.format(nameout, model)
            removeif(nc_f)
            lat_model = 'lat_{}'.format(model)
            pr_output(transp_mean[i, :], filename, nc_f, nameout, lat_model)
            # Rename variable and latitude axis to model-specific names via CDO.
            name_model = '{}_{}'.format(nameout, model)
            cdo.chname('{},{}'.format(nameout, name_model),
                       input=nc_f,
                       output='aux.nc')
            move('aux.nc', nc_f)
            cdo.chname('lat,{}'.format(lat_model), input=nc_f, output='aux.nc')
            move('aux.nc', nc_f)
            attr = ['{} meridional enthalpy transports'.format(nameout), model]
            provrec = provenance_meta.get_prov_transp(attr, filename,
                                                      plotentname)
            provlog.log(nc_f, provrec)
            # All three transports are drawn on the same figure.
            plot_1m_transp(lats, transp_mean[i, :], transpty, strings)
        plt.grid()
        plt.savefig(plotentname)
        plt.close(fig)
        plot_1m_scatter(model, pdir, lat_maxm, tr_maxm)
    elif nsub == 2:
        # Water mass / latent heat budget mode.
        ext_name = ['Water mass budget', 'Latent heat budget']
        transpwy = (-2E9, 2E9)
        transply = (-6E15, 6E15)
        coords = [dims[0], dims[1]]
        plot_climap_wm(model, pdir, coords, tmean, ext_name, name)
        nc_f = wdir + '/{}_transp_mean_{}.nc'.format('wmb', model)
        removeif(nc_f)
        filena[0] = filena[0].split('.nc', 1)[0]
        filename = filena[0] + '.nc'
        pr_output(transp_mean[0, :], filename, nc_f, 'wmb', 'lat')
        attr = ['water mass transport', model]
        provrec = provenance_meta.get_prov_transp(attr, filename, plotwmbname)
        provlog.log(nc_f, provrec)
        nc_f = wdir + '/{}_transp_mean_{}.nc'.format('latent', model)
        removeif(nc_f)
        filena[1] = filena[1].split('.nc', 1)[0]
        filename = filena[1] + '.nc'
        pr_output(transp_mean[1, :], filename, nc_f, 'latent', 'lat')
        attr = ['latent energy transport', model]
        provrec = provenance_meta.get_prov_transp(attr, filename, plotlatname)
        provlog.log(nc_f, provrec)
        strings = ['Water mass transports', 'Latitude [deg]', '[kg*s-1]']
        fig = plt.figure()
        plot_1m_transp(dims[1], transp_mean[0, :], transpwy, strings)
        plt.grid()
        plt.savefig(plotwmbname)
        plt.close(fig)
        strings = ['Latent heat transports', 'Latitude [deg]', '[W]']
        fig = plt.figure()
        plot_1m_transp(dims[1], transp_mean[1, :], transply, strings)
        plt.grid()
        plt.savefig(plotlatname)
        plt.close(fig)
    # Annual-mean time series (global, SH, NH) for each budget.
    # NOTE(review): ext_name is only defined when nsub is 2 or 3, so this
    # function implicitly requires filena to contain exactly 2 or 3 entries.
    for i_f in np.arange(nsub):
        fig = plt.figure()
        axi = plt.subplot(111)
        axi.plot(dims[3], timeser[i_f, :, 0], 'k', label='Global')
        axi.plot(dims[3], timeser[i_f, :, 1], 'r', label='SH')
        axi.plot(dims[3], timeser[i_f, :, 2], 'b', label='NH')
        plt.title('Annual mean {}'.format(ext_name[i_f]))
        plt.xlabel('Years')
        plt.ylabel('[W/m2]')
        axi.legend(loc='upper center',
                   bbox_to_anchor=(0.5, -0.07),
                   shadow=True,
                   ncol=3)
        plt.tight_layout()
        plt.grid()
        plt.savefig(pdir + '/{}_{}_timeser.png'.format(model, name[i_f]))
        plt.close(fig)
def example_seg_2() -> Dict[str, Any]:
    """
    Simple evaluation example for dice score for multiclass semantic segmentation.

    Inputs are 4 pairs of segmentation files: one including predictions and one targets.

    :return: evaluation results as produced by ``EvaluatorDefault.eval``.
    """

    def data_iter():
        """Yield one sample dict per predicted segmentation file.

        Each sample carries the sample id plus the prediction and label volumes
        loaded from NIfTI files under ``pred.array`` / ``label.array``.
        """
        dir_path = pathlib.Path(__file__).parent.resolve()
        # Build both directory paths once instead of re-joining per file.
        predicted_dir = os.path.join(dir_path, "inputs/semantic_segmentation/predicted/")
        labels_path = os.path.join(dir_path, "inputs/semantic_segmentation/labeled/")
        for predicted in os.listdir(predicted_dir):
            # sample id is the file name without its extension(s).
            # (renamed from `id` to avoid shadowing the builtin)
            sample_id = os.path.basename(predicted).split('.')[0]
            label_path = os.path.join(labels_path, sample_id, 'seg.nii.gz')
            predicted_path = os.path.join(predicted_dir, predicted)
            sample_dict = {}
            sample_dict["id"] = sample_id
            sample_dict["pred.array"] = np.asanyarray(nib.load(predicted_path).dataobj)
            sample_dict["label.array"] = np.asanyarray(nib.load(label_path).dataobj)
            yield sample_dict

    # list of metrics: dice between prediction and target arrays
    metrics = OrderedDict([
        ("dice", MetricDice(pred="pred.array", target="label.array")),
    ])

    evaluator = EvaluatorDefault()
    results = evaluator.eval(ids=None, data=data_iter(), batch_size=1, metrics=metrics)
    return results
def clone_pod(module, array):
    """Create Pod Clone"""
    changed = True
    if not module.check_mode:
        changed = False
        pod_name = module.params['name']
        target_name = module.params['target']
        # Only attempt the clone when the target pod does not exist at all.
        if get_target(module, array) is None:
            if get_destroyed_target(module, array):
                # A deleted-but-not-eradicated pod blocks reuse of the name.
                module.fail_json(msg='Target pod {0} already exists but deleted.'.format(target_name))
            else:
                try:
                    array.clone_pod(pod_name, target_name)
                    changed = True
                except Exception:
                    module.fail_json(msg='Clone pod {0} to pod {1} failed.'.format(pod_name,
                                                                                   target_name))
    module.exit_json(changed=changed)
def init(request):
    """
    Wrap an incoming WSGI request in a Context object and initialize
    several important attributes.
    """
    # Reset the umask once per request: some server software installs its own.
    set_umask()

    if isinstance(request, Context):
        ctx = request
        request = request.request
    else:
        ctx = AllContext(request)
    ctx.clock.start('total')
    ctx.clock.start('init')

    # Language is resolved twice: once before authentication and once after,
    # so user preferences can override the pre-auth guess.
    ctx.lang = setup_i18n_preauth(ctx)

    ctx.session = ctx.cfg.session_service.get_session(ctx)
    ctx.user = setup_user(ctx, ctx.session)

    ctx.lang = setup_i18n_postauth(ctx)

    def finish():
        pass

    ctx.finish = finish
    ctx.reset()

    ctx.clock.stop('init')
    return ctx
def fetch_arm_sketch(X, ks, tensor_proj=True, **kwargs_rg):
    """
    Compute a randomized sketch of every mode-n unfolding (arm) of a tensor.

    :param X: the tensor of dimension N
    :param ks: array of size N, sketch size k_n for each mode
    :param tensor_proj: True: use tensor random projection,
        otherwise, use normal one
    :param kwargs_rg: extra keyword arguments forwarded to the random-matrix
        generator
    :return: list of two, first element is list of arm sketches
        and the second one is list of random matrices with size I_n x k_n
    """
    # The two original branches were identical except for which random-matrix
    # generator they called, so select the generator once up front.
    generator = tensor_random_matrix_generator if tensor_proj else random_matrix_generator
    arm_sketches = []
    omegas = []
    for i in range(len(X.shape)):
        # All dimensions except mode i: these are the dimensions the random
        # matrix must contract over for the mode-i unfolding.
        shape = list(X.shape)
        del shape[i]
        omega = generator(shape, ks[i], **kwargs_rg)
        arm_sketches.append(tl.unfold(X, mode=i) @ omega)
        omegas.append(omega)
    return arm_sketches, omegas
def fitness(member):
    """Computes the fitness of a species member.

    Piecewise-linear fitness landscape over the domain [0, 1024); values
    outside the domain score -1.
    http://bit.ly/ui-lab5-dobrota-graf"""
    # The branches are evaluated in order, so each one only needs its upper
    # bound: every earlier branch has already excluded smaller values.  The
    # original repeated both bounds in each condition, which was redundant.
    if member < 0 or member >= 1024:
        return -1
    if member < 30:
        return 60.0
    if member < 90:
        return member + 30.0
    if member < 120:
        return 120.0
    if member < 210:
        return -0.83333 * member + 220
    if member < 270:
        return 1.75 * member - 322.5
    if member < 300:
        return 150.0
    if member < 360:
        return 2.0 * member - 450
    if member < 510:
        return -1.8 * member + 918
    if member < 630:
        return 1.5 * member - 765
    if member < 720:
        return -1.33333 * member + 1020
    if member < 750:
        return 60.0
    if member < 870:
        return 1.5 * member - 1065
    if member < 960:
        return -2.66667 * member + 2560
    return 0
def test_multiple_messages() -> None:
    """
    Tests the order of multiple messages.
    """
    msgs = [CommitMessage(Position(i), Payload("M %d" % i)) for i in range(10)]

    def verify(source: FakeSourceBackend, producer: FakeProducerBackend):
        # The source must have been polled, and the final (9, 9) position
        # committed exactly once.
        source.mocked_fetch.assert_called()
        source.get_next_scheduled_task.assert_called()
        source.commit_positions.assert_called_once_with(9, 9)
        # All payloads must be produced, in the original order.
        expected_writes = [call(StreamMessage(m.payload)) for m in msgs]
        producer.mocked_write.assert_has_calls(expected_writes)

    run_loop(
        10,
        messages=msgs,
        tasks=lambda _: ScheduledTask(
            datetime.now() + timedelta(seconds=100), None, None
        ),
        checker=verify,
    )
def style_loss(content_feats, style_grams, style_weights):
    """
    Computes the style loss at a set of layers.

    Inputs:
    - content_feats: list of the features of the current image at each style
      layer.
    - style_grams: list of the same length, where style_grams[i] is a Tensor
      giving the Gram matrix of the source style image at that layer.
    - style_weights: list of the same length, where style_weights[i] is a
      scalar giving the weight for the style loss at that layer.

    Returns:
    - style_loss: A Tensor containing the scalar style loss.
    """
    style_loss = tf.constant(0.0)
    # Use a plain Python range here: `content_feats` is a Python list, and
    # indexing it with the scalar tensors yielded by tf.range is fragile
    # (and breaks under tf.function/autograph tracing).
    for i in range(len(content_feats)):
        layer_gram = gram_matrix(content_feats[i])
        layer_loss = tf.reduce_mean((layer_gram - style_grams[i])**2) * style_weights[i]
        style_loss = tf.add(style_loss, layer_loss)
    return style_loss
def get_annotations(joints_2d, joints_3d, scale_factor=1.2):
    """Get annotations, including centers, scales, joints_2d and joints_3d.

    Args:
        joints_2d: 2D joint coordinates in shape [N, K, 2], where N is the
            frame number, K is the joint number.
        joints_3d: 3D joint coordinates in shape [N, K, 3], where N is the
            frame number, K is the joint number.
        scale_factor: Scale factor of bounding box. Default: 1.2.

    Returns:
        centers (ndarray): [N, 2]
        scales (ndarray): [N,]
        joints_2d (ndarray): [N, K, 3]
        joints_3d (ndarray): [N, K, 4]
    """
    # A joint is visible iff its 2D projection falls inside the training image.
    inside_x = (joints_2d[:, :, 0] >= 0) * (joints_2d[:, :, 0] < train_img_size[0])
    inside_y = (joints_2d[:, :, 1] >= 0) * (joints_2d[:, :, 1] < train_img_size[1])
    visibility = np.array(inside_x * inside_y, dtype=np.float32)[:, :, None]

    # Append visibility as an extra channel to both coordinate arrays.
    joints_2d = np.concatenate([joints_2d, visibility], axis=-1)
    joints_3d = np.concatenate([joints_3d, visibility], axis=-1)

    # Tight per-frame bounding box [x_min, y_min, x_max, y_max] over all joints.
    x_min = np.min(joints_2d[:, :, 0], axis=1)
    y_min = np.min(joints_2d[:, :, 1], axis=1)
    x_max = np.max(joints_2d[:, :, 0], axis=1)
    y_max = np.max(joints_2d[:, :, 1], axis=1)
    bboxes = np.stack([x_min, y_min, x_max, y_max], axis=1)

    centers = np.stack([(x_min + x_max) / 2, (y_min + y_max) / 2], axis=1)
    # Scale = longest bbox side, padded by scale_factor, in units of 200px.
    scales = scale_factor * np.max(bboxes[:, 2:] - bboxes[:, :2], axis=1) / 200
    return centers, scales, joints_2d, joints_3d
def Intensity(flag, Fin):
    """
    I=Intensity(flag,Fin)

    :ref:`Calculates the intensity of the field. <Intensity>`

    :math:`I(x,y)=F_{in}(x,y).F_{in}(x,y)^*`

    Args::

        flag: 0= no normalization, 1=normalized to 1, 2=normalized to 255 (for bitmaps)
        Fin: input field

    Returns::

        I: intensity distribution (N x N square array of doubles)
    """
    intensity = _np.abs(Fin.field)**2
    if flag > 0:
        peak = intensity.max()
        if peak == 0.0:
            raise ValueError('Cannot normalize because of 0 beam power.')
        intensity = intensity / peak
        if flag == 2:
            # Rescale the normalized intensity to the 0..255 bitmap range.
            intensity = intensity * 255
    return intensity
def index(request):
    """
    Display the index for user related actions.
    """
    # Bind POST data when present; an unbound form is rendered on GET.
    form = ExportForm(request.POST or None)
    context = {"export_form": form}
    return render(request, "users/index.html", context)
def test_topic_regex() -> None:
    """Test the regex property."""
    # Plain topics must compile to a fully-anchored literal pattern.
    for parts, topic in BASIC_TOPICS:
        assert Topic(parts).regex == compile(f"^{topic}$")
    # Wildcard topics must match their example but reject garbage input.
    for parts, _topic, example in WILDCARD_TOPICS:
        pattern = Topic(parts).regex
        assert pattern.match(example)
        assert not pattern.match("u85932q4fds9/3£2####")
def build():
    """Compile the site into the build directory."""
    clean()
    _mkdir_if_not_present(BUILD_DIR)
    # copy each page asset into the build directory
    for src_file in _iter_paths(JS_DIR, '*'):
        shutil.copy(
            os.path.join(JS_DIR, src_file.name),
            os.path.join(BUILD_DIR, src_file.name),
        )
    print('[*] Compiled pages into', BUILD_DIR)

    shutil.copytree(IMG_DIR, BUILD_IMG_DIR)
    print('[*] Copied img assets into', BUILD_IMG_DIR)
def test_load_vep97_parsed_variant(one_vep97_annotated_variant, real_populated_database, case_obj):
    """test first parsing and then loading a vep v97 annotated variant"""
    # GIVEN a variant annotated using the following CSQ entry fields
    csq_header = "Allele|Consequence|IMPACT|SYMBOL|Gene|Feature_type|Feature|BIOTYPE|EXON|INTRON|HGVSc|HGVSp|cDNA_position|CDS_position|Protein_position|Amino_acids|Codons|Existing_variation|DISTANCE|STRAND|FLAGS|SYMBOL_SOURCE|HGNC_ID|CANONICAL|TSL|APPRIS|CCDS|ENSP|SWISSPROT|TREMBL|UNIPARC|REFSEQ_MATCH|SOURCE|GIVEN_REF|USED_REF|BAM_EDIT|SIFT|PolyPhen|DOMAINS|HGVS_OFFSET|MOTIF_NAME|MOTIF_POS|HIGH_INF_POS|MOTIF_SCORE_CHANGE|MES-NCSS_downstream_acceptor|MES-NCSS_downstream_donor|MES-NCSS_upstream_acceptor|MES-NCSS_upstream_donor|MES-SWA_acceptor_alt|MES-SWA_acceptor_diff|MES-SWA_acceptor_ref|MES-SWA_acceptor_ref_comp|MES-SWA_donor_alt|MES-SWA_donor_diff|MES-SWA_donor_ref|MES-SWA_donor_ref_comp|MaxEntScan_alt|MaxEntScan_diff|MaxEntScan_ref|LoFtool|ExACpLI|GERP++_NR|GERP++_RS|REVEL_rankscore|phastCons100way_vertebrate|phyloP100way_vertebrate|CLINVAR|CLINVAR_CLNSIG|CLINVAR_CLNVID|CLINVAR_CLNREVSTAT|genomic_superdups_frac_match"
    vep_header = [field.upper() for field in csq_header.split("|")]
    # WHEN parsed using the VEP header fields above
    parsed_vep97_variant = parse_variant(
        variant=one_vep97_annotated_variant, vep_header=vep_header, case=case_obj
    )
    # GIVEN a database without any variants
    adapter = real_populated_database
    assert adapter.variant_collection.find_one() is None
    # WHEN loading the variant into the database
    adapter.load_variant(variant_obj=parsed_vep97_variant)
    # THEN the variant is loaded with the fields correctly parsed
    loaded = adapter.variant_collection.find_one()
    # revel score
    assert isinstance(loaded["revel_score"], float)
    # conservation fields
    assert all(value == ["NotConserved"] for value in loaded["conservation"].values())
    # clinvar fields
    first_clnsig = loaded["clnsig"][0]
    assert isinstance(first_clnsig["accession"], int)
    assert first_clnsig["value"] in REV_CLINSIG_MAP  # can be str or int
    assert isinstance(first_clnsig["revstat"], str)
def test_network_predictions():
    """Tests imbDRL.metrics.network_predictions."""
    # A plain list for X must be rejected with a clear type error.
    with pytest.raises(ValueError) as exc:
        metrics.network_predictions([], [7, 7, 7, 8, 8, 8])
    assert "`X` must be of type" in str(exc.value)

    # An identity "network" should predict the argmax of each row.
    X = np.array([[1, 2], [2, 1], [3, 4], [4, 3]])
    identity_net = lambda x, step_type, training: (tf.convert_to_tensor(x), None)
    predictions = metrics.network_predictions(identity_net, X)
    assert np.array_equal(predictions, [1, 0, 1, 0])
def _get_javascript_and_find_feature_flag(client: HttpSession, script_uri: str, headers: Dict[str, Any] = None) -> Any:
    """
    Read through minified javascript for feature flags.

    Streams the (large) script in chunks and scans each chunk together with
    the previous one, so a flag definition split across a chunk boundary is
    still found.

    :param client: locust HTTP session used to fetch the script.
    :param script_uri: URI of the javascript file to scan.
    :param headers: optional request headers.
    :return: the raw feature-flag string (hex, decimal or binary literal),
        or None if no flag definition was found.
    """
    # Compile the patterns once, outside the streaming loop (the original
    # rebuilt the pattern list for every 8 KiB chunk).  Sample matches:
    #   var RAW_DEFAULT_FEATURE_FLAGS=0xdc9fffceebc;
    #   var RAW_DEFAULT_FEATURE_FLAGS=5802956083228348;
    #   var RAW_DEFAULT_FEATURE_FLAGS=jsbi__WEBPACK_IMPORTED_MODULE_10__["default"].BigInt("0b110100...");
    script_patterns = [
        re.compile(r'RAW_DEFAULT_FEATURE_FLAGS=(0x\w+|\d+);'),
        re.compile(r'RAW_DEFAULT_FEATURE_FLAGS=jsbi__WEBPACK_IMPORTED_MODULE_\d+__\["default"\].BigInt\("(0b[01]+)"\);'),
    ]
    flag_str = None
    # Since this is a large request, read incrementally
    with client.get(
        script_uri,
        headers=headers,
        stream=True,
        name="Login.Feature_Toggles.GetJS",
        catch_response=True
    ) as res:
        test_response_for_error(res, script_uri)
        res.encoding = "utf-8"
        prev_chunk = ""
        for chunk in res.iter_content(8192, decode_unicode=True):
            if flag_str:
                # Not reading the whole stream will throw errors, so continue
                # reading once found
                continue
            for pattern in script_patterns:
                js_match = pattern.search(prev_chunk + chunk)
                if js_match:
                    flag_str = js_match.groups()[0]
            prev_chunk = chunk
    return flag_str
def write(msg, level='INFO', html=False, attachment=None, launch_log=False):
    """Write the message to the log file using the given level.

    Valid log levels are ``TRACE``, ``DEBUG``, ``INFO`` (default since RF
    2.9.1), ``WARN``, and ``ERROR`` (new in RF 2.9). Additionally it is
    possible to use ``HTML`` pseudo log level that logs the message as HTML
    using the ``INFO`` level.

    Instead of using this method, it is generally better to use the level
    specific methods such as ``info`` and ``debug``.

    :param msg: the message to log.
    :param level: log level.
    :param html: format or not format the message as html.
    :param attachment: a binary content to attach to the log entry; should be
        a dict with "name", "data" and "mime" values defined (see module
        example).
    :param launch_log: put the log entry on Launch level.
    """
    # Wrap the message and its metadata before handing it to the logger.
    log_message = LogMessage(msg)
    log_message.level = level
    log_message.attachment = attachment
    log_message.launch_log = launch_log
    logger.write(log_message, level, html)
def next_power_of_two(v: int) -> int:
    """Return x such that x == 2**i and x >= v (for v >= 1).

    Implemented with ``int.bit_length`` instead of the classic 32-bit
    shift-OR cascade: Python ints are arbitrary precision, so the original
    cascade (which only propagated 32 bits) silently returned wrong results
    for v > 2**32.

    For v <= 0 the function returns 0, matching the original behaviour.
    """
    if v <= 0:
        return 0
    # (v - 1).bit_length() is the number of bits needed for v - 1, so
    # shifting 1 by it rounds v up to the next power of two (exact powers
    # of two map to themselves).
    return 1 << (v - 1).bit_length()
def triangle(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:
    """Continuous triangle wave.

    Built by folding a full sawtooth: the absolute value of a quarter-period
    shifted sawtooth, rescaled so the wave range is [-amp, amp].

    Args:
        times: Times to output wave for.
        amp: Pulse amplitude. Wave range is [-amp, amp].
        freq: Pulse frequency. units of 1/dt.
        phase: Pulse phase.
    """
    saw = sawtooth(times, 1, freq, phase=(phase - np.pi/2)/2)
    folded = (-2*np.abs(saw) + 1).astype(np.complex_)
    return amp * folded
def add_token(token_sequence: str, tokens: str) -> str:
    """Adds the tokens from 'tokens' that are not already contained in
    `token_sequence` to the end of `token_sequence`::

        >>> add_token('', 'italic')
        'italic'
        >>> add_token('bold italic', 'large')
        'bold italic large'
        >>> add_token('bold italic', 'bold')
        'bold italic'
        >>> add_token('red thin', 'stroked red')
        'red thin stroked'

    Containment is checked on whole tokens, not substrings: the previous
    ``str.find`` based check wrongly treated any substring match as "already
    present" (e.g. it refused to add 'old' to 'bold').
    """
    for tk in tokens.split(' '):
        # Re-split each iteration so tokens appended earlier in this call
        # are also deduplicated.
        if tk and tk not in token_sequence.split(' '):
            token_sequence += ' ' + tk
    return token_sequence.lstrip()
def aggregate_values(mapping: dict, agg_fcn: Literal["mean", "prod"]):
    """Aggregates the values of the input (nested) mapping according to the
    specified aggregation method. This function modifies the input in place.

    Parameters
    ---------
    mapping
        The mapping to be aggregated.
    agg_fcn
        Aggregation function. Only `mean` or `prod` aggregation supported.
    """
    # Resolve the numpy reducer once; nested dicts are handled recursively.
    reduce_fn = getattr(np, agg_fcn)
    for key, value in mapping.items():
        if isinstance(value, dict):
            aggregate_values(value, agg_fcn)
        else:
            mapping[key] = reduce_fn(value)
def ket2dm(psi):
    """
    Convert a ket (1-D state vector) into a density matrix.

    Parameters
    ----------
    psi : ndarray, shape (n,)
        State vector (ket).

    Returns
    -------
    ndarray, shape (n, n)
        The pure-state density matrix |psi><psi| (outer product of psi
        with its complex conjugate).
    """
    bra = psi.conj()
    return np.einsum("i, j -> ij", psi, bra)
def _make_feature_stats_proto(
    stats_values,
    feature_name):
    """Creates the FeatureNameStatistics proto for one feature.

    Args:
      stats_values: A Dict[str,float] mapping the name of each custom
        statistic to its numeric value for this feature. Ex. {
            'Mutual Information': 0.5,
            'Correlation': 0.1 }
      feature_name: The name of the feature.

    Returns:
      A FeatureNameStatistic proto containing the custom statistics for a
      feature.
    """
    result = statistics_pb2.FeatureNameStatistics(name=feature_name)
    # Sort alphabetically by statistic name to have deterministic ordering
    for stat_name, stat_value in sorted(stats_values.items()):
        result.custom_stats.add(name=stat_name, num=stat_value)
    return result
def build_lr_scheduler(
    cfg, optimizer: torch.optim.Optimizer
) -> torch.optim.lr_scheduler._LRScheduler:
    """
    Build a LR scheduler from config.

    Supported cfg.NAME values: "WarmupMultiStepLR", "WarmupCosineLR" and
    "OneCycleLR"; any other name raises ValueError.
    """
    name = cfg.NAME
    if name == "WarmupMultiStepLR":
        return WarmupMultiStepLR(
            optimizer,
            cfg.STEPS,
            cfg.GAMMA,
            warmup_factor=cfg.WARMUP_FACTOR,
            warmup_iters=cfg.WARMUP_ITERS,
            warmup_method=cfg.WARMUP_METHOD,
        )
    if name == "WarmupCosineLR":
        return WarmupCosineLR(
            optimizer,
            cfg.MAX_ITER,
            warmup_factor=cfg.WARMUP_FACTOR,
            warmup_iters=cfg.WARMUP_ITERS,
            warmup_method=cfg.WARMUP_METHOD,
        )
    if name == "OneCycleLR":
        return OneCycleLR(
            optimizer,
            cfg.MAX_LR,
            total_steps=cfg.MAX_ITER,
            pct_start=cfg.PCT_START,
            base_momentum=cfg.BASE_MOM,
            max_momentum=cfg.MAX_MOM,
            div_factor=cfg.DIV_FACTOR,
        )
    raise ValueError("Unknown LR scheduler: {}".format(name))
def mock_gateway_features(
    tasks: MagicMock, transport_class: MagicMock, nodes: dict[int, Sensor]
) -> None:
    """Mock the gateway features."""

    def _gateway():
        """Return the gateway instance the transport was constructed with."""
        return transport_class.call_args[0][0]

    async def mock_start_persistence() -> None:
        """Load nodes from via persistence."""
        _gateway().sensors.update(nodes)

    tasks.start_persistence.side_effect = mock_start_persistence

    async def mock_start() -> None:
        """Mock the start method."""
        gateway = _gateway()
        gateway.on_conn_made(gateway)

    tasks.start.side_effect = mock_start
def cconv(x, y, P):
    """ Periodic convolution with period P of two signals x and y
    """
    xw = _wrap(x, P)
    yw = _wrap(y, P)
    # Output sample k is the dot product of yw with xw reversed and
    # circularly shifted by k+1.
    out = [np.dot(np.roll(xw[::-1], k + 1), yw) for k in np.arange(P)]
    return np.array(out, dtype=float)
def add_arguments():
    """
    Parse the command line arguments.

    Options:
        [-f]: Name of text file with corpus
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f', '--file',
        type=str,
        help="The file with the text.",
    )
    return parser.parse_args()
def assert_(val: bool, msg: Literal["Spin 7 failed"]):
    """
    usage.scipy: 2
    """
    # Auto-generated API-usage stub: records the observed call signature
    # (and usage count above) only; intentionally has no body.
    ...
async def publish_summary_as_entry(
    db_session: Session,
    bot_installation_id: uuid.UUID,
    issue_pr,
    content: Any,
    context_id: str,
    summary: EntrySummaryReport,
) -> Any:
    """
    Publish summary from locust or checks to Bugout entry.
    If entry were deleted from journal it creates new one.

    :param db_session: active database session used to look up the journal
        index configuration and the bot's Bugout user.
    :param bot_installation_id: id of the bot installation event.
    :param issue_pr: pull request record; must expose ``comments_url``,
        ``branch`` and ``entry_id`` attributes.
    :param content: body of the entry to publish.
    :param context_id: external context identifier stored on the entry.
    :param summary: report whose ``title`` is embedded in the entry title.
    :return: id of the updated or newly created Bugout entry.
    """
    # comments_url is presumably of the form
    # https://api.github.com/repos/<org>/<repo>/issues/<number>/comments;
    # dropping the first 4 segments and the trailing "comments" leaves
    # [<org>, <repo>, "issues", <number>] — TODO confirm URL format.
    repo_pr_list = issue_pr.comments_url.rstrip("/").split("/")[4:-1]
    orgranization = repo_pr_list[0]  # NOTE(review): misspelling of "organization" kept as-is
    repository = repo_pr_list[1]
    issue_number = repo_pr_list[3]
    context_url = f"https://github.com/{'/'.join(repo_pr_list)}"
    tags = [
        orgranization,
        repository,
        issue_number,
        context_id,
        "github",
        "pull_request",
        issue_pr.branch,
        "autogenerated",
    ]
    title = f"PR #{issue_number} on {orgranization}/{repository}: {summary.title}"
    index_configuration = (
        db_session.query(GithubIndexConfiguration)
        .filter(GithubIndexConfiguration.github_oauth_event_id == bot_installation_id)
        .first()
    )
    # The journal id is assumed to be the second-to-last path segment of
    # index_url — TODO confirm against how index_url is built.
    journal_id = index_configuration.index_url.rstrip("/").split("/")[-2]
    bugout_user = (
        db_session.query(GitHubBugoutUser)
        .filter(GitHubBugoutUser.event_id == bot_installation_id)
        .first()
    )
    try:
        # Prefer updating the entry previously linked to this PR.
        entry = bugout_api.get_entry(
            token=bugout_user.bugout_access_token,
            journal_id=journal_id,
            entry_id=issue_pr.entry_id,
        )
        bugout_api.update_entry_content(
            token=bugout_user.bugout_access_token,
            journal_id=journal_id,
            entry_id=entry.id,
            title=title,
            content=content,
        )
    except Exception as e:
        # Entry missing (e.g. deleted from the journal): fall back to creating
        # a fresh one. NOTE(review): `e` is unused; consider logger.exception.
        logger.error(
            f"Failed receiving entry with id: {issue_pr.entry_id}, creating new one"
        )
        entry = bugout_api.create_entry(
            token=bugout_user.bugout_access_token,
            journal_id=journal_id,
            title=title,
            content=content,
            tags=tags,
            context_url=context_url,
            context_id=context_id,
            context_type="github",
        )
    logger.info(
        f"GitHub summary stored as entry with id: {issue_pr.entry_id} to journal with id: {journal_id}"
    )
    return entry.id
def keypoint_loss_targets(uvd, keys_uvd, mparams):
    """Computes the supervised keypoint loss between computed and gt keypoints.

    Args:
      uvd: [batch, order, num_targs, 4, num_kp] Predicted set of keypoint uv's
        (pixels).
      keys_uvd: [batch, order, num_targs, 4, num_kp] The ground-truth set of
        uvdw coords.
      mparams: model parameters.

    Returns:
      Keypoint projection loss of size [batch, order].
    """
    print('uvd shape in klt [batch, order, num_targs, 4, num_kp]:', uvd.shape)
    print('keys_uvd shape in klt [batch, order, num_targs, 4, num_kp]:',
          keys_uvd.shape)
    # Normalize both coordinate sets into the same space before comparing.
    norm_keys = nets.to_norm(keys_uvd, mparams)
    norm_uvd = nets.to_norm(uvd, mparams)
    # Squared uv distance, summed over the coordinate and keypoint axes.
    wd = tf.reduce_sum(tf.square(norm_uvd[..., :2, :] - norm_keys[..., :2, :]),
                       axis=[-1, -2])  # [batch, order, num_targs]
    print('wd shape in klt [batch, order, num_targs]:', wd.shape)
    # Average over targets to produce one loss per (batch, order).
    return tf.reduce_mean(wd, axis=[-1])
def RunTests():
    """Runs all functions in __main__ with names like TestXyz()."""
    # Exits the process with _RunAll's status code; never returns.
    sys.exit(_RunAll('Test', _RunOneTest))
def get_initial_epoch(log_path):
    """
    Return the epoch to resume training from, read from a training log file.

    Scans the log for lines whose first comma-separated field is an integer
    epoch number and returns 1 + the last such epoch (so a restarted run
    continues where the previous one stopped).  Returns 0 if the log does
    not exist or contains no epoch lines.
    """
    initial_epoch = 0
    if tf.gfile.Exists(log_path):
        with open(log_path) as log_file:
            # Plain iteration: the original used enumerate() but ignored the
            # index, and re.split for a simple comma split.
            for line in log_file:
                line = line.strip()
                if not line:
                    continue
                first_field = line.split(',')[0]
                if first_field.isdigit():
                    # Keep overwriting so the last epoch line wins.
                    initial_epoch = 1 + int(first_field)
    return initial_epoch
def categorise_town_flood_risk(stations, dt, degree, risklevel=3, plot=False):
    """Categorise towns by relative flood risk from recent water-level trends.

    Takes a list "stations" of station objects, fits a degree-"degree"
    polynomial to each station's water level over the last "dt" days, and uses
    the instantaneous rate of change at the most recent date as a risk index.
    Risk bands are derived by modelling all indices as a normal distribution:
    tol1 = mean + 2*std (severe), tol2 = mean + std (high), tol3 = mean
    (moderate); anything below tol3 is low risk.  NOTE(review): the original
    docstring said the tolerances were "taken in as arguments" — they are
    computed internally.  This is not an accurate warning system when all
    stations are rising, but it highlights stations with a higher-than-normal
    increase.  "risklevel" (0..3) sets the minimum band included in the
    returned list; "plot" enables plotting of the water level with the fitted
    polynomial for each returned town.

    Returns a list of (town_name, risk_label) tuples, risk_label in
    {'severe', 'high', 'moderate', 'low'}.
    """
    # Group stations by the town they belong to: town name -> [stations].
    towns = {}
    for station in stations:
        if station.town in towns.keys():
            towns[station.town].append(station)
        else:
            towns[station.town] = [station]
    townsandrisks = []  # (town, band 0..3) for every town
    total_risks = []    # every station's risk index, for the mean/std below
    risk_by_town = []   # (town, max station risk index)
    for town in towns.keys():
        riskindicator = []
        stations = towns.get(town)
        for station in stations:
            dates, levels = fetch_measure_levels(station.measure_id, datetime.timedelta(dt))
            # Skip stations with no data in the window.
            if len(dates) == 0:
                continue
            poly, d0 = polyfit(dates, levels, degree)
            coeffs = poly.c
            # Differentiate the polynomial: each coefficient is multiplied by
            # its power (poly.c is highest-degree first).
            diffcoeffs = np.zeros(len(coeffs))
            for i in range(len(coeffs)):
                diffcoeffs[i] = coeffs[i]*(len(coeffs)-i-1)
            # Evaluate the derivative at the latest date (offset by d0, the
            # shift used by polyfit) to get the instantaneous rate of change.
            diff_value = 0
            x = matplotlib.dates.date2num(dates)
            for i in range(len(diffcoeffs)):
                diff_value += diffcoeffs[i]*(x[-1]+d0)**i
            riskindicator.append(diff_value)
            total_risks.append(diff_value)
        if len(riskindicator) == 0:
            continue
        # A town's risk is its worst (largest) station index.
        risk_by_town.append((town, max(riskindicator)))
    # Thresholds from the empirical distribution of all indices, assuming the
    # indices are (approximately) normally distributed.
    mean = np.mean(total_risks)
    st_dev = np.std(total_risks)
    tol1 = mean + 2 * st_dev
    tol2 = mean + st_dev
    tol3 = mean
    # Band each town: 0 == low, 1 == moderate, 2 == high, 3 == severe.
    for town in risk_by_town:
        if town[1] > tol1:
            townsandrisks.append((town[0], 3))
        elif town[1] <= tol1 and town[1] > tol2:
            townsandrisks.append((town[0], 2))
        elif town[1] <= tol2 and town[1] > tol3:
            townsandrisks.append((town[0], 1))
        elif town[1] <= tol3:
            townsandrisks.append((town[0], 0))
    # NOTE(review): return value is ignored — assumes sorted_by_key sorts the
    # list in place; verify, otherwise the ordering below is unsorted.
    sorted_by_key(townsandrisks, 1, reverse=True)
    # Keep only towns at or above the requested band, translated to labels.
    greatestrisks = []
    for town in townsandrisks:
        if town[1] >= risklevel:
            if town[1] == 3:
                greatestrisks.append((town[0], 'severe'))
            elif town[1] == 2:
                greatestrisks.append((town[0], 'high'))
            elif town[1] == 1:
                greatestrisks.append((town[0], 'moderate'))
            elif town[1] == 0:
                greatestrisks.append((town[0], 'low'))
    # Optionally plot the first station of each returned town (re-fetching
    # levels; only the first station per town is plotted).
    for town in greatestrisks:
        station = towns[town[0]][0]
        dates, levels = fetch_measure_levels(station.measure_id, datetime.timedelta(dt))
        if plot == True:
            plot_water_level_with_fit(station, dates, levels, degree)
        else:
            continue
    return greatestrisks
def create_clients(num_clients, client_data, input_str='input', label_str='label', client_str='client-',
                   distribute=False):
    """
    Create K clients.

    :param num_clients: the number of clients
    :param client_data: Dictionary of clients data. data[client][input or label]
    :param input_str: input string of client_data
    :param label_str: label string of label_data
    :param client_str: key prefix used to look each client up in client_data
    :param distribute: forwarded to each Client constructor
    :return: List(Client)
    """
    # Client ids are 1-based and map to keys like "client-1", "client-2", ...
    return [
        Client(
            client_id,
            client_data[f'{client_str}{client_id}'][input_str],
            client_data[f'{client_str}{client_id}'][label_str],
            distribute,
        )
        for client_id in range(1, num_clients + 1)
    ]
def l2_loss(
    h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray],
    grad_h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray],
    theta: np.ndarray,
    x, y):
    """l2_loss: standard l2 loss.

    The l2 loss is defined as sum((h(x) - y)^2), the sum of squared residuals
    usually used for linear regression.

    :param h: hypothesis function that models our data (x) using theta
    :type h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param grad_h: function for the gradient of our hypothesis function
        (unused here; kept so all loss functions share one signature)
    :type grad_h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param theta: The parameters of our hypothesis function
    :type theta: np.ndarray
    :param x: A matrix of samples and their respective features.
    :type x: np.ndarray of shape (samples, features)
    :param y: The expected targets our model is attempting to match
    :type y: np.ndarray of shape (samples,)
    :return: The l2 loss value
    :rtype: float
    """
    residuals = h(theta, x) - y
    return np.sum(residuals ** 2)
def test_multiple_read():
    """
    >>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
    >>> from io import StringIO
    >>> x = Ini(env={'_':_}, convertors=i18n_ini_convertor, lazy=True)
    >>> buf = StringIO(\"\"\"
    ... [default]
    ... option = 'abc'
    ... [other]
    ... option = default.option
    ... option1 = '{{option}} xxx'
    ... option2 = '{{default.option}}'
    ... option3 = '{{other.option}}'
    ... \"\"\")
    >>> x.read(buf)
    >>> buf1 = StringIO(\"\"\"
    ... [default]
    ... option = 'hello'
    ... \"\"\")
    >>> x.read(buf1)
    >>> x.freeze()
    >>> print (x)
    #coding=UTF-8
    <BLANKLINE>
    [default]
    option = 'hello'
    [other]
    option = 'hello'
    option1 = 'hello xxx'
    option2 = 'hello'
    option3 = 'hello'
    <BLANKLINE>
    \"\"\"
    """
    # Doctest-only test: the docstring above IS the executable test body
    # (second read overrides default.option; lazy references resolve after
    # freeze()).  Do not edit its expected-output blocks without re-running
    # the doctest suite.
""" | 5,328,659 |
def BestLogLikelihood(aln, alphabet=None, exclude_chars = None,
             allowed_chars='ACGT', motif_length=None, return_length=False):
    """returns the best log-likelihood according to Goldman 1993.

    Arguments:
        - alphabet: a sequence alphabet object.
        - motif_length: 1 for nucleotide, 2 for dinucleotide, etc ..
        - exclude_chars: a series of characters used to exclude motifs
        - allowed_chars: only motifs that contain a subset of these are
          allowed
        - return_length: whether to also return the number of alignment columns
    """
    assert alphabet or motif_length, "Must provide either an alphabet or a motif_length"
    if alphabet:
        # With an alphabet we can enforce character compliance via LoadSeqs,
        # and the motif length comes from the alphabet itself.
        kwargs = dict(moltype=alphabet.MolType)
        motif_length = alphabet.getMotifLen()
    else:
        kwargs = {}
    seqs = LoadSeqs(data=aln.todict(), **kwargs)
    columns = aligned_columns_to_rows(seqs, motif_length, exclude_chars,
                                      allowed_chars)
    log_likelihood = get_G93_lnL_from_array(columns)
    if return_length:
        return log_likelihood, len(columns)
    return log_likelihood
def process_h5_file(h5_file):
    """Extract the artist familiarity from a Million Song Dataset HDF5 file.

    There is one song per file, so this reads the first (only) row of the
    metadata/songs table and returns its 'artist_familiarity' field.

    Field layout references:
        https://github.com/tbertinmahieux/MSongsDB/blob/master/PythonSrc/hdf5_getters.py
        http://millionsongdataset.com/pages/field-list/
        http://millionsongdataset.com/pages/example-track-description/
    """
    songs_table = h5_file['metadata']['songs']
    first_row = songs_table[:1]
    return first_row['artist_familiarity'][0]
async def test_options_flow(hass: HomeAssistant) -> None:
    """Test the options flow end to end (init -> subdiv -> pop -> entry).

    Walks all three option steps for a GB/England holidays entry and checks
    the created options data.
    """
    # Create MockConfigEntry
    config_entry: MockConfigEntry = MockConfigEntry(
        domain=const.DOMAIN,
        data={"country": "GB", "subdiv": "England"},
        title="UK Holidays",
    )
    config_entry.add_to_hass(hass)
    # Initialise Options Flow
    result = await hass.config_entries.options.async_init(config_entry.entry_id)
    # Membership asserts narrow the FlowResult dict for the type checker.
    assert "type" in result and "step_id" in result and "flow_id" in result
    # Check that the first options step is user
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "init"
    # Enter data into the form
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={"country": "GB"},
    )
    assert (
        "type" in result
        and "step_id" in result
        and "flow_id" in result
        and "errors" in result
    )
    # Should pass to the subdiv step
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "subdiv"
    assert result["errors"] == {}
    # ...add England for subdiv
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={"subdiv": "England"},
    )
    assert (
        "type" in result
        and "step_id" in result
        and "flow_id" in result
        and "errors" in result
    )
    # Should pass to the pop step
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pop"
    assert result["errors"] == {}
    # ... will leave pop empty
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={},
    )
    assert "type" in result and "data" in result
    # Should create entry
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["data"] == {"country": "GB", "subdiv": "England"}
def run_geometry_engine(index=0):
    """
    Run the geometry engine a few times to make sure that it actually runs
    without exceptions. Converts benzene to biphenyl (the n-pentane /
    2-methylpentane conversion mentioned previously is no longer used).

    :param index: integer suffix for the proposal PDB filename so repeated
        runs do not overwrite each other.
    :return: dimensionless potential energy of the system after the proposal.

    NOTE(review): relies on module-level names `beta`, `openmm`, `unit`,
    `app`, `align_molecules` and `oemol_to_openmm_system` defined elsewhere
    in this file — confirm they are in scope before moving this function.
    """
    import logging
    logging.basicConfig(level=logging.INFO)
    import copy
    from perses.utils.openeye import iupac_to_oemol
    molecule_name_1 = 'benzene'
    molecule_name_2 = 'biphenyl'
    #molecule_name_1 = 'imatinib'
    #molecule_name_2 = 'erlotinib'
    molecule1 = iupac_to_oemol(molecule_name_1)
    molecule2 = iupac_to_oemol(molecule_name_2)
    # Atom map from the new (biphenyl) to the old (benzene) topology.
    new_to_old_atom_mapping = align_molecules(molecule1, molecule2)
    sys1, pos1, top1 = oemol_to_openmm_system(molecule1)
    sys2, pos2, top2 = oemol_to_openmm_system(molecule2)
    import perses.rjmc.geometry as geometry
    import perses.rjmc.topology_proposal as topology_proposal
    from perses.tests.utils import compute_potential_components
    sm_top_proposal = topology_proposal.TopologyProposal(new_topology=top2, new_system=sys2, old_topology=top1, old_system=sys1,
                                                         old_chemical_state_key='',new_chemical_state_key='', logp_proposal=0.0, new_to_old_atom_map=new_to_old_atom_mapping, metadata={'test':0.0})
    sm_top_proposal._beta = beta
    geometry_engine = geometry.FFAllAngleGeometryEngine(metadata={})
    # Turn on PDB file writing.
    geometry_engine.write_proposal_pdb = True
    geometry_engine.pdb_filename_prefix = 't13geometry-proposal'
    test_pdb_file = open("%s_to_%s_%d.pdb" % (molecule_name_1, molecule_name_2, index), 'w')
    def remove_nonbonded_force(system):
        """Remove NonbondedForce from specified system."""
        force_indices_to_remove = list()
        for [force_index, force] in enumerate(system.getForces()):
            if force.__class__.__name__ == 'NonbondedForce':
                force_indices_to_remove.append(force_index)
        # Remove back-to-front so earlier indices stay valid.
        for force_index in force_indices_to_remove[::-1]:
            system.removeForce(force_index)
    # Valence-only copy of the new system for the energy cross-check below.
    valence_system = copy.deepcopy(sys2)
    remove_nonbonded_force(valence_system)
    integrator = openmm.VerletIntegrator(1*unit.femtoseconds)
    integrator_1 = openmm.VerletIntegrator(1*unit.femtoseconds)
    # Briefly equilibrate the old system to get thermalized positions.
    ctx_1 = openmm.Context(sys1, integrator_1)
    ctx_1.setPositions(pos1)
    ctx_1.setVelocitiesToTemperature(300*unit.kelvin)
    integrator_1.step(1000)
    pos1_new = ctx_1.getState(getPositions=True).getPositions(asNumpy=True)
    context = openmm.Context(sys2, integrator)
    context.setPositions(pos2)
    state = context.getState(getEnergy=True)
    print("Energy before proposal is: %s" % str(state.getPotentialEnergy()))
    openmm.LocalEnergyMinimizer.minimize(context)
    # Forward proposal from the equilibrated old positions ...
    new_positions, logp_proposal = geometry_engine.propose(sm_top_proposal, pos1_new, beta)
    # ... and the reverse log-probability.
    # NOTE(review): the reverse pass uses the original pos1, not pos1_new —
    # confirm this asymmetry is intended.
    logp_reverse = geometry_engine.logp_reverse(sm_top_proposal, new_positions, pos1, beta)
    print(logp_reverse)
    app.PDBFile.writeFile(top2, new_positions, file=test_pdb_file)
    test_pdb_file.close()
    context.setPositions(new_positions)
    state2 = context.getState(getEnergy=True)
    print("Energy after proposal is: %s" %str(state2.getPotentialEnergy()))
    print(compute_potential_components(context))
    valence_integrator = openmm.VerletIntegrator(1*unit.femtoseconds)
    platform = openmm.Platform.getPlatformByName("Reference")
    valence_ctx = openmm.Context(valence_system, valence_integrator, platform)
    valence_ctx.setPositions(new_positions)
    vstate = valence_ctx.getState(getEnergy=True)
    print("Valence energy after proposal is %s " % str(vstate.getPotentialEnergy()))
    final_potential = state2.getPotentialEnergy()
    # Strip units so the caller gets a plain float.
    return final_potential / final_potential.unit
def test_match_type() -> None:
    """Tests that match_type coerces strings to the requested str/bytes type."""
    assert type(match_type(b"hello", bytes)) is bytes
    assert type(match_type(b"hello", str)) is str
    assert type(match_type(u"hello", bytes)) is bytes
    # Was a duplicate of the bytes->str assertion above; the str->str
    # identity case was never exercised.
    assert type(match_type(u"hello", str)) is str
def add_changes_metrics(df, connection):
    """Join jira_issues data with change metrics from the FTS3 table.

    For each issue key, ``get_commits_from_issue`` is queried over
    *connection* and its result tuple is unpacked into four new columns:
    ``num_commits``, ``lines_added``, ``lines_removed`` and
    ``files_changed``.  The input frame is not modified; an augmented
    copy is returned.
    """
    enriched = df.copy()
    metrics = df["key"].apply(get_commits_from_issue, args=(connection,))
    # Tuple positions 1-4 of each metrics entry hold the four counters.
    for offset, column in enumerate(
            ("num_commits", "lines_added", "lines_removed", "files_changed"),
            start=1):
        enriched[column] = [entry[offset] for entry in metrics]
    return enriched
def binary_str(num):
    """ Return a binary string representation of the non-negative integer 'num'
    :type num: int
    :return: binary digits without a '0b' prefix
    :rtype: str
    :raises ValueError: if 'num' is negative
    Examples:
        >>> binary_str(2)
        '10'
        >>> binary_str(5)
        '101'
        >>> binary_str(0)
        '0'
    """
    if num < 0:
        raise ValueError("num must be non-negative")
    # Store mod 2 operations results as '0' and '1'
    bnum = ''
    while num > 0:
        bnum = str(num & 0x1) + bnum
        num = num >> 1
    # The loop never runs for 0, so return '0' explicitly instead of ''.
    return bnum or '0'
def sample_target_pos(batch_size,TARGET_MAX_X, TARGET_MIN_X, TARGET_MAX_Y, TARGET_MIN_Y):
    """
    Sample target_position or robot_position uniformly within the given
    bounding box.

    Returns a float tensor of shape (batch_size, 2) with x in
    [TARGET_MIN_X, TARGET_MAX_X) and y in [TARGET_MIN_Y, TARGET_MAX_Y).
    """
    # Uniform samples in [0, 1) rescaled to each axis range.
    xs = TARGET_MIN_X + np.random.random_sample(batch_size) * (TARGET_MAX_X - TARGET_MIN_X)
    ys = TARGET_MIN_Y + np.random.random_sample(batch_size) * (TARGET_MAX_Y - TARGET_MIN_Y)
    stacked = np.concatenate((xs[:, None], ys[:, None]), axis=1)
    return th.FloatTensor(stacked)
def clf_perceptron(vector_col:str,
                   df_train:pd.DataFrame,
                   model:Perceptron,
                   ) -> list:
    """Classify the training vectors with a fitted (multi-layer) perceptron.

    Arguments:
        vector_col (str): name of the column holding the vectors to classify
        df_train (pd.DataFrame): dataframe with the training data
        model (sklearn.linear_model.Perceptron): fitted (multi-layer) perceptron
    Returns:
        list of "0" and "1" predictions, one per row of *df_train*
    """
    vectors = df_train[vector_col].to_list()
    predictions = model.predict(vectors)
    return list(predictions)
def compute_phot_error(flux_variance, bg_phot, bg_method, ap_area, epadu=1.0):
    """Compute DAOPHOT-style flux uncertainties.

    Parameters
    ----------
    flux_variance : array
        flux values
    bg_phot : array
        background brightness values (needs 'aperture_std' and
        'aperture_area' entries)
    bg_method : string
        background method; accepted for interface compatibility but not
        used in the computation
    ap_area : array
        the area of the aperture in square pixels
    epadu : float
        (optional) Gain in electrons per adu (only use if image units
        aren't e-). Default value is 1.0

    Returns
    -------
    flux_error : array
        an array of flux errors
    """
    # Background contribution: aperture noise scaled by the source aperture
    # area, inflated by the source/background aperture-area ratio.
    sky_sigma_sq = bg_phot['aperture_std'] ** 2.
    bg_variance_terms = ap_area * sky_sigma_sq * (1. + ap_area / bg_phot['aperture_area'])
    # Poisson term (flux variance converted via the gain) plus the sky term.
    total_variance = flux_variance / epadu + bg_variance_terms
    return total_variance ** .5
def set_timed_message(update: Update, context: CallbackContext):
    """ Creates a job that outputs a user-defined message in a certain time.

    Expected command form: /timer <HH:MM> <content...>. The HH:MM value is
    interpreted as a relative delay from now, and the one-off job is named
    after the chat id with a '-once' suffix.
    """
    try:
        # args[0] is the delay; everything after it is the message content.
        time_str = context.args[0]
        content_args = context.args[1]
        for s in context.args[2:]:
            content_args += (" " + s)
        if check_validity_of_time_string(time_str):
            # Parse HH:MM, then convert it into a timedelta offset from now.
            time = datetime.strptime(time_str, "%H:%M")
            time = timedelta(hours=time.hour, minutes=time.minute)
            context.job_queue.run_once(run_timed_message,
                                       datetime.now(pytz.UTC) + time,
                                       (context.user_data['chat_id'], content_args),
                                       name=str(context.user_data['chat_id'])+'-once')
            update.message.reply_text("Created a timed message about " + content_args
                                      + " that will run in " + str(int(time.total_seconds())) + " seconds from now")
        else:
            update.message.reply_text("Couldn't parse the time, sorry")
            # NOTE(review): this raise is caught by the handler below, so the
            # user receives both the "Couldn't parse" reply AND the usage
            # reply — confirm the double message is intended.
            raise ValueError
    except (ValueError, IndexError):
        # IndexError covers missing /timer arguments.
        update.message.reply_text("Usage: /timer <HH:MM> <content>")
def reflect_table(conn, table_name, schema='public'):
    """Reflect basic table attributes (columns and primary key) from the DB."""
    meta_rows = get_column_metadata(conn, table_name, schema=schema)
    columns = [Column(**row) for row in meta_rows]
    pk_columns = list(get_primary_keys(conn, table_name, schema=schema))
    return Table(table_name, columns, PrimaryKey(pk_columns), schema=schema)
def evaluate(c, config_name, predecessors=False, successors=False):
    """Evaluate the model for the given configuration.

    Optionally include predecessor/successor tasks of the
    'model_evaluate' workflow step.
    """
    workflow = get_task_workflow('model_evaluate', predecessors, successors)
    run_workflow_tasks(c, workflow, config_name)
def get_token(token_file):
    """
    Reads the first line from token_file to get a token.

    :param token_file: path to a file whose first line holds the token
    :return: the stripped token string
    :raises ReleaseException: if the first line is empty or whitespace-only
    """
    with open(token_file, "r") as fin:
        # Only the first line is the token (previously the whole file was
        # read, so trailing lines leaked into the token).
        token = fin.readline().strip()
    if not token:
        raise ReleaseException("No valid token found in {}".format(token_file))
    return token
def get_description(soup):
    """Extract the plain-text description of a vacancy.

    Looks first for the standard description block, then for the branded
    variant, and returns the text of whichever is present.
    """
    description = (
        soup.find('div', {'data-qa': 'vacancy-description'})
        or soup.find('div', {'class': 'vacancy-section HH-VacancyBrandedDescription-DANGEROUS-HTML'})
    )
    return description.get_text()
def read_fits(fn: Path, ifrm: int, twoframe: bool) -> np.ndarray:
    """
    Read one frame (or a consecutive pair) from the primary HDU of a FITS file.

    Note the off-by-one indexing ("ifits not ifrm for fits!"): with
    twoframe=True frames ifrm..ifrm+1 are returned, otherwise frame ifrm+1.
    """
    if fits is None:
        raise ImportError('Need Astropy for FITS')
    # memmap = False required thru at least Astropy 1.3.2 due to BZERO used...
    with fits.open(fn, mode='readonly', memmap=False) as f:
        # An HDU object is not directly subscriptable; the pixel array
        # lives in its .data attribute (previously f[0][...] raised
        # TypeError).
        if twoframe:
            frame = f[0].data[ifrm:ifrm+2, :, :]
        else:
            frame = f[0].data[ifrm+1, :, :]
    return frame
def normalize(arrList):
    """
    Divide every column of arrList by its standard deviation; columns whose
    standard deviation is zero are left untouched.
    Args:
        arrList (a list of lists of numbers)
    Returns:
        list, list: The per-column standard deviations, and an arrayList
                    (a list of lists) with each column rescaled by the
                    inverse of its nonzero standard deviation.
    >>> normalize([[1, 2, 3], [6, 7, 8]]) # doctest:+ELLIPSIS
    ([2.5, 2.5, 2.5],...
    >>> normalize([[1, 2, 3], [1, 7, 3]]) # doctest:+ELLIPSIS
    ([0.0, 2.5, 0.0],...
    >>> normalize([[]])
    ([], [[]])
    """
    _, centered = mean_center(arrList)
    # Column-wise mean of the squared deviations is the population variance.
    variances = columnwise_means(multiply(centered, centered))
    stdevs = [v ** 0.5 for v in variances]
    # Scale factor is 1/stdev; zero-stdev columns get factor 1 (a no-op).
    scale_factors = [1 / (s if s != 0 else 1) for s in stdevs]
    return stdevs, scalarMultCols(scale_factors, arrList)
def test_ipywidgets(sphinx_run):
    """Test that ipywidget state is extracted and JS is included in the HTML head."""
    sphinx_run.build()
    assert sphinx_run.warnings() == ""
    widget_meta = sphinx_run.env.nb_metadata["ipywidgets"]
    assert "js_files" in widget_meta
    expected_js = {"ipywidgets_state", "ipywidgets_0", "ipywidgets_1"}
    assert set(widget_meta["js_files"]) == expected_js
    # Both the require.js loader and the widget embed bundle must be linked.
    scripts = sphinx_run.get_html().select("head > script")
    assert any("require.js" in tag.get("src", "") for tag in scripts)
    assert any("embed-amd.js" in tag.get("src", "") for tag in scripts)
def get_reddit_backup_urls(mode):
    """
    Parse reddit backups on pushshift.io
    :param mode: 'Q' for questions, 'A' for answers
    :return: dict of (year, month): backup_url
    """
    mode = {'Q': 'submissions', 'A': 'comments'}[mode]
    page = requests.get(REDDIT_URL + mode)
    soup = BeautifulSoup(page.content, 'lxml')
    dict_date_url = {}
    for file_tag in soup.find_all(attrs={'class': 'file'}):
        # Scan each file entry once for its link (was scanned twice:
        # once in the guard and again to extract the href).
        anchors = file_tag.find_all(lambda x: x.has_attr('href'))
        if not anchors:
            continue
        url_st = anchors[0]['href']
        dates = re.findall(r"20[0-9]{2}-[0-9]{2}", url_st)
        if dates:
            year, month = dates[0].split('-')
            # hrefs are relative ('./RS_YYYY-MM...'); drop the leading dot.
            dict_date_url[(int(year), int(month))] = REDDIT_URL + mode + url_st[1:]
    return dict_date_url
def collect_elf_segments(elf, file_type, segment_els, section_prefix, namespace, image, machine, pools):
    """
    Process all of the segment elements in a program/kernel, etc.

    (Legacy Python 2 code: uses dict.has_key and the old raise syntax.)

    elf         -- parsed ELF file whose program segments are collected
    file_type   -- image file kind (e.g. image.KERNEL) controlling alignment
    segment_els -- segment elements whose attributes override the ELF defaults
    namespace   -- namespace used for new attributes and error reporting
    image, machine, pools -- target image, machine description, memory pools

    Returns the list of segments that were actually added to the image.
    """
    elf_seg_names = elf_segment_names(elf)
    shash = segments_hash(segment_els)
    # Test that every segment element references a segment in the ELF
    # file.
    elf_seg_names_txt = elf_seg_names.values()
    for seg_name in shash.keys():
        if seg_name not in elf_seg_names_txt:
            raise MergeError, \
                  '%s: Cannot find segment "%s" in the ELF file. ' \
                  'Valid values are %s' % (namespace.abs_name('.'), seg_name, elf_seg_names_txt)
    collected_segments = []
    i = 0
    last_segment = None # The last segment processed.
    group = []
    for segment in elf.segments:
        # Start from the segment's own ELF properties ...
        attrs = image.new_attrs(namespace, for_segment = True)
        attrs.virt_addr = segment.vaddr
        attrs.attach = segment.flags
        attrs.elf_flags = segment.flags
        # The kernel is *very* picky about alignment, so use the
        # segment's alignment rules by default.
        if file_type == image.KERNEL:
            attrs.align = segment.align
        # XXX: The kernel image is generated very weirdly on x86. These
        # hacks are connected to some other PC99 hacks in tools/build.py.
        if elf.machine == EM_386:
            attrs.align = machine.min_page_size()
        attrs.phys_addr = segment.paddr
        # ... then let a matching XML segment element override them.
        if elf_seg_names.has_key(i):
            seg_name = elf_seg_names[i]
            attrs.name = seg_name
            if shash.has_key(seg_name):
                segment_el = shash[seg_name]
                attrs.phys_addr = getattr(segment_el, 'phys_addr', attrs.phys_addr)
                attrs.physpool = getattr(segment_el, 'physpool', attrs.physpool)
                attrs.align = getattr(segment_el, 'align', attrs.align)
                attrs.pager = getattr(segment_el, 'pager', attrs.pager)
                attrs.direct = getattr(segment_el, 'direct', attrs.direct)
                attrs.protected = getattr(segment_el, 'protected', attrs.protected)
                if hasattr(segment_el, 'attach'):
                    attrs.attach = attach_to_elf_flags(segment_el.attach)
                if hasattr(segment_el, 'pager'):
                    attrs.pager = make_pager_attr(segment_el.pager)
                if hasattr(segment_el, 'cache_policy'):
                    attrs.cache_policy = machine.get_cache_policy(segment_el.cache_policy)
        else:
            # Unnamed segment: fall back to its index as the name.
            attrs.name = str(i)
        s = image.add_segment(segment_index = i,
                              segment = segment,
                              section_prefix = section_prefix,
                              file_type = file_type,
                              attrs = attrs,
                              machine = machine,
                              pools = pools)
        if s is not None:
            collected_segments.append(s)
            # It is possible for different segments to occupy the same
            # page of memory (yes! really!). To accommodate this
            # place segments that have the same flags into the same
            # static allocation groups, where sub-page allocation is
            # allowed.
            #
            # Do not set the maximum distance the segments can be
            # apart. The distance support was originally designed
            # with this code in mind, but with testing it's been shown
            # that it is difficult to get the distance value right.
            if last_segment is not None and \
               last_segment.flags != segment.flags:
                image.add_group(None, group)
                group = []
            group.append(s)
        last_segment = segment
        i = i + 1
    # Flush the final group of same-flag segments.
    image.add_group(None, group)
    return collected_segments
def follow_operation(operation_uri: str, unpack_metadata=None):
    """
    Poll an LXD operation until it completes, optionally showing progress.

    Params:
        operation_uri: URI of the operation to follow.
        unpack_metadata: Function to unpack the operation's metadata. Return a line of text to summarize
            the current progress of the operation.
            If not given, progress will not be shown.

    Raises:
        LXCException: if the operation is not visible within ~5 seconds.
    """
    import time
    from yurt.util import retry
    operations = get_pylxd_client().operations
    # Allow time for operation to be created.
    try:
        retry(
            lambda: operations.get(operation_uri),  # pylint: disable=no-member
            retries=10,
            wait_time=0.5
        )
        operation = operations.get(operation_uri)  # pylint: disable=no-member
    except pylxd.exceptions.NotFound:
        raise LXCException(
            f"Timed out while waiting for operation to be created.")
    logging.info(operation.description)
    # Poll until the operation record disappears: LXD removes finished
    # operations, so NotFound below signals completion.
    while True:
        try:
            operation = operations.get(  # pylint: disable=no-member
                operation_uri
            )
            if unpack_metadata:
                # Carriage return keeps the progress line in place.
                print(f"\r{unpack_metadata(operation.metadata)}", end="")
            time.sleep(0.5)
        except pylxd.exceptions.NotFound:
            print("\nDone")
            break
        except KeyboardInterrupt:
            # Allow the user to stop following without killing the operation.
            break
def getStrVector(tarstr,cdict,clen=None):
    """Vectorize a string as character counts.

    Each slot of the returned vector holds the frequency of one character
    of the alphabet; *cdict* maps each character to its slot index.

    :param tarstr: string to vectorize
    :param cdict: mapping from character to vector index
    :param clen: vector length; falls back to the alphabet size when falsy
    :return: list of per-character counts
    """
    if not clen:
        clen = len(cdict)
    counts = [0] * clen
    for ch in tarstr:
        counts[cdict[ch]] += 1
    return counts
def process_request(registry: ServiceRegistry, url: str) -> str:
    """ Given URL (customer name), make a Request to handle interaction """
    # Each request is processed inside its own container.
    container = registry.create_container()
    container.register_singleton(url, Url)
    # The View pulls its dependencies from the container and renders
    # the greeting when called.
    greeting_view = container.get(View)
    return greeting_view()
def percent_color(percentage: float) -> str:
    """ Render *percentage* with a traffic-light color for printing. """
    # <= 30: red, 30-70: yellow, > 70: green.
    if percentage > 70:
        shade = 'green'
    elif percentage > 30:
        shade = 'yellow'
    else:
        shade = 'red'
    return colored(percentage, shade)
def _generate_image_and_label_batch(image, image_raw, label, min_queue_examples,
                                    batch_size, shuffle):
    """Construct a queued batch of images and labels.'''
    example: min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                             min_fraction_of_examples_in_queue)
    Args:
        image: 3-D Tensor of [height, width, 3] of type.float32.
        image_raw: 3-D Tensor of the unprocessed image.
        label: 1-D Tensor of type.int32
        min_queue_examples: int32, minimum number of samples to retain
            in the queue that provides of batches of examples.
        batch_size: Number of images per batch.
        shuffle: boolean indicating whether to use a shuffling queue.
    Returns:
        images: Images. 4D tensor of [batch_size, height, width, 3] size.
        images_raw: Raw images. 4D tensor of [batch_size, height, width, 3] size.
        label_batch: Labels. 1D tensor of [batch_size] size.
    """
    # Create a queue that shuffles the examples, and then
    # read 'batch_size' images + labels from the example queue.
    num_preprocess_threads = 8
    if shuffle:
        # Fixed: this branch previously bound its outputs to a different
        # name ('labels'), so the 'label_batch' return below raised
        # NameError whenever shuffle=True.
        images, images_raw, label_batch = tf.train.shuffle_batch(
            [image, image_raw, label],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size,
            min_after_dequeue=min_queue_examples)
    else:
        images, images_raw, label_batch = tf.train.batch(
            [image, image_raw, label],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size)
    # Display the training images in the visualizer.
    tf.summary.image('images', images)
    return images, images_raw, label_batch
def parsePDB(pdb, altloc='A', model=None):
    """Return an :class:`.AtomGroup` and/or dictionary containing header data
    parsed from a PDB file.
    This function extends :func:`.parsePDBStream`.
    See :ref:`parsepdb` for a detailed usage example.
    :arg pdb: a PDB identifier or a filename
    """
    # Derive a title from the file name, unwrapping a .gz suffix first.
    base = os.path.split(pdb)[1]
    title, ext = os.path.splitext(base)
    if ext == '.gz':
        title, ext = os.path.splitext(title)
    # Strip the conventional 'pdb' prefix of downloaded entries (pdb1abc -> 1abc).
    if len(title) == 7 and title.startswith('pdb'):
        title = title[3:]
    # NOTE(review): title is computed but not forwarded to parsePDBStream;
    # presumably the stream parser derives its own — confirm.
    with open(pdb, 'r') as stream:
        return parsePDBStream(stream, altloc, model)
def elgamal_add(*ciphertexts: ElGamalCiphertext) -> ElGamalCiphertext:
    """
    Homomorphically accumulates one or more ElGamal ciphertexts by pairwise
    multiplication. The exponents of vote counters will add.

    :param ciphertexts: one or more ciphertexts to combine
    :return: the component-wise product of the inputs
    :raises ValueError: if no ciphertext is supplied
    """
    # A real exception instead of `assert`, which disappears under `python -O`.
    if not ciphertexts:
        raise ValueError("Must have one or more ciphertexts for elgamal_add")
    pads = [c.pad for c in ciphertexts]
    data = [c.data for c in ciphertexts]
    return ElGamalCiphertext(mult_p(*pads), mult_p(*data))
def find_min_cost_thresholds(roc_curves, base_rates, proportions, cost_matrix):
    """Compute thresholds by attribute values that minimize cost.
    :param roc_curves: Receiver operating characteristic (ROC)
        by attribute.
    :type roc_curves: dict
    :param base_rates: Base rate by attribute.
    :type base_rates: dict
    :param proportions: Proportion of each attribute value.
    :type proportions: dict
    :param cost_matrix: Cost matrix by [[tn, fp], [fn, tp]].
    :type cost_matrix: sequence
    :return: Thresholds, FPR and TPR by attribute and cost value.
    :rtype: tuple
    """
    thresholds = _extract_threshold(roc_curves)
    cutoffs = {}
    fpr_tpr = {}
    total_cost = 0
    for group, roc in roc_curves.items():
        fprs, tprs = roc[0], roc[1]
        base_rate = base_rates[group]
        # Negated so that argmin selects the threshold of maximal benefit.
        costs = [-_cost_function(fprs[i], tprs[i], base_rate, cost_matrix)
                 for i in range(len(thresholds))]
        best = np.argmin(costs)
        cutoffs[group] = thresholds[best]
        fpr_tpr[group] = (fprs[best], tprs[best])
        # Weight each group's minimal cost by its population share.
        total_cost += costs[best] * proportions[group]
    return cutoffs, fpr_tpr, total_cost
def get_content_type_encoding(curi):
    """
    Determine the content type and encoding based on the `Content-Type`
    header, falling back to sniffing a ``<meta ... content="...">``
    declaration in the first 512 bytes of the body.

    `curi` is the :class:`CrawlUri`.
    Returns a ``(content_type, charset)`` tuple.
    """
    content_type = "text/plain"
    charset = ""
    if curi.rep_header and "Content-Type" in curi.rep_header:
        (content_type, charset) = extract_content_type_encoding(
                curi.rep_header["Content-Type"])
    if charset == "" and curi.content_body and len(curi.content_body) >= 512:
        # no charset information in the http header
        first_bytes = curi.content_body[:512].lower()
        ctypestart = first_bytes.find("content-type")
        if ctypestart != -1:
            # there is a html header; locate its content attribute
            ctypestart = first_bytes.find("content=\"", ctypestart)
            if ctypestart != -1:
                ctypeend = first_bytes.find("\"", ctypestart + 9)
                # Previously a missing content attribute or unterminated
                # quote (find() == -1) produced a garbage slice here.
                if ctypeend != -1:
                    return extract_content_type_encoding(
                            first_bytes[ctypestart + 9:ctypeend])
    return (content_type, charset)
def truncate_errors(install_stdout, install_errors, language_detection_errors,
                    compile_errors, max_error_len=10*1024):
    """
    Combine lists of errors into a single list under a maximum length.

    When everything fits within *max_error_len* characters the lists are
    simply concatenated (wrapped in install markers when install output is
    present).  Otherwise the install output receives a bounded share of the
    budget and each section keeps its head and tail around a truncation
    marker.
    """
    install_stdout = install_stdout or []
    install_errors = install_errors or []
    language_detection_errors = language_detection_errors or []
    compile_errors = compile_errors or []
    all_errors = install_stdout + install_errors + language_detection_errors + compile_errors
    result = []
    # Fast path: no truncation needed.
    if sum(len(line) for line in all_errors) <= max_error_len:
        if install_stdout or install_errors:
            result.append(INSTALL_ERROR_START)
            result.extend(install_stdout)
            result.append(INSTALL_ERROR_MID)
            result.extend(install_errors)
            result.append(INSTALL_ERROR_END)
        result.extend(language_detection_errors)
        result.extend(compile_errors)
        return result
    def bound_errors(source, bound):
        """Keep *source*'s head (~1/3 of *bound*) and tail within *bound*.

        Returns the total kept length and the kept lines, with a marker
        inserted where lines were dropped.
        """
        total_length = sum(len(line) for line in source)
        if total_length <= bound:
            return total_length, source
        length = 0
        current = 0
        result = []
        # Take roughly the first third of the budget from the start;
        # always keep at least one line.
        while current < len(source) and (
                length == 0 or
                length + len(source[current]) < bound // 3):
            result.append(source[current])
            length += len(source[current])
            current += 1
        if current < len(source):
            result.append("...(output truncated)...")
        end_errors = []
        end = current
        current = -1
        # Fill the remaining budget from the tail, walking backwards.
        # Fixed: the length check was misparenthesized as
        # `(cond or length + len(...)) < bound`, which only worked by
        # accident through bool/int coercion.
        while current >= -(len(source) - end) and (
                len(end_errors) == 0 or
                length + len(source[current]) < bound):
            end_errors.append(source[current])
            length += len(source[current])
            current -= 1
        result.extend(reversed(end_errors))
        return length, result
    remaining_length = max_error_len
    if install_stdout or install_errors:
        result.append(INSTALL_ERROR_START)
        used, lines = bound_errors(install_stdout, 0.2 * max_error_len)
        remaining_length -= used
        result.extend(lines)
        result.append(INSTALL_ERROR_MID)
        # Install errors get at least 30% of the budget, more if stdout
        # used less than its share.
        used, lines = bound_errors(install_errors,
                                   max(0.3 * max_error_len,
                                       0.5 * max_error_len - used))
        remaining_length -= used
        result.extend(lines)
        result.append(INSTALL_ERROR_END)
    _, lines = bound_errors(language_detection_errors + compile_errors, remaining_length)
    result.extend(lines)
    return result
def is_balanced(expression: str) -> bool:
    """
    Checks if a string is balanced.

    A string is balanced when every opening bracket is closed by the
    matching bracket type in the correct (LIFO) order; non-bracket
    characters are ignored.

    :param expression: is the expression to evaluate.
    :raise AttributeError: if the expression is None.
    :return: a boolean value determining if the string is balanced.
    """
    if expression is None:
        raise AttributeError("Expression cannot be None.")
    bracket_map = {
        "(": ")",
        "[": "]",
        "{": "}",
        "<": ">"
    }
    closers = set(bracket_map.values())
    stack: List[str] = []
    # Previously a single flag tracked only the most recent comparison, so
    # inputs like "(()" were reported balanced; use a real stack match.
    for letter in expression:
        if letter in bracket_map:
            stack.append(letter)
        elif letter in closers:
            # A closer must match the most recent unclosed opener.
            if not stack or bracket_map[stack.pop()] != letter:
                return False
    # Every opener must have been closed.
    return not stack
def get_committee_assignment(state: BeaconState,
                             epoch: Epoch,
                             validator_index: ValidatorIndex
                             ) -> Optional[Tuple[Sequence[ValidatorIndex], CommitteeIndex, Slot]]:
    """
    Return the committee assignment in the ``epoch`` for ``validator_index``.
    ``assignment`` returned is a tuple of the following form:
        * ``assignment[0]`` is the list of validators in the committee
        * ``assignment[1]`` is the index to which the committee is assigned
        * ``assignment[2]`` is the slot at which the committee is assigned
    Return None if no assignment.
    """
    # Assignments are only computable up to one epoch ahead of the current one.
    next_epoch = get_current_epoch(state) + 1
    assert epoch <= next_epoch
    # Scan every committee of every slot in the epoch for the validator.
    start_slot = compute_start_slot_at_epoch(epoch)
    for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH):
        for index in range(get_committee_count_at_slot(state, Slot(slot))):
            committee = get_beacon_committee(state, Slot(slot), CommitteeIndex(index))
            if validator_index in committee:
                return committee, CommitteeIndex(index), Slot(slot)
    return None
def is_custfmt0(*args):
    """
    is_custfmt0(F) -> bool
    Does the first operand use a custom data representation?
    @param F (C++: flags_t)
    """
    # Auto-generated SWIG wrapper: forwards directly to the native
    # _ida_bytes extension module; do not edit by hand.
    return _ida_bytes.is_custfmt0(*args)
def create_procedure(server, db_name, schema_name, func_name, s_type,
                     s_version, with_args=False, args=""):
    """This function adds a procedure to the schema.

    :param server: dict with connection settings (username, db_password,
        host, port, sslmode)
    :param db_name: database to connect to
    :param schema_name: schema in which to create the procedure
    :param func_name: procedure name
    :param s_type: 'pg' for PostgreSQL, anything else for EDB
    :param s_version: numeric server version (e.g. 90500)
    :param with_args: kept for interface compatibility; not used here
    :param args: argument list text inserted into the CREATE statement
    :return: (oid, proname) of the new procedure, or None on error
    """
    try:
        connection = utils.get_db_connection(db_name,
                                             server['username'],
                                             server['db_password'],
                                             server['host'],
                                             server['port'],
                                             server['sslmode'])
        pg_cursor = connection.cursor()
        # Identifiers and the argument list cannot be bound parameters, so
        # the CREATE statement is assembled with format() (test helper only).
        if s_type == 'pg':
            query = "CREATE PROCEDURE {0}.{1}" \
                    "({2})" \
                    " LANGUAGE 'sql'" \
                    " SECURITY DEFINER AS $$" \
                    " SELECT 1; $$;".format(schema_name, func_name, args)
        else:
            if s_version >= 90500:
                query = "CREATE PROCEDURE {0}.{1}" \
                        "({2})" \
                        " SECURITY DEFINER AS $BODY$ BEGIN" \
                        " NULL; END; $BODY$".format(schema_name, func_name,
                                                    args)
            else:
                query = "CREATE PROCEDURE {0}.{1}" \
                        "({2})" \
                        " AS $BODY$ BEGIN" \
                        " NULL; END; $BODY$".format(schema_name, func_name,
                                                    args)
        pg_cursor.execute(query)
        connection.commit()
        # Get 'oid' from the newly created procedure. The name is bound as
        # a real query parameter instead of being interpolated into the SQL
        # text with %-formatting.
        pg_cursor.execute("SELECT pro.oid, pro.proname FROM"
                          " pg_proc pro WHERE pro.proname=%s", (func_name,))
        functions = pg_cursor.fetchone()
        connection.close()
        return functions
    except Exception:
        # Best-effort test helper: log the failure and return None.
        traceback.print_exc(file=sys.stderr)
def test_update_preferred_places_case_first_option(sample_person):
    """Test that moving the first preferred exit demotes it to second place."""
    sample_person.update_preferred_places(PossibleExits.CAFE)
    expected = [PossibleExits.BAR, PossibleExits.CAFE, PossibleExits.CINEMA]
    assert sample_person.preferred_places_today == expected
def test_nb():
    """Test notebook code"""
    godag = get_godag("go-basic.obo", optional_attrs={'relationship'})
    leaf_ids = set(term.item_id for term in godag.values() if not term.children)
    virion = 'GO:0019012'
    # Subgraph built without optional relationships.
    subdag_plain = GoSubDag(leaf_ids, godag)
    nt_virion = subdag_plain.go2nt[virion]
    print(nt_virion)
    print(f'r0 THE VALUE OF dcnt IS: {nt_virion.dcnt}')
    # Subgraph built using all optional relationships.
    subdag_rel = GoSubDag(leaf_ids, godag, relationships=True)
    nt_virion = subdag_rel.go2nt[virion]
    print(nt_virion)
    print(f'r1 THE VALUE OF dcnt IS: {nt_virion.dcnt}')
    # Subgraph restricted to part_of relationships.
    subdag_partof = GoSubDag(leaf_ids, godag, relationships={'part_of'})
    nt_virion = subdag_partof.go2nt[virion]
    print(nt_virion)
    print(f'THE VALUE OF dcnt IS: {nt_virion.dcnt}')
    virion_descendants = subdag_partof.rcntobj.go2descendants[virion]
    print(f'{len(virion_descendants)} descendants of virion were found')
    # Limit plot of descendants to get a smaller plot
    virion_capsid_fiber = {'GO:0098033', 'GO:0098032'}
    subdag_partof.prt_goids(virion_capsid_fiber,
                            '{NS} {GO} dcnt({dcnt}) D-{depth:02} {GO_name}')
    # Plot just the two chosen descendants plus their ancestors.
    plot_dag = GoSubDag(virion_capsid_fiber, godag, relationships={'part_of'})
    GoSubDagPlot(plot_dag).plt_dag('virion_capsid_fiber.png')
def get_nfa_by_graph(
    graph: nx.MultiDiGraph, start_nodes: Set[int] = None, final_nodes: Set[int] = None
) -> NondeterministicFiniteAutomaton:
    """
    Creates a Nondeterministic Finite Automaton for a specified graph.
    If start_nodes and final_nodes are not specified, all nodes are considered start and end.
    Parameters
    ----------
    graph: nx.MultiDiGraph
        Graph for creating NFA
    start_nodes: Set[int]
        Set of start nodes
    final_nodes: Set[int]
        Set of final nodes
    Returns
    -------
    EpsilonNFA
        Epsilon Nondeterministic Finite Automaton which equivalent to graph
    Raises
    ------
    ValueError
        If node does not present in the graph
    """
    nfa = NondeterministicFiniteAutomaton()
    # add the necessary transitions to automaton
    for node_from, node_to in graph.edges():
        edge_data = graph.get_edge_data(node_from, node_to)[0]["label"]
        nfa.add_transition(node_from, edge_data, node_to)
    # Fixed: was `(start_nodes and final_nodes) is None`, which evaluated
    # the boolean-AND of the two sets and therefore ignored whichever set
    # was supplied alongside a None (or empty) one.
    if start_nodes is None and final_nodes is None:
        if not nfa.states:
            for node in graph.nodes:
                nfa.add_start_state(State(node))
                nfa.add_final_state(State(node))
        else:
            for state in nfa.states:
                nfa.add_start_state(state)
                nfa.add_final_state(state)
        return nfa
    if start_nodes:
        for start_node in start_nodes:
            state = State(start_node)
            if state not in nfa.states:
                raise ValueError(f"\nNode {start_node} does not present in the graph")
            nfa.add_start_state(state)
    if final_nodes:
        for final_node in final_nodes:
            state = State(final_node)
            if state not in nfa.states:
                raise ValueError(f"\nNode {final_node} does not present in the graph")
            nfa.add_final_state(state)
    return nfa
def just_info(*msg):
    """ Print a log line, but respecting the graph """
    parts = []
    if settings.print_group_name:
        parts.append(" " * (settings.GROUP_NAME_WIDTH + 1))
    if settings.print_tracks:
        parts.append(tracks.write())
    parts.append(" " + " ".join(map(str, msg)))
    logging.getLogger('mupf').info("".join(parts))
def print_key_val(init, value, pre_indent=0, end=','):
    """Print the key and value and insert it into the code list.
    :param init: string to initialize value e.g.
        "'key': " or "url = "
    :param value: value to print in the dictionary
    :param pre_indent: optional param to set the level of indentation,
        defaults to 0
    :param end: optional param to set the end, defaults to comma
    :return: list of generated source lines wrapped to the print width
    """
    indent = INDENT * pre_indent
    # indent is up to the first single quote
    start = indent + len(init)
    # 80 is the print line minus the starting indent
    # minus 2 single quotes, 1 space, and 1 backslash
    left = PRINTLINE - start - 4
    code = []
    code.append("{i}{s}'{v}'".format(i=" " * indent, s=init, v=value[:left]))
    if len(value) > left:
        code[-1] += " \\"
        # figure out lines by taking the length of the value and dividing by
        # chars left to the print line
        lines = int(math.ceil(len(value) / float(left)))
        # range instead of the Python-2-only xrange (identical behavior).
        for i in range(1, lines):
            delim = " \\"
            if i == lines - 1:
                delim = end
            code.append("{i}'{v}'{d}".format(i=" " * start,
                                             v=value[i * left:(i+1) * left],
                                             d=delim))
    else:
        code[-1] += end
    return code
def not_found_error(e):
    """HTTP 404 view"""
    # Ignore unused arguments
    # pylint: disable=W0613
    body = render_template("errors/404.html")
    return body, 404
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.