content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import json
def project_file_read(request):
    """
    Return the raw content of a project file over HTTP.

    :param request: request object (Django-style; assumed to expose
        ``method`` and a JSON ``body`` -- TODO confirm framework)
    :return: HttpResponse with the file content decoded as UTF-8; implicitly
        returns None for non-POST requests
    """
    if request.method == 'POST':
        data = json.loads(request.body)
        # NOTE(review): path components come straight from the client; no
        # path-traversal check is visible here -- confirm upstream validation.
        path = join(data['path'], data['label'])
        # binary file
        # Opened in binary mode, then decoded as UTF-8; a non-text file will
        # raise UnicodeDecodeError on the next line.
        with open(path, 'rb') as f:
            return HttpResponse(f.read().decode('utf-8'))
from typing import Final
def ast_to_graph(root: Node) -> nx.DiGraph:
    """
    You will create your regular expression with a specific syntax which is
    transformed into an AST, however the regular expression engine expects
    to navigate in a graph. As it is too complicated to navigate inside the
    AST directly, this function transforms the AST into an actual graph.

    Notes
    -----
    Since each node, except the :py:class:`nsre.ast.Final` ones, have children
    nodes, the idea is to insert nodes into the graph one at a time and then to
    work on those new nodes to transform it into its content.

    Also, there is implicitly a :code:`_Initial` and a :code:`_Terminal` node.
    The graph exploration will start from the initial node and the regular
    expression will be considered to be a match if when the input sequence is
    entirely consumed you can transition to the terminal node.

    By example, you got a node A which is a concatenation of B and C. Suppose
    that the code looks like this:

    >>> from nsre import *
    >>> c = Final(Eq('c'))
    >>> b = Final(Eq('b'))
    >>> a = c + b
    >>> g = ast_to_graph(a)

    Then the first graph you're going to get is
    :code:`_Initial` -> :code:`A` -> :code:`_Terminal`
    But then the algorithm is going to transform A into its content and you'll
    end up with the new graph
    :code:`_Initial` -> :code:`B` -> :code:`C` -> :code:`_Terminal`
    And so on if B and C have content of their own (they don't in the current
    example).

    The way to transform a node into its content depends on the node type, of
    course. That's why you'll find in this file a bunch of :code:`_explore_*`
    methods which are actually the ways to transform a specific node into a
    graph.

    The overall algorithm here is to have a to-do list (the "explore" variable)
    which contains the set of currently unexplored nodes. When a node is
    transformed into its content, the newly inserted nodes are also added to
    the to-do list and will be explored at the next iteration. This goes on
    and on until the whole AST has been transformed into a graph.

    Another detail is that capture groups are indicated with a start and a
    stop marker on the edges. Each edge can potentially contain in its data
    a "start_captures" or a "stop_captures" list. They contain the name, in
    order, of capture groups to start or stop. The capture should start right
    after the start and before the stop marker.

    See Also
    --------
    _explore_concatenation, _explore_alternation, _explore_maybe,
    _explore_any_number, _explore_capture
    """
    g = nx.DiGraph()
    initial = _Initial()
    terminal = _Terminal()
    # Seed the graph with the implicit endpoints: _Initial -> root -> _Terminal
    g.add_nodes_from([initial, root, terminal])
    g.add_edge(initial, root)
    g.add_edge(root, terminal)
    # To-do set of nodes that still need to be expanded into their content.
    explore = {root}
    while explore:
        # Iterate over a snapshot: the _explore_* helpers add newly inserted
        # nodes to `explore` while we consume it.
        for node in [*explore]:
            explore.remove(node)
            if isinstance(node, Final):
                # Final nodes are leaves; nothing to expand.
                pass
            elif isinstance(node, Concatenation):
                _explore_concatenation(explore, g, node)
            elif isinstance(node, Alternation):
                _explore_alternation(explore, g, node)
            elif isinstance(node, Maybe):
                _explore_maybe(explore, g, node)
            elif isinstance(node, AnyNumber):
                _explore_any_number(explore, g, node)
            elif isinstance(node, Capture):
                _explore_capture(explore, g, node)
    return g
def transform(record):
    """
    Transform (map) a flat record onto a nested stakeholder structure.

    Parameters
    ----------
    record : dict
        The record to transform. Must provide the keys
        ``stakeholder_approach``, ``stakeholder_id``, ``stakeholder_name``,
        ``deliverable_id`` and ``deliverable_name``.

    Returns
    -------
    dict
        The transformed record, nested as approach -> stakeholder id ->
        stakeholder data (with the deliverable keyed by its id).
    """
    deliverable = {"name": record["deliverable_name"]}
    stakeholder = {
        "name": record["stakeholder_name"],
        record["deliverable_id"]: deliverable,
    }
    return {
        record["stakeholder_approach"]: {record["stakeholder_id"]: stakeholder}
    }
def easeInQuart(n):
    """Quartic ease-in tween: begins slow, then accelerates.

    Args:
        n (float): The time progress, starting at 0.0 and ending at 1.0.

    Returns:
        (float) The line progress, starting at 0.0 and ending at 1.0.
        Suitable for passing to getPointOnLine().
    """
    _checkRange(n)
    progress = n ** 4
    return progress
def apply(dataframe, parameters=None, variant=CLASSIC):
    """
    Discover a StarStar model from an ad-hoc built dataframe

    Parameters
    -------------
    dataframe
        Dataframe
    parameters
        Possible parameters of the algorithm
    variant
        Variant of the algorithm, possible values: classic

    Returns
    -------------
    perspectives_heu
        Dictionary of perspectives associated to Heuristics Net
    """
    # Dispatch to the implementation registered for the requested variant;
    # VERSIONS and CLASSIC are module-level names defined elsewhere in the file.
    return VERSIONS[variant](dataframe, parameters=parameters)
import os
def create_win_scite(folders):
    """
    create a batch file to start scite
    @param      folders     see @see fn create_win_batches
    @return                 operations (list of what was done)
    """
    lines = (
        '@echo off',
        'set CURRENT2=%~dp0',
        'call "%CURRENT2%env.bat"',
        'set SCITE=%PYTHON_TOOLS%\\Scite\\wscite\\scite.exe',
        'start "scite" /B "%SCITE%" "%1"',
    )
    script = "\n".join(lines)
    target = os.path.join(folders["config"], "scite.bat")
    with open(target, "w") as handle:
        handle.write(script)
    return [('batch', target)]
def invoke_cli(cli_runner):
    """Return a callable that invokes the module-level ``cli`` with options.

    :param cli_runner: a test runner exposing ``invoke`` (e.g.
        ``click.testing.CliRunner``) -- presumably supplied by a fixture;
        confirm against the surrounding test setup.
    :return: ``cli_runner.invoke`` partially applied to ``cli``
    """
    return partial(cli_runner.invoke, cli)
import uuid
def create_launch_template(client, name, image_ami_id,
                           iam_instance_profile_arn, user_data):
    """Create a launch template for provisioning instances

    Args:
        client (EC2.Client): boto3 ec2 client
        name (str): the name of the launch template
        image_ami_id (str): the ami id for the launched instances
        iam_instance_profile_arn (str): ARN for for the Iam instance profile
            to attach to launched instances
        user_data (str): line break seperated commands to run on instance start

    Returns:
        object: launch template context object with
            ``launch_template_name`` and ``launch_template_id`` attributes
    """
    # Idempotency token so a retried call cannot create a duplicate template.
    client_token = str(uuid.uuid4())
    response = client.create_launch_template(
        DryRun=False,
        ClientToken=client_token,
        LaunchTemplateName=name,
        LaunchTemplateData={
            'EbsOptimized': False,
            'IamInstanceProfile': {
                'Arn': iam_instance_profile_arn,
            },
            'ImageId': image_ami_id,
            'Monitoring': {
                'Enabled': True
            },
            # Instances self-destruct on OS shutdown (worker fleet pattern).
            'InstanceInitiatedShutdownBehavior': 'terminate',
            'UserData': user_data,
            # Tags applied to resources created FROM this template.
            'TagSpecifications': [
                {
                    'ResourceType': 'instance',
                    'Tags': [
                        {
                            'Key': 'Name',
                            'Value': 'CBM3 Worker Instance'
                        },
                    ]
                },
                {
                    'ResourceType': 'volume',
                    'Tags': [
                        {
                            'Key': 'Name',
                            'Value': 'CBM3 Worker volume'
                        },
                    ]
                },
            ]
        },
        # Tags applied to the launch template itself.
        TagSpecifications=[
            {
                'ResourceType': 'launch-template',
                'Tags': [
                    {
                        'Key': 'name',
                        'Value': 'CBM3 launch template'
                    },
                ]
            },
        ]
    )
    # Namespace is presumably argparse.Namespace (imported elsewhere) --
    # a simple attribute container for the two identifiers.
    return Namespace(
        launch_template_name=response["LaunchTemplate"]["LaunchTemplateName"],
        launch_template_id=response["LaunchTemplate"]["LaunchTemplateId"])
def FlowGradedComplex(complex, discrete_flow):
    """
    Overview:
        Given a complex and a graph on its top dimensional cells,
        produce a GradedComplex such that the preimage of a down set
        is the collection of cells in the closure of all the
        associated top cells
    Inputs:
        complex : a complex
        discrete_flow : a function from vertices to out-adjacent vertices
    Returns:
        (dag, graded_complex) : the condensation DAG of the flow and the
        GradedComplex built from it
    Algorithm:
        Apply strongly connected components algorithm and determine
        reachability relation among the strong components to learn
        a poset. Associated to each poset vertex is a collection of
        top cells.
    """
    # Step 1. Compute the poset of strongly connected components
    # (vertices are the top-dimensional cells of the complex).
    vertices = [ cell for cell in complex(complex.dimension())]
    (dag, mapping) = CondensationGraph(vertices, discrete_flow)
    #poset = Poset(dag)
    # Step 2. Extend the mapping from top-cells to all cells
    #   Basic idea: since the component indexing furnishes a linear
    #   extension of the poset, we assign each cell to
    #   the minimum indexed poset which contains a top cell
    #   it is incident.
    # for cell in reversed(range(0,len(complex))):
    #     current_value = mapping[cell]
    #     for bd_cell in complex.boundary({cell}):
    #         mapping[bd_cell] = min(mapping.get(bd_cell,current_value), current_value)
    #num_nontop_cells = complex.size() - complex.size(complex.dimension())
    #valuation = lambda x : min([mapping[z] for z in complex.star(x) if z >= num_nontop_cells])
    # NOTE(review): the extension step above is commented out; grading uses
    # the raw top-cell mapping directly -- confirm that is intended.
    grading = construct_grading(complex, lambda x : mapping[x] );
    return dag, GradedComplex(complex, grading)
from relevanceai.utils import make_id
import random
import string
from typing import List
from typing import Dict
def mock_documents(number_of_documents: int = 100, vector_length: int = 5):
    """
    Utility function to mock documents. Aimed at helping users reproduce errors
    if required.

    The schema for the documents is as follows:

    .. code-block::

        {'_chunk_': 'chunks',
         '_chunk_.label': 'text',
         '_chunk_.label_chunkvector_': {'chunkvector': 5},
         'insert_date_': 'date',
         'sample_1_description': 'text',
         'sample_1_label': 'text',
         'sample_1_value': 'numeric',
         'sample_1_vector_': {'vector': 5},
         'sample_2_description': 'text',
         'sample_2_label': 'text',
         'sample_2_value': 'numeric',
         'sample_2_vector_': {'vector': 5},
         'sample_3_description': 'text',
         'sample_3_label': 'text',
         'sample_3_value': 'numeric',
         'sample_3_vector_': {'vector': 5}}

    Parameters
    ------------
    number_of_documents: int
        The number of documents to mock
    vector_length: int
        The length of vectors

    Returns
    ------------
    list of dict
        Randomly generated documents; each document gets a deterministic
        ``_id`` derived from its content via ``make_id``.

    .. code-block::

        from relevanceai.package_utils.datasets import mock_documents
        documents = mock_documents(10)
    """
    def generate_random_string(string_length: int = 5) -> str:
        """Generate a random string of letters and numbers"""
        return "".join(
            random.choice(string.ascii_uppercase + string.digits)
            for _ in range(string_length)
        )
    def generate_random_vector(vector_length: int = vector_length) -> List[float]:
        """Generate a random list of floats"""
        return [random.random() for _ in range(vector_length)]
    def generate_random_label(label_value: int = 5) -> str:
        # Label drawn uniformly from label_0 .. label_5 (inclusive).
        return f"label_{random.randint(0, label_value)}"
    def generate_random_integer(min: int = 0, max: int = 100) -> int:
        return random.randint(min, max)
    def vector_document() -> Dict:
        # One mock document matching the schema documented above.
        document = {
            "sample_1_label": generate_random_label(),
            "sample_2_label": generate_random_label(),
            "sample_3_label": generate_random_label(),
            "sample_1_description": generate_random_string(),
            "sample_2_description": generate_random_string(),
            "sample_3_description": generate_random_string(),
            "sample_1_vector_": generate_random_vector(),
            "sample_2_vector_": generate_random_vector(),
            "sample_3_vector_": generate_random_vector(),
            "sample_1_value": generate_random_integer(),
            "sample_2_value": generate_random_integer(),
            "sample_3_value": generate_random_integer(),
            "_chunk_": [
                {
                    "label": generate_random_label(),
                    "label_chunkvector_": generate_random_vector(),
                }
            ],
        }
        # _id is content-derived (project helper make_id), so identical
        # random draws yield identical ids.
        document["_id"] = make_id(document)
        return document
    return [vector_document() for _ in range(number_of_documents)]
import numpy
def rearrange(blist, flist):
    """Align the evaluation counts in ``blist`` with the corresponding ``flist``.

    blist: per-dimension lists of evaluation counts.
    flist: per-dimension lists of function-value samples, one sample list per
        evaluation count.

    Returns ``(final_b, final_f)`` where ``final_b[i]`` is a 1-D numpy array
    of evaluation counts and ``final_f[i]`` is a list of three arrays holding
    the median, 25th and 75th percentile of each sample list.
    NOTE(review): percentiles are requested via the external ``prctile``
    helper with fractions (0.25/0.75) -- confirm that helper's expected scale.
    """
    final_b=[]
    final_f=[]
    for i in range(0,len(blist)): #runs over dimensions
        erg_b = numpy.empty((0), float)
        erg_f = [numpy.empty ((0), float), numpy.empty ((0), float), numpy.empty ((0), float)]
        for j in range(0,len(blist[i])): #runs over function evaluations
            erg_b=numpy.append(erg_b,blist[i][j])
            erg_f[0]=numpy.append(erg_f[0],numpy.median(flist[i][j]))
            erg_f[1]=numpy.append(erg_f[1],prctile(flist[i][j], [0.25]))
            erg_f[2]=numpy.append(erg_f[2],prctile(flist[i][j], [0.75]))
        final_b.append(erg_b)
        final_f.append(erg_f)
    return final_b, final_f
def change_password_fields():
    """Return the three required password fields for a change-password form.

    Uses the module-level ``f`` (form fields) and ``v`` (validators)
    helpers defined elsewhere in the project.
    """
    return f.Fields([
        f.PasswordField('Old Password', v.required),
        f.PasswordField('New Password', v.required),
        f.PasswordField('Confirm New Password', v.required),
    ])
from dateutil import tz
def montage_stream(ims, montage_order=None, channel_order=[0, 1, 2],
                   clear_none=True):
    """From a sequence of single-channel field images, montage multichannels.

    Suppose the input is a list:

    ```
    ims = [green1a, blue1a, red1a, green1b, blue1b, red1b,
           green2a, blue2a, red2a, green2b, blue2b, red2b]
    ```

    with channel order ``[2, 0, 1]`` and montage order ``[1, 0]``, then
    the output will be:

    ```
    [rgb1_ba, rgb2_ba]
    ```

    Parameters
    ----------
    ims : iterator of array, shape (M, N)
        A list of images in which consecutive images represent single
        channels of the same image. (See example.)
    montage_order : array-like of int, optional
        The order of the montage images (in 1D or 2D). Defaults to the
        cellomics 25-field clockwise spiral.
    channel_order : list of int, optional
        The order in which the channels appear.
        NOTE(review): mutable default argument -- harmless as long as it is
        never mutated, but worth replacing with ``None`` + fallback.
    clear_none : bool, optional
        If True, ``None`` entries in ``channel_order`` are not counted when
        grouping the input into channel sets.

    Returns
    -------
    montaged_stream : iterator of arrays
        An iterator of the images composed into multi-channel montages.

    Examples
    --------
    >>> images = (i * np.ones((4, 5), dtype=np.uint8) for i in range(24))
    >>> montaged = list(montage_stream(images, [[0, 1], [2, 3]], [2, 0, 1]))
    >>> len(montaged)
    2
    >>> montaged[0].shape
    (8, 10, 3)
    >>> montaged[0][0, 0, :]
    array([2, 0, 1], dtype=uint8)
    >>> montaged[0][4, 5, :]
    array([11,  9, 10], dtype=uint8)
    >>> montaged[1][4, 5, :]
    array([23, 21, 22], dtype=uint8)
    """
    if montage_order is None:
        montage_order = cellomics.SPIRAL_CLOCKWISE_RIGHT_25
    montage_order = np.array(montage_order)
    ntiles = montage_order.size
    if clear_none:
        nchannels = len([i for i in channel_order if i is not None])
    else:
        nchannels = len(channel_order)
    # NOTE(review): `tz.pipe` / `c.partition` / `c.map` are the toolz API;
    # the visible `from dateutil import tz` import looks wrong for this code
    # (dateutil.tz has no `pipe`) -- presumably `import toolz as tz` and
    # `import toolz.curried as c` are intended; confirm the real imports.
    return tz.pipe(ims, c.partition(nchannels),
                   c.map(stack_channels(order=channel_order)),
                   c.partition(ntiles),
                   c.map(montage(order=montage_order)))
def load_and_prepare_cmd(filename, verbose=False):  # (g, gr) = load_and_prepare_cmd('fieldA.csv')
    """
    Load CMD data from ``filename`` and return masked colour/magnitude series.

    Bug fix: the previous version ignored ``filename`` and always read the
    hard-coded file ``"fieldA.csv"``.

    Parameters
    ----------
    filename : str
        Path to a CSV file with at least the columns ``"g"`` and ``"r"``.
    verbose : bool, optional
        If True, print the lengths of the returned series.

    Returns
    -------
    (pandas.Series, pandas.Series)
        ``g - r`` colour and ``g`` magnitude; entries outside
        ``14 < g < 24`` and ``-0.5 < g-r < 2.5`` are masked to NaN.
    """
    field = pd.read_csv(filename)
    g = field["g"]
    gr = g - field["r"]
    # Keep only plausible CMD values; .where() masks the rest to NaN so the
    # returned series keep their original index and length.
    mask = (g > 14) & (g < 24) & (gr > -0.5) & (gr < 2.5)
    if verbose:
        print("Length of g and gr are {0:d} and {1:d} respectively".format(len(g), len(gr)))
    return gr.where(mask), g.where(mask)
def make_rows(cngrs_prsn):
    """Flatten a congressperson JSON object into one dict per term.

    Each output dict repeats the person's name, birthday and gender, and adds
    the start/end dates, type and party of one term.
    """
    bio = cngrs_prsn["bio"]
    person = {
        "name": cngrs_prsn["name"]["first"] + " " + cngrs_prsn["name"]["last"],
        "birthday": bio.get("birthday", None),
        "gender": bio["gender"],
    }
    rows = []
    for term in cngrs_prsn["terms"]:
        entry = dict(person)
        entry["term_start"] = term["start"]
        entry["term_end"] = term["end"]
        entry["term_type"] = term["type"]
        entry["party"] = term.get("party")  # Defaults to None
        rows.append(entry)
    return rows
def getNumProfileArgs(device):
    """ Get the number of Power Profile fields for a specific device

    Parameters:
    device -- DRM device identifier

    This varies per ASIC, so ensure that we get the right number of arguments

    Returns 0 when the device exposes no profile sysfs entry.
    """
    # getSysfsValue is an external helper; returns the raw text of the
    # device's 'profile' sysfs file (or a falsy value when absent).
    profile = getSysfsValue(device, 'profile')
    numHiddenFields = 0
    if not profile:
        return 0
    # Get the 1st line (column names)
    fields = profile.splitlines()[0]
    # SMU7 has 2 hidden fields for SclkProfileEnable and MclkProfileEnable
    if 'SCLK_UP_HYST' in fields:
        numHiddenFields = 2
    # If there is a CLOCK_TYPE category, that requires a value as well
    # (overrides the SMU7 case above -- the two header layouts are presumably
    # mutually exclusive per ASIC generation; confirm).
    if 'CLOCK_TYPE(NAME)' in fields:
        numHiddenFields = 1
    # Subtract 2 to remove NUM and MODE NAME, since they're not valid Profile fields
    return len(fields.split()) - 2 + numHiddenFields
def singlePass(A,Omega,k,s=1,check=False):
    """
    The single pass algorithm for the Hermitian Eigenvalues Problems (HEP) as presented in [1].

    Inputs:

    - :code:`A`: the operator for which we need to estimate the dominant eigenpairs.
    - :code:`Omega`: a random gassian matrix with :math:`m \\geq k` columns.
    - :code:`k`: the number of eigenpairs to extract.
    - :code:`s`: number of power iterations applied to the sample matrix (default 1).
    - :code:`check`: if True, run an a-posteriori accuracy check via :code:`check_std`.

    Outputs:

    - :code:`d`: the estimate of the :math:`k` dominant eigenvalues of :math:`A`.
    - :code:`U`: the estimate of the :math:`k` dominant eigenvectors of :math:`A,\\, U^T U = I_k`.
    """
    nvec  = Omega.nvec()
    assert(nvec >= k )
    # Power iteration: Y = A^s Omega, keeping the previous iterate Y_pr.
    Y_pr = MultiVector(Omega)
    Y = MultiVector(Omega)
    for i in range(s):
        Y_pr.swap(Y)
        MatMvMult(A, Y_pr, Y)
    # Orthonormal basis Q of the range of Y.
    Q = MultiVector(Y)
    Q.orthogonalize()
    # Single-pass trick: recover T ~ Q^T A Q from the already-computed
    # products instead of applying A again.
    Zt = Y_pr.dot_mv(Q)
    Wt = Y.dot_mv(Q)
    Tt = np.linalg.solve(Zt, Wt)
    # Symmetrize to suppress round-off asymmetry before eigh.
    T = .5*Tt + .5*Tt.T
    d, V = np.linalg.eigh(T)
    # Keep the k eigenpairs of largest magnitude, sorted descending.
    sort_perm = np.abs(d).argsort()
    sort_perm = sort_perm[::-1]
    d = d[sort_perm[0:k]]
    V = V[:,sort_perm[0:k]]
    U = MultiVector(Omega[0], k)
    MvDSmatMult(Q, V, U)
    if check:
        check_std(A, U, d)
    return d, U
def _cons6_77(m6, L66, L67, d_byp, k, Cp, h_byp, dw1, kw1, dw2, kw2,
adiabatic_duct=False, conv_approx=False):
"""dz constrant for edge bypass sc touching 2 corner bypass sc"""
term1_out = 0.0
if not adiabatic_duct:
if conv_approx:
R2 = 1 / h_byp + dw2 / 2 / kw2
term1_out = L66 / m6 / Cp / R2 # conv / cond to duct 2 MW
else:
term1_out = h_byp * L66 / m6 / Cp # conv to outer duct
if conv_approx:
R1 = 1 / h_byp + dw1 / 2 / kw1
term1_in = L66 / m6 / Cp / R1 # conv / cond to duct 1 MW
else:
term1_in = h_byp * L66 / m6 / Cp
term2 = 2 * k * d_byp / m6 / Cp / L67 # cond to adj bypass corner
return 1 / (term1_in + term1_out + term2) | cedeaf3125454f4b73f082d43eeb7078a4b71412 | 3,629,517 |
def load_user(username):
    """Load user by username.

    :param username: the username to look up
    :return: whatever ``User.get_by_username`` returns -- presumably the
        matching ``User`` instance or ``None``; confirm against the model.
    """
    return User.get_by_username(username)
def num_to_hex_string(num, size=1, little_endian=False):
    """Convert a given number to hex string.

    Converts a number to a big endian hex string zero-padded to a multiple
    of ``size`` bytes, optionally little endian.

    Args:
        num (int) : Input int for which we need to get the hex string
        size (int) : The required size in bytes, eg 1 for Uint8, 2 for Uint16.
            Defaults to 1.
        little_endian (bool) : If True, byte order is reversed via the
            module-level ``reverse_hex`` helper. Defaults to False.

    Returns:
        (str)

    Raises:
        Exception: if ``num`` is negative.
        TypeError: if ``size`` is not a whole number.
    """
    if num < 0:
        raise Exception("num should be unsigned (>= 0)")
    if size % 1 != 0:
        raise TypeError("size must be a whole integer")
    width = int(size) * 2  # hex characters per requested unit
    hexstring = hex(num)[2:]
    # Bug fix: the previous padding expression
    # ("0" * size + hexstring)[hexstr_len:] silently truncated the most
    # significant digits whenever the hex length exceeded `width` without
    # being a multiple of it (e.g. num=0x100, size=1 produced "00").
    # Pad up to the next multiple of `width` instead.
    if len(hexstring) % width:
        hexstring = hexstring.zfill(len(hexstring) + width - len(hexstring) % width)
    if little_endian:
        return reverse_hex(hexstring)
    return hexstring
def validate(number, table=None):
    """Checks to see if the number provided passes the Damm algorithm.

    :param number: the digit string to check; must be non-empty/truthy
    :param table: optional custom Damm operation table forwarded to ``checksum``
    :return: ``number`` unchanged, if it validates
    :raises InvalidFormat: if ``number`` is empty or ``checksum`` raises
    :raises InvalidChecksum: if the Damm checksum is non-zero
    """
    if not bool(number):
        raise InvalidFormat()
    try:
        valid = checksum(number, table=table) == 0
    except Exception:
        # NOTE(review): any failure inside checksum() is reported as a format
        # error; this also masks unexpected bugs -- confirm that is intended.
        raise InvalidFormat()
    if not valid:
        raise InvalidChecksum()
    return number
from typing import Optional
from typing import Sequence
def get_saliva_example(sample_times: Optional[Sequence[int]] = None) -> SalivaRawDataFrame:
    """Return saliva example data.

    Parameters
    ----------
    sample_times : list of int, optional
        sample times of saliva samples in minutes

    Returns
    -------
    :obj:`~biopsykit.utils.datatype_helper.SalivaRawDataFrame`
        dataframe with example raw saliva data
    """
    # _get_data resolves the bundled example file path (defined elsewhere);
    # the example data set ships cortisol samples with a 'condition' column.
    return load_saliva_wide_format(
        _get_data("cortisol_sample.csv"),
        saliva_type="cortisol",
        condition_col="condition",
        sample_times=sample_times,
    )
from typing import List
from typing import Union
def process_json_content(input_type: str,
                         content_array: List[str],
                         json_content_array: Union[List[str], None]):
    """
    Process the array of json_contents.

    When no json content is supplied, returns one empty string per content
    element. For FRAME inputs, each element goes through the frame
    processor; otherwise non-URL elements are serialised to JSON.
    """
    if json_content_array is None:
        return [''] * len(content_array)
    if input_type == 'FRAME':
        return [process_frame_json_content(element) for element in json_content_array]
    processed = []
    for element in json_content_array:
        processed.append(element if is_url(element) else dumps(element))
    return processed
from typing import Set
from typing import Tuple
from typing import FrozenSet
def knapsack(
    items: Set[Tuple[float, float]], max_weight: float
) -> FrozenSet[Tuple[float, float]]:
    """Given a set of (value, weight) pairs and a maximum weight, return the
    most valuable subset of items whose total weight does not exceed the
    maximum weight.

    Design idea: Enumerate all subsets of the items and pick the one with the
    highest value that doesn't exceed the maximum weight.

    Complexity: O(2^n) time, O(n) space.
    """
    from itertools import chain, combinations

    # Enumerate all subsets with the standard itertools "powerset" recipe
    # instead of relying on an external `subsets` helper.
    elements = list(items)
    all_subsets = chain.from_iterable(
        combinations(elements, r) for r in range(len(elements) + 1)
    )
    max_value = 0
    max_value_set: FrozenSet[Tuple[float, float]] = frozenset()
    for ss in all_subsets:
        # Check the weight first so we skip the value sum for overweight sets.
        weight = sum(item[1] for item in ss)
        if weight > max_weight:
            continue
        value = sum(item[0] for item in ss)
        if value > max_value:
            max_value = value
            max_value_set = frozenset(ss)
    return max_value_set
import json
def get_json(query_parsed):
    """Call taskwarrior, returning objects from json

    :param query_parsed: pre-built taskwarrior filter string, interpolated
        directly into the ``export`` command
    :return: list of task dicts parsed from taskwarrior's JSON export
    """
    # NOTE(review): stderr from call_taskwarrior (`err`) is ignored here.
    result, err = call_taskwarrior(
        'export %s rc.json.array=on rc.verbose=nothing' % query_parsed)
    return json.loads(result)
def get_list_of_results(results):
    """Collect per-run result attributes into separate lists.

    Parameters
    ----------
    results : list
        A list of named tuples for each iteration

    Returns
    -------
    tuple of four lists
        The waiting times, service times, blocking times and proportions
        within target of all runs of all individuals.
    """
    all_waits = []
    all_services = []
    all_blocks = []
    all_props = []
    for entry in results:
        all_waits.append(entry.waiting_times)
        all_services.append(entry.service_times)
        all_blocks.append(entry.blocking_times)
        all_props.append(entry.proportion_within_target)
    return all_waits, all_services, all_blocks, all_props
def get_dagger_of_native(gate: Gate) -> Gate:
    """
    :param gate: A gate from native gate set
    :return: the conjugated and transposed gate
    :raises ValueError: if the argument is not a Gate or is not RZ/RX/CZ
    """
    if isinstance(gate, Gate):
        # Rotation daggers negate the angle; CZ is its own inverse.
        if gate.name == "RZ":
            return RZ(-gate.params[0], gate.qubits[0])
        if gate.name == "RX":
            return RX(-gate.params[0], gate.qubits[0])
        if gate.name == "CZ":
            return CZ(*gate.qubits)
    raise ValueError("Unsupported gate: " + str(gate))
from typing import List
def get_movies_list(request_params: dict) -> List[ShortMovie]:
    """
    :param request_params: Get request query params; expects 'sort',
        'sort_order', 'limit', 'page' and optionally 'search'
    :return: List of movies, grabbed with ElasticSearch
    :raises requests.HTTPError: (presumably) if the ES request fails --
        confirm the exception type raised by es_requests
    """
    sort_value = request_params.get('sort')
    sort_order = request_params.get('sort_order')
    limit = int(request_params.get('limit'))
    page = int(request_params.get('page'))
    if sort_value == SortField.TITLE.value:
        sort_value = f'{sort_value}.raw'  # Format .raw for ElasticSearch
    request_data = {
        'size': limit,
        'from': (page - 1) * limit,  # Cannot be more than 10.000
        'sort': [
            {
                sort_value: sort_order
            }
        ],
        '_source': ['id', 'title', 'imdb_rating'],
    }
    # Parse string query and create request for the ElasticSearch
    search_query = request_params.get('search')
    if search_query:
        # Weighted multi-field fuzzy search: title matches count most.
        request_data['query'] = {
            'multi_match': {
                'query': search_query,
                'fuzziness': 'auto',
                'fields': [
                    'title^5',
                    'description^4',
                    'genre^3',
                    'actors_names^3',
                    'writers_names^2',
                    'director'
                ]
            }
        }
    response = es_requests.get('movies', request_data)
    if not response.ok:
        response.raise_for_status()
    data = response.json()
    result = data['hits']['hits']
    # Map the raw ES hits onto the ShortMovie DTO.
    movies = []
    for record in result:
        movie_raw = record.get('_source')
        movies.append(ShortMovie(
            id=movie_raw.get('id'),
            title=movie_raw.get('title'),
            imdb_rating=movie_raw.get('imdb_rating')
        ))
    return movies
def statUniq(passwords, status):
    """Produce unicity statistics for the passwords stored under ``status``.

    Returns a dict with the number of empty entries, non-empty entries and
    distinct values (the empty string counts as a value) in
    ``passwords[status]``.
    """
    entries = passwords[status]
    empty_count = entries.count('')
    return {
        "empty": empty_count,
        "non empty": len(entries) - empty_count,
        "unique": len(set(entries)),
    }
import re
def _read_logo(content):
""" Read info from logo in file header. """
def _read_logo(pat):
pattern = pat + r":\s+\S+"
data_str = re.compile(pattern).search(content).group()
return data_str.split(':')[1].strip()
info = {}
for pat in ['Version', 'Website']:
info[pat] = _read_logo(pat)
return info | e5ed2adb67c42854a3889dd823de6a3517cf1bad | 3,629,529 |
def modulate_position(timestamp):
    """
    Counts the position in the time-sorted log of IP activity based on the
    timestamp attached to the particular log entry in the RDD.

    timestamp: attached timestamp

    Returns ``(INCREMENT - timestamp) % time_dimension`` -- both
    ``INCREMENT`` and ``time_dimension`` are module-level globals defined
    elsewhere in the file; presumably the former is a reference instant and
    the latter the wrap-around period. TODO confirm.
    """
    result = (INCREMENT - timestamp) % time_dimension
    return result
def resnet50(mask_init='1s', mask_scale=1e-2, threshold_fn='binarizer', **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        mask_init: initialisation scheme for the weight masks (default '1s').
        mask_scale: scale factor applied to the mask initialisation.
        threshold_fn: name of the mask thresholding function, e.g. 'binarizer'.
        **kwargs: forwarded to the ``ResNet`` constructor.

    Returns:
        A ``ResNet`` built from Bottleneck blocks in the standard
        [3, 4, 6, 3] ResNet-50 layout.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], mask_init,
                   mask_scale, threshold_fn, **kwargs)
    return model
import ctypes
def create_channel(pvname, connect=False, auto_cb=True, callback=None):
    """ create a Channel for a given pvname

    creates a channel, returning the Channel ID ``chid`` used by other
    functions to identify this channel.

    Parameters
    ----------
    pvname : string
        the name of the PV for which a channel should be created.
    connect : bool
        whether to (try to) connect to PV as soon as possible.
    auto_cb : bool
        whether to automatically use an internal connection callback.
    callback : callable or ``None``
        user-defined Python function to be called when the connection
        state change s.

    Returns
    -------
    chid : ctypes.c_long
        channel ID.

    Notes
    -----
    1. The user-defined connection callback function should be prepared to accept
    keyword arguments of

         =========== =============================
         keyword     meaning
         =========== =============================
         `pvname`    name of PV
         `chid`      Channel ID
         `conn`      whether channel is connected
         =========== =============================

    2. If `auto_cb` is ``True``, an internal connection callback is used so
    that you should not need to explicitly connect to a channel, unless you
    are having difficulty with dropped connections.

    3. If the channel is already connected for the PV name, the callback
    will be called immediately.
    """
    # Note that _CB_CONNECT (defined above) is a global variable, holding
    # a reference to _onConnectionEvent: This is really the connection
    # callback that is run -- the callack here is stored in the _cache
    # and called by _onConnectionEvent.
    context_cache = _cache[current_context()]
    # {}.setdefault is an atomic operation, so we are guaranteed to never
    # create the same channel twice here:
    with context_cache.setdefault(pvname, _SentinelWithLock()).lock:
        # Grab the entry again from the cache. Between the time the lock was
        # attempted and acquired, the cache may have changed.
        entry = context_cache[pvname]
        is_new_channel = isinstance(entry, _SentinelWithLock)
        if is_new_channel:
            # First request for this PV in this context: create the CA
            # channel and register it in both caches.
            callbacks = [callback] if callable(callback) else None
            entry = _CacheItem(chid=None, pvname=pvname, callbacks=callbacks)
            context_cache[pvname] = entry
            chid = dbr.chid_t()
            with entry.lock:
                ret = libca.ca_create_channel(
                    ctypes.c_char_p(str2bytes(pvname)), _CB_CONNECT, 0, 0,
                    ctypes.byref(chid)
                )
                PySEVCHK('create_channel', ret)
                entry.chid = chid
                _chid_cache[chid.value] = entry
    # Existing channel: just register the extra user callback (once).
    if (not is_new_channel and callable(callback) and
            callback not in entry.callbacks):
        entry.callbacks.append(callback)
        if entry.chid is not None and entry.conn:
            # Run the connection callback if already connected:
            callback(chid=_chid_to_int(entry.chid), pvname=pvname,
                     conn=entry.conn)
    if connect:
        connect_channel(entry.chid)
    return entry.chid
import subprocess
def fetch_profiles(db_path, keys_ls):
    """Fetch hmm profiles from db and save in a file

    Args:
        db_path: String, path where db are stored
        keys_ls: String, Path to file with acc-nr
    Return:
        ls_keys: List, strings with acc-numbers
    """
    LOG.info("Fetching profiles from Pfam-A file")
    ls_keys = get_full_accession_number(db_path, keys_ls)
    if not ls_keys:
        LOG.error("No valid profiles could be selected")
    else:
        for key in ls_keys:
            # Security fix: pass hmmfetch its arguments as a list with
            # shell=False instead of interpolating accession numbers (and
            # paths) into a shell string with shell=True, which was open to
            # command injection.
            command_fetch_profile = [
                "hmmfetch",
                "-o", db_path + key + ".hmm",
                db_path + "Pfam-A.hmm.gz",
                key,
            ]
            subprocess.run(command_fetch_profile, stdout=subprocess.PIPE)
        LOG.info("Profiles found: %s", ls_keys)
    return ls_keys
import sys
def get_w_cmd_args():
    """Return the ``w`` command flags for this platform.

    Gives "-hi" on macOS (darwin), "-his" elsewhere, and falls back to
    "-h" if platform detection fails (logging the error first).
    """
    try:
        platform_name = str(sys.platform).lower()
        if platform_name.startswith(str("""darwin""")):
            return str("""-hi""")
        return str("""-his""")
    except Exception as someErr:
        logs.log(str(type(someErr)), "Error")
        logs.log(str(someErr), "Error")
        logs.log(str((someErr.args)), "Error")
        return str("""-h""")
def prep_for_graph(data_frame, series=None, delta_series=None, smoothing=None,
                   outlier_stddev=None):
    """Prepare a dataframe for graphing by calculating deltas for
    series that need them, resampling, and removing outliers.

    :param data_frame: input DataFrame
    :param series: column names to graph as-is
    :param delta_series: column names to difference first (via ``calc_deltas``)
    :param smoothing: optional resample rule passed to ``.resample()``
    :param outlier_stddev: if set, outliers beyond this many standard
        deviations are removed via ``remove_outliers``
    :return: DataFrame restricted to ``series + delta_series``
    """
    series = series or []
    delta_series = delta_series or []
    graph = calc_deltas(data_frame, delta_series)
    for s in series + delta_series:
        if smoothing:
            # NOTE(review): on modern pandas, .resample() returns a Resampler
            # that needs an aggregation (e.g. .mean()); this presumably
            # targets an older pandas version -- confirm.
            graph[s] = graph[s].resample(smoothing)
        if outlier_stddev:
            graph[s] = remove_outliers(graph[s], outlier_stddev)
    return graph[series + delta_series]
def vigenere_decryption(text: str, key: str) -> str:
    """Decode a Vigenère cipher.

    :param text: ciphertext (lowercase latin letters)
    :type text: str
    :param key: keyword (lowercase latin letters), cycled over the text
    :type key: str
    :return: the decoded plaintext
    :rtype: str
    """
    key_length = len(key)
    decoded = []
    for index, symbol in enumerate(text):
        shift = ord(key[index % key_length])
        decoded.append(chr((ord(symbol) - shift + 26) % 26 + 97))
    return ''.join(decoded)
def remove_diacritics(string):
    """Removes diacritics from the given string

    Parameters
    ----------
    string : str
        The string from which diacritics should be removed

    Returns
    -------
    string : str
        The string with its diacritics removed (ASCII transliteration via
        the third-party ``unidecode`` package)
    """
    uni = unidecode(string)
    return uni
def eval_chip_generalized(theta1, theta2, deltaphi, q, chi1, chi2):
    """
    Generalized definition of the effective precessing spin chip, see arxiv:2011.11948. This definition retains all variations on the precession timescale.

    Call
    ----
    chip = eval_chip_generalized(theta1,theta2,deltaphi,q,chi1,chi2)

    Parameters
    ----------
    theta1: float
        Angle between orbital angular momentum and primary spin.
    theta2: float
        Angle between orbital angular momentum and secondary spin.
    deltaphi: float
        Angle between the projections of the two spins onto the orbital plane.
    q: float
        Mass ratio: 0<=q<=1.
    chi1: float
        Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
    chi2: float
        Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.

    Returns
    -------
    chip: float
        Effective precessing spin chip.
    """
    # chip_terms (defined elsewhere) returns the two in-plane spin terms;
    # combine them with the law of cosines over the in-plane angle deltaphi.
    term1, term2 = chip_terms(theta1, theta2, q, chi1, chi2)
    chip = (term1**2 + term2**2 + 2*term1*term2*np.cos(deltaphi))**0.5
    return chip
import re
def strip_emails(s):
    """
    Remove email-address-like tokens (and one trailing space) from `s`.
    """
    pattern = re.compile(r"\S*@\S*\s?", re.UNICODE)
    return pattern.sub("", s)
def get_hf(sigma_val=0.8228, boxRedshift=0., delta_wrt='mean'):
    """
    Halo mass function model for the MultiDark simulation.

    :param sigma_val: sigma_8 normalisation of the power spectrum
    :param boxRedshift: redshift of the snapshot
    :param delta_wrt: overdensity reference ('mean' by default)
    :return: an ``hmf.MassFunction`` with the Bryan & Norman (1998) virial
        overdensity evaluated at ``boxRedshift``

    Relies on the module-level ``cosmoMD`` cosmology and ``n`` (presumably
    numpy) -- both defined elsewhere in the file.
    """
    #hf0 = MassFunction(cosmo_model=cosmo, sigma_8=sigma_val, z=boxRedshift)
    # Matter density parameter as a function of redshift.
    omega = lambda zz: cosmoMD.Om0*(1+zz)**3. / cosmoMD.efunc(zz)**2
    # Bryan & Norman (1998) fitting formula for the virial overdensity.
    DeltaVir_bn98 = lambda zz : (18.*n.pi**2. + 82.*(omega(zz)-1)- 39.*(omega(zz)-1)**2.)/omega(zz)
    print("DeltaVir", DeltaVir_bn98(boxRedshift), " at z",boxRedshift )
    hf1 = MassFunction(cosmo_model=cosmoMD, sigma_8=sigma_val, z=boxRedshift, delta_h=DeltaVir_bn98(boxRedshift), delta_wrt=delta_wrt, Mmin=7, Mmax=16.5)
    return hf1
def reverse_one_hot(image):
    """
    Collapse a one-hot (or per-class score) array to per-pixel class keys.

    # Arguments
        image: Array whose axis 1 holds the class scores,
            e.g. shape [N, num_classes, ...].
    # Returns
        An array shaped like `image` except that axis 1 is reduced to
        size 1 and holds the argmax class index.
    """
    x = np.argmax(image, axis=1)
    # Keep a singleton class axis so downstream code sees the same rank.
    # (Removed a leftover debug print of x.shape and a dead torch line.)
    x = np.expand_dims(x, axis=1)
    return x
def individual_utilities(
        persons,
        cdap_indiv_spec,
        locals_d,
        trace_hh_id=None, trace_label=None):
    """
    Compute CDAP utilities for each person individually.

    Parameters
    ----------
    persons : pandas.DataFrame
        DataFrame of individual persons data.
    cdap_indiv_spec : pandas.DataFrame
        CDAP spec applied to individuals.
    locals_d : dict
        Local values made available to the spec evaluation.
    trace_hh_id : optional
        When truthy, the resulting utilities are traced.
    trace_label : str, optional
        Label used for spec evaluation and tracing output.

    Returns
    -------
    utilities : pandas.DataFrame
        Indexed like `persons`, one column per alternative, plus the
        'useful columns' [_hh_id_, _ptype_, 'cdap_rank', _hh_size_].
    """
    # Single-person utilities from the individual spec.
    utilities = simulate.eval_utilities(
        cdap_indiv_spec, persons, locals_d, trace_label=trace_label)

    # Carry person attributes along to facilitate building household
    # interactions later on.
    carried = [_hh_id_, _ptype_, 'cdap_rank', _hh_size_]
    utilities[carried] = persons[carried]

    if trace_hh_id:
        tracing.trace_df(utilities, '%s.indiv_utils' % trace_label,
                         column_labels=['activity', 'person'])

    return utilities
def _create_pd_obj(frame, res, RATIO, mode):
    """Create Waymo prediction objects for one camera frame.

    Parameters
    ----------
    frame : dict
        Frame metadata with 'context_name', 'timestamp_micros' and
        'camera_type' keys.
    res : list of str
        Comma-separated detection/track rows; fields 3:7 hold the box
        (cx, cy, w, h), field 1 the track id, field 2 the class id and
        field -2 the score.
    RATIO : float or tuple of two floats
        Scale factor(s) dividing the box coordinates (x and y scaled
        separately when a tuple is given).
    mode : str
        'track' emits per-camera track ids; anything else uses a
        constant id.

    Returns
    -------
    list of metrics_pb2.Object
        One object per row, or a single zeroed placeholder when `res`
        is empty so the frame is still represented.
    """
    def _new_obj():
        # Frame-level fields shared by every emitted object.
        obj = metrics_pb2.Object()
        obj.context_name = frame['context_name']
        obj.frame_timestamp_micros = frame['timestamp_micros']
        obj.camera_name = CAM_TYPES[frame['camera_type']]
        return obj

    def _set_box(obj, cx, cy, w, h):
        # 2D boxes only: z, height and heading are unused and left at 0.
        box = label_pb2.Label.Box()
        box.center_x = cx
        box.center_y = cy
        box.center_z = 0
        box.length = w
        box.width = h
        box.height = 0
        box.heading = 0
        obj.object.box.CopyFrom(box)

    objs = []
    if len(res) > 0:
        for re in res:
            re = re.split(",")
            obj = _new_obj()
            cx, cy, w, h = [float(x) for x in re[3:7]]
            if mode == 'track':
                # Prefix with the camera so ids stay unique across cameras.
                track_id = "{}_{}".format(frame['camera_type'], re[1])
            else:
                track_id = str(0)
            if isinstance(RATIO, tuple):
                cx, cy, w, h = cx/RATIO[0], cy/RATIO[1], w/RATIO[0], h/RATIO[1]
            else:
                cx, cy, w, h = cx/RATIO, cy/RATIO, w/RATIO, h/RATIO
            _set_box(obj, cx, cy, w, h)
            # Score must be within [0.0, 1.0]. It is better to filter boxes
            # with small scores upstream to speed up metrics computation.
            obj.score = float(re[-2])
            # For tracking, this must be unique per tracked sequence.
            obj.object.id = track_id
            obj.object.type = CAT_TYPES[int(float(re[2]))]
            objs.append(obj)
    else:
        # No detections: emit one zeroed placeholder object.
        obj = _new_obj()
        _set_box(obj, 0, 0, 0, 0)
        obj.score = float(0.0)
        obj.object.id = str(0)
        obj.object.type = int(0)
        objs.append(obj)
    return objs
import numbers
def check_random_state(seed):
    """Coerce *seed* into a `numpy.random.RandomState` instance.

    Parameters
    ----------
    seed : {None, int, `numpy.random.Generator`,
            `numpy.random.RandomState`}, optional
        ``None`` (or the ``np.random`` module itself) selects the global
        ``RandomState`` singleton; an int seeds a fresh ``RandomState``;
        an existing ``Generator`` or ``RandomState`` is passed through.

    Returns
    -------
    seed : {`numpy.random.Generator`, `numpy.random.RandomState`}
        Random number generator.

    Raises
    ------
    ValueError
        If *seed* is none of the accepted types.
    """
    # The module object itself means "use the shared global state".
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    if isinstance(seed, np.random.RandomState):
        return seed
    # `Generator` only exists on numpy >= 1.17; guard the attribute access.
    try:
        generator_type = np.random.Generator
    except AttributeError:
        generator_type = ()
    if isinstance(seed, generator_type):
        return seed
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
import argparse
def init_argument_parser():
    """ Creates an argument parser and adds specifications for command-line
    arguments for the expression code generation program.

    Optional flags: --config/-c (JSON configuration file), --cname/-n
    (name of the generated class), --dest/-d (output directory),
    --lang/-l (target programming language) and --nohessian (disable
    Hessian-matrix code generation). One positional argument, exprfile,
    names the expression specification file.

    Returns:
        arg_parser : An initialized ArgumentParser object exposing the
            parsed options config, cname, dest, lang, nohessian and
            exprfile
    """
    # DESCRIPTION is a module-level constant shown in the --help output.
    arg_parser = argparse.ArgumentParser(description=DESCRIPTION)
    # Optional arguments
    arg_parser.add_argument(
        "--config", "-c",
        type=str,
        default="",
        help="The file path to the json file containing configuration for "
        "code generation, such as package name, class name, language, etc."
        " Note that the configuration parameters specified in the command "
        "line have higher priority than those in the configuration file "
        "(which means they can override what are specified in the "
        "configuration file)."
    )
    arg_parser.add_argument(
        "--cname", "-n",
        type=str,
        default="",
        help="The name of the class that encapsulates generated code. "
        "By default, the name if the expression specification file will "
        "be used for the class name, with the first character is made "
        "to be uppercase"
    )
    arg_parser.add_argument(
        "--dest", "-d",
        type=str,
        default="",
        help="Destination directory that holds the generated code files. "
        "By default, the current directory is used."
    )
    arg_parser.add_argument(
        "--lang", "-l",
        type=str,
        default="",
        help="Programming language that the generated code is in. "
        "The default language is Java."
    )
    arg_parser.add_argument(
        "--nohessian",
        action="store_true",
        default=False,
        help="Flag to turn off code generation for Hessian matrix"
    )
    # Positional arguments
    arg_parser.add_argument(
        "exprfile", type=str,
        help="The expression specification file name or path."
    )
return arg_parser | baf96d3cb3aebcba00102b46c6e6d5d2345a91b4 | 3,629,545 |
import typing
def serialize(message: typing.Union[structure.Call, structure.CallResult, structure.CallError]) -> typing.List:
    """Serializes an 'OCPPMessage' into JSON-compatible primitives.

    Args:
        - message: 'OCPPMessage', the message to serialize

    Returns:
        list, an equivalent to the message, based only on JSON compatible
        types (string, integer, list, dict, etc.)

    Raises:
        - errors.TypeConstraintViolationError
        - errors.PropertyConstraintViolationError
    """
    # Serialize the dataclass fields in declared order; 'payload' is
    # excluded here because it must be serialized recursively below.
    ocpp_msg = []
    for field in fields(message):
        if field.name == 'payload':
            continue
        ocpp_msg.append(serialize_field(field, getattr(message, field.name)))
    # Only Call and CallResult payloads are appended (recursively).
    if isinstance(message, (structure.Call, structure.CallResult)):
        ocpp_msg.append(serialize_fields(message.payload))
    return ocpp_msg
from typing import Sequence
def sequence_plot(units: Sequence[Unit]):
    """Plot the mean profile temperatures across a sequence of units."""
    fig, ax = utils.create_sequence_plot(units)
    ax.set_ylabel(r"temperature $T$")
    ax.set_title("Mean Profile Temperatures")

    unit_list = list(units)
    if unit_list:
        # x = -0.5 for the incoming profile, then i + 0.5 after each unit.
        xs = [-0.5] + [i + 0.5 for i in range(len(unit_list))]
        ys = [unit_list[0].in_profile.temperature] + [
            u.out_profile.temperature for u in unit_list]
        ax.plot(xs, ys, marker="x")

    return fig
def format_data(data):
    """
    Render `data` inside an HTML <pre> block.

    :param data: value accepted by `change_to_str`
    :return: str : this str can print to web page.
    """
    body = change_to_str(data, rowstr="<br/>")
    return "".join(["<pre>", body, "</pre>"])
def DAYS(date1, date2):
    """Given two strings of dates, calculate the difference in days between the two.

    Parameters
    ----------
    date1 : string
        start date in the form of a string: 'YYYY-MM-DD'
    date2 : string
        end date in the form of a string: 'YYYY-MM-DD'

    Returns
    -------
    int or None
        date2 - date1 in days (negative when date2 precedes date1), or
        None when either argument is invalid (a message is printed,
        preserving the original best-effort contract).
    """
    if not (isinstance(date1, str) and isinstance(date2, str)):
        print('Invalid type: passed arguments other than strings.')
        return None
    try:
        # strptime both parses and validates the calendar values; the old
        # code crashed with an uncaught ValueError on dates like
        # '2020-13-99' that had three dash-separated parts.
        start = dt.datetime.strptime(date1, '%Y-%m-%d').date()
        end = dt.datetime.strptime(date2, '%Y-%m-%d').date()
    except ValueError:
        print('Invalid string: did you enter the dates in the proper YYYY-MM-DD format?')
        return None
    return (end - start).days
from typing import List
def __get_column_names_from_worksheet(worksheet: Worksheet) -> List[NumberOrString]:
    """Return the header values of *worksheet* (the cell values of row #1)."""
    header_row = next(worksheet.iter_rows(min_row=1, max_row=1))
    return [cell.value for cell in header_row]
import os
import math
def plot_set(fig_num: int, to_goal_arr: list, legend_names: list, max_len: float, max_non_init_rmsd: float,
             init_metr: float, bsf_arr: list, common_point: float, max_trav: float, trav_arr: list, full_cut: str,
             metric: str, metr_units: str, same: str, custom_path: str, shrink: bool, non_shrink_arr: list = None) -> int:
    """
    Render a set of metric plots for several trajectories, both combined
    and one figure per force field (the force-field name is parsed from
    each legend entry, between 'with' and 'ff').

    Args:
        :param int fig_num: number of the first matplotlib figure to use
        :param list to_goal_arr: per-trajectory distance-to-goal series
        :param list legend_names: legend labels, one per trajectory
        :param float max_len: largest series length (x-axis bound)
        :param float max_non_init_rmsd: largest non-initial metric value
            (axis bound)
        :param float init_metr: metric value of the initial structure
        :param list bsf_arr: per-trajectory best-so-far metric series
        :param float common_point: metric bound shared by all
            trajectories, used as the reversed-axis limit
        :param float max_trav: largest traveled distance (axis bound)
        :param list trav_arr: per-trajectory traveled-distance series
        :param str full_cut: tag used in plot titles and file names
        :param str metric: metric name (e.g. 'rmsd'), used in labels
        :param str metr_units: units of the metric (e.g. 'A', 'contacts')
        :param str same: tag used in plot titles and file names
        :param str custom_path: directory the plot files are written into
            (a 'shrink'/'unshrink' sub-directory is created inside it)
        :param bool shrink: True selects the 'shrink' plot set,
            False the 'unshrink' one
        :param list non_shrink_arr: companion data series passed through
            to single_plot (NOTE(review): mutated in place in the
            unshrink branch below)
    Returns:
        :return: number of the next free figure after plotting
        :rtype: int
    """
    # # #### SHRINK
    # ax_prop = {"min_lim_x": -max_len/80, "max_lim_x": max_len+max_len/80, "min_lim_y": 0, "max_lim_y": max_non_init_rmsd+max_non_init_rmsd/80, "min_ax_x": 0, "max_ax_x": max_len+max_len/80, "min_ax_y": 0, "max_ax_y": max_non_init_rmsd+max_non_init_rmsd/80, "ax_step_x": max_len/16, "ax_step_y": max_non_init_rmsd/20}
    # extra_line = {"ax_type": 'hor', "val": init_rmsd, "name": "init {} ({:3.2f} {})".format(metric, init_rmsd, metr_units)}
    # fig_num = single_plot(fig_num, ax_prop, to_goal_arr, None, legend_names, '.', 0.3, bsf=False, rev=False, extra_line=extra_line, xlab="steps (20ps each)", ylab="to goal, A", title="{} | to goal vs traveled | {} | {}".format(metric, full_cut, same), filename="{}_to_goal_vs_traveled_{}_{}".format(metric, full_cut, same))  # to goal vs traveled | cut
    #
    # ax_prop = {"min_lim_x": -max_len/80, "max_lim_x": max_len+max_len/80, "min_lim_y": 0, "max_lim_y": max_non_init_rmsd+max_non_init_rmsd/80, "min_ax_x": 0, "max_ax_x": max_len+max_len/80, "min_ax_y": 0, "max_ax_y": max_non_init_rmsd+max_non_init_rmsd/80, "ax_step_x": max_len/16, "ax_step_y": max_non_init_rmsd/20}
    # extra_line = {"ax_type": 'hor', "val": init_rmsd, "name": "init {} ({:3.2f} {})".format(metric, init_rmsd, metr_units)}
    # fig_num = single_plot(fig_num, ax_prop, bsf_arr, None, legend_names, '-', 1, bsf=True, rev=False, extra_line=extra_line, xlab="steps (20ps each)", ylab="steps", title="{} | to goal vs best_so_far | {} | {}".format(metric, full_cut, same), filename="{}_to_goal_vs_best_so_far_{}_{}".format(metric, full_cut, same))  # to goal vs best_so_far | cut
    #
    # ax_prop = {"min_lim_x": max_non_init_rmsd, "max_lim_x": common_point-common_point/10, "min_lim_y": -max_len/80, "max_lim_y": max_len+max_len/80, "min_ax_x": common_point, "max_ax_x": max_non_init_rmsd, "min_ax_y": 0, "max_ax_y": max_len+max_len/80, "ax_step_x": (max_non_init_rmsd-common_point)/16, "ax_step_y": max_len/20}
    # extra_line = {"ax_type": 'ver', "val": init_rmsd, "name": "init {} ({:3.2f} {})".format(metric, init_rmsd, metr_units)}
    # fig_num = single_plot(fig_num, ax_prop, bsf_arr, None, legend_names, '-', 1, bsf=True, rev=True, extra_line=extra_line, xlab="to goal, A", ylab="steps", title="{} | best_so_far vs steps | {} | {}".format(metric, full_cut, same), filename="{}_best_so_far_vs_steps_{}_{}".format(metric, full_cut, same))  # best_so_far vs steps | cut
    # #### NO SHRINK
    custom_path = custom_path+'shrink' if shrink else custom_path+'unshrink'
    # Create the output sub-directory; failures (e.g. it already exists)
    # are deliberately ignored.
    try:
        os.mkdir(custom_path)
    except:
        pass
    # --- to-goal vs traveled: all trajectories on one figure ---
    ax_prop = {"min_lim_x": -max_len/80, "max_lim_x": max_len+max_len/80, "min_lim_y": 0, "max_lim_y": max_non_init_rmsd+max_non_init_rmsd/80,
               "min_ax_x": 0, "max_ax_x": max_len+max_len/80, "min_ax_y": 0, "max_ax_y": max_non_init_rmsd+max_non_init_rmsd/80, "ax_step_x": math.floor(max_len/16), "ax_step_y": max_non_init_rmsd/20}
    if metr_units == 'contacts':
        extra_line = [
            {"ax_type": 'hor', "val": init_metr, "name": "Initial {} metric ({} {})".format(metric.upper(), int(init_metr), metr_units), "col": "darkmagenta"},
            {"ax_type": 'hor', "val": min(min(elem) for elem in to_goal_arr), "name": "The lowest {} metric ({} {})".format(metric.upper(), int(min(min(elem) for elem in to_goal_arr)), metr_units), "col": "darkgreen"}]
    else:
        extra_line = [
            {"ax_type": 'hor', "val": init_metr, "name": "Initial {} metric ({:3.2f} {})".format(metric.upper(), init_metr, metr_units), "col": "darkmagenta"},
            {"ax_type": 'hor', "val": min(min(elem) for elem in to_goal_arr), "name": "The lowest {} metric ({:3.2f} {})".format(metric.upper(), min(min(elem) for elem in to_goal_arr), metr_units), "col": "darkgreen"}]
    if metric == 'rmsd':
        extra_line.append({"ax_type": 'hor', "val": 2.7, "name": "Typical folding mark (2.7 {})".format(metr_units), "col": "midnightblue"})
    title = "{} | to goal vs traveled | {} | {} | {}".format(metric, full_cut, same, 'shrink' if shrink else 'unshrink')
    filename = "{}_to_goal_vs_traveled_{}_{}_{}".format(metric, full_cut, same, 'shrink' if shrink else 'unshrink')
    filename = os.path.join(custom_path, filename)
    fig_num = single_plot(fig_num, ax_prop, to_goal_arr, non_shrink_arr, legend_names.copy(), '.', 0.3, bsf=False, rev=False, extra_line=extra_line, shrink=shrink, xlab="Steps (20ps each)", ylab="Distance to the goal, {}".format(metr_units), title=title, filename=filename)  # to goal vs traveled | cut
    # Same plot, one figure per force field (parsed from the legend text).
    for i in range(len(to_goal_arr)):
        ff = legend_names[i].split('with')[1].split('ff')[0].strip()
        title = "{} | to goal vs traveled | {} | {} | {} | {}".format(metric, full_cut, same, 'shrink' if shrink else 'unshrink', ff)
        filename = "{}_to_goal_vs_traveled_{}_{}_{}_{}".format(metric, full_cut, same, 'shrink' if shrink else 'unshrink', ff)
        filename = os.path.join(custom_path, filename)
        extra_line[1]["val"] = min(to_goal_arr[i])
        if metr_units == 'contacts':
            extra_line[1]["name"] = "The lowest {} metric ({} {})".format(metric.upper(), int(min(to_goal_arr[i])), metr_units)
        else:
            extra_line[1]["name"] = "The lowest {} metric ({:3.2f} {})".format(metric.upper(), min(to_goal_arr[i]), metr_units)
        fig_num = single_plot(fig_num, ax_prop, [to_goal_arr[i],], [non_shrink_arr[i],] if non_shrink_arr is not None else None, [legend_names[i],].copy(), '.', 0.3, bsf=False, rev=False, extra_line=extra_line, shrink=shrink, xlab="Steps (20ps each)",
                              ylab="Distance to the goal, {}".format(metr_units), title=title, filename=filename)  # to goal vs traveled | cut
    # --- traveled vs to-goal (reversed x axis), shrink mode only ---
    if shrink:
        ax_prop = {"min_lim_x": max_non_init_rmsd, "max_lim_x": common_point-common_point/20, "min_lim_y": -max_trav/80, "max_lim_y": max_trav+max_trav/80,
                   "min_ax_x": common_point, "max_ax_x": max_non_init_rmsd, "min_ax_y": 0, "max_ax_y": max_trav+max_trav/80, "ax_step_x": (max_non_init_rmsd-common_point)/20, "ax_step_y": max_trav/20}
        if metr_units == 'contacts':
            extra_line = [
                {"ax_type": 'ver', "val": init_metr, "name": "Initial {} metric ({} {})".format(metric.upper(), int(init_metr), metr_units), "col": "darkmagenta"},
                {"ax_type": 'ver', "val": min(min(elem) for elem in to_goal_arr), "name": "The lowest {} metric ({} {})".format(metric.upper(), int(min(min(elem) for elem in to_goal_arr)), metr_units), "col": "darkgreen"}]
        else:
            extra_line = [
                {"ax_type": 'ver', "val": init_metr, "name": "Initial {} metric ({:3.2f} {})".format(metric.upper(), init_metr, metr_units), "col": "darkmagenta"},
                {"ax_type": 'ver', "val": min(min(elem) for elem in to_goal_arr), "name": "The lowest {} metric ({:3.2f} {})".format(metric.upper(), min(min(elem) for elem in to_goal_arr), metr_units), "col": "darkgreen"}]
        if metric == 'rmsd':
            extra_line.append({"ax_type": 'hor', "val": 2.7, "name": "Typical folding mark (2.7 {})".format(metr_units), "col": "midnightblue"})
        title = "{} | traveled vs to_goal | {} | {} | {}".format(metric, full_cut, same, 'shrink' if shrink else 'unshrink')
        filename = "{}_traveled_vs_to_goal_{}_{}_{}".format(metric, full_cut, same, 'shrink' if shrink else 'unshrink')
        filename = os.path.join(custom_path, filename)
        fig_num = single_plot(fig_num, ax_prop, to_goal_arr, trav_arr, legend_names.copy(), '.', 1, bsf=False, rev=True, extra_line=extra_line, shrink=shrink, xlab="Distance to the goal, {}".format(metr_units), ylab="Past dist, {}".format(metr_units), title=title, filename=filename)  # traveled vs to_goal | cut
        for i in range(len(to_goal_arr)):
            ff = legend_names[i].split('with')[1].split('ff')[0].strip()
            title = "{} | traveled vs to_goal | {} | {} | {} | {}".format(metric, full_cut, same, 'shrink' if shrink else 'unshrink', ff)
            filename = "{}_traveled_vs_to_goal_{}_{}_{}_{}".format(metric, full_cut, same, 'shrink' if shrink else 'unshrink', ff)
            filename = os.path.join(custom_path, filename)
            extra_line[1]["val"] = min(to_goal_arr[i])
            if metr_units == 'contacts':
                extra_line[1]["name"] = "The lowest {} metric ({} {})".format(metric.upper(), int(min(to_goal_arr[i])), metr_units)
            else:
                extra_line[1]["name"] = "The lowest {} metric ({:3.2f} {})".format(metric.upper(), min(to_goal_arr[i]), metr_units)
            fig_num = single_plot(fig_num, ax_prop, [to_goal_arr[i],], [trav_arr[i],], [legend_names[i],].copy(), '.', 1, bsf=False, rev=True, extra_line=extra_line, shrink=shrink,
                                  xlab="Distance to the goal, {}".format(metr_units), ylab="Past dist, {}".format(metr_units), title=title, filename=filename)  # traveled vs to_goal | cut
    # --- best-so-far plots, unshrink mode only ---
    if not shrink:
        # NOTE(review): this mutates the caller's non_shrink_arr lists
        # in place by prepending a 0 to each series.
        for i in range(len(non_shrink_arr)):
            non_shrink_arr[i].insert(0, 0)
        ax_prop = {"min_lim_x": -max_len / 80, "max_lim_x": max_len + max_len / 80, "min_lim_y": 0, "max_lim_y": init_metr + init_metr / 80,  # max_non_init_rmsd + max_non_init_rmsd / 80,
                   "min_ax_x": 0, "max_ax_x": max_len + max_len / 80, "min_ax_y": 0, "max_ax_y": init_metr + init_metr / 80, "ax_step_x": math.floor(max_len / 16), "ax_step_y": init_metr / 20}
        if metr_units == 'contacts':
            extra_line = [
                {"ax_type": 'hor', "val": init_metr, "name": "Initial {} metric ({} {})".format(metric.upper(), int(init_metr), metr_units), "col": "darkmagenta"},
                {"ax_type": 'hor', "val": min(min(elem) for elem in bsf_arr), "name": "The lowest {} metric ({} {})".format(metric.upper(), int(min(min(elem) for elem in bsf_arr)), metr_units), "col": "darkgreen"}]
        else:
            extra_line = [
                {"ax_type": 'hor', "val": init_metr, "name": "Initial {} metric ({:3.2f} {})".format(metric.upper(), init_metr, metr_units), "col": "darkmagenta"},
                {"ax_type": 'hor', "val": min(min(elem) for elem in bsf_arr), "name": "The lowest {} metric ({:3.2f} {})".format(metric.upper(), min(min(elem) for elem in bsf_arr), metr_units), "col": "darkgreen"}]
        if metric == 'rmsd':
            extra_line.append({"ax_type": 'hor', "val": 2.7, "name": "Typical folding mark (2.7 {})".format(metr_units), "col": "midnightblue"})
        title = "{} | to goal vs best_so_far | {} | {} | {}".format(metric, full_cut, same, 'shrink' if shrink else 'unshrink')
        filename = "{}_to_goal_vs_best_so_far_{}_{}_{}".format(metric, full_cut, same, 'shrink' if shrink else 'unshrink')
        filename = os.path.join(custom_path, filename)
        fig_num = single_plot(fig_num, ax_prop, bsf_arr, non_shrink_arr, legend_names.copy(), '-', 1, bsf=True, rev=False, extra_line=extra_line, shrink=shrink, xlab="Steps (20ps each)", ylab="Distance to the goal, {}".format(metr_units), title=title, filename=filename)  # to goal vs best_so_far | cut
        for i in range(len(bsf_arr)):
            ff = legend_names[i].split('with')[1].split('ff')[0].strip()
            title = "{} | to goal vs best_so_far | {} | {} | {} | {}".format(metric, full_cut, same, 'shrink' if shrink else 'unshrink', ff)
            filename = "{}_to_goal_vs_best_so_far_{}_{}_{}_{}".format(metric, full_cut, same, 'shrink' if shrink else 'unshrink', ff)
            extra_line[1]["val"] = min(bsf_arr[i])
            if metr_units == 'contacts':
                extra_line[1]["name"] = "The lowest {} metric ({} {})".format(metric.upper(), int(min(bsf_arr[i])), metr_units)
            else:
                extra_line[1]["name"] = "The lowest {} metric ({:3.2f} {})".format(metric.upper(), min(bsf_arr[i]), metr_units)
            filename = os.path.join(custom_path, filename)
            fig_num = single_plot(fig_num, ax_prop, [bsf_arr[i],], [non_shrink_arr[i],] if non_shrink_arr is not None else None, [legend_names[i],].copy(), '-', 1, bsf=True, rev=False, extra_line=extra_line, shrink=shrink, xlab="Steps (20ps each)",
                                  ylab="Distance to the goal, {}".format(metr_units), title=title, filename=filename)  # to goal vs best_so_far | cut
        ax_prop = {"min_lim_x": max_non_init_rmsd, "max_lim_x": common_point-common_point/10, "min_lim_y": -max_len/80, "max_lim_y": max_len+max_len/80,
                   "min_ax_x": common_point, "max_ax_x": max_non_init_rmsd, "min_ax_y": 0, "max_ax_y": max_len+max_len/80, "ax_step_x": (max_non_init_rmsd-common_point)/20, "ax_step_y": math.floor(max_len/20)}
        if metr_units == 'contacts':
            extra_line = [
                {"ax_type": 'ver', "val": init_metr, "name": "Initial {} metric ({} {})".format(metric.upper(), int(init_metr), metr_units), "col": "darkmagenta"},
                {"ax_type": 'ver', "val": min(min(elem) for elem in bsf_arr), "name": "The lowest {} metric ({} {})".format(metric.upper(), int(min(min(elem) for elem in bsf_arr)), metr_units), "col": "darkgreen"}]
        else:
            extra_line = [
                {"ax_type": 'ver', "val": init_metr, "name": "Initial {} metric ({:3.2f} {})".format(metric.upper(), init_metr, metr_units), "col": "darkmagenta"},
                {"ax_type": 'ver', "val": min(min(elem) for elem in bsf_arr), "name": "The lowest {} metric ({:3.2f} {})".format(metric.upper(), min(min(elem) for elem in bsf_arr), metr_units), "col": "darkgreen"}]
        if metric == 'rmsd':
            extra_line.append({"ax_type": 'hor', "val": 2.7, "name": "Typical folding mark (2.7 {})".format(metr_units), "col": "midnightblue"})
        title = "{} | best_so_far vs steps | {} | {} | {}".format(metric, full_cut, same, 'shrink' if shrink else 'unshrink')
        filename = "{}_best_so_far_vs_steps_{}_{}_{}".format(metric, full_cut, same, 'shrink' if shrink else 'unshrink')
        filename = os.path.join(custom_path, filename)
        fig_num = single_plot(fig_num, ax_prop, bsf_arr, non_shrink_arr, legend_names.copy(), '-', 1, bsf=True, rev=True, extra_line=extra_line, shrink=shrink, xlab="Distance to the goal, {}".format(metr_units), ylab="Steps (20 ps each)", title=title, filename=filename)  # best_so_far vs steps | cut
        for i in range(len(bsf_arr)):
            ff = legend_names[i].split('with')[1].split('ff')[0].strip()
            title = "{} | best_so_far vs steps | {} | {} | {} | {}".format(metric, full_cut, same, 'shrink' if shrink else 'unshrink', ff)
            filename = "{}_best_so_far_vs_steps_{}_{}_{}_{}".format(metric, full_cut, same, 'shrink' if shrink else 'unshrink', ff)
            extra_line[1]["val"] = min(bsf_arr[i])
            if metr_units == 'contacts':
                extra_line[1]["name"] = "The lowest {} metric ({} {})".format(metric.upper(), int(min(bsf_arr[i])), metr_units)
            else:
                extra_line[1]["name"] = "The lowest {} metric ({:3.2f} {})".format(metric.upper(), min(bsf_arr[i]), metr_units)
            filename = os.path.join(custom_path, filename)
            fig_num = single_plot(fig_num, ax_prop, [bsf_arr[i],], [non_shrink_arr[i],] if non_shrink_arr is not None else None, [legend_names[i],].copy(), '-', 1, bsf=True, rev=True, extra_line=extra_line, shrink=shrink,
                                  xlab="Distance to the goal, {}".format(metr_units), ylab="Steps (20 ps each)", title=title, filename=filename)  # best_so_far vs steps | cut
return fig_num | 3108509ec0929cf41686e615da7c2aa25844c43c | 3,629,551 |
def get_fractal_patterns_NtoS_WtoE(fractal_portrait, width, height):
    """Collect per-row fractal patterns from a fractal portrait, scanning
    rows from North to South and cells from West to East; rows with no
    occupied cells are skipped."""
    patterns = []
    for row in range(height):
        # Count occurrences of each value found in this row.
        counts = get_fractal_patterns_zero_amounts()
        for col in range(width):
            value = fractal_portrait[col][row]
            if value != EMPTY_PLACE:
                counts[value] += 1
        # Keep the row only if at least one cell contributed.
        if any(amount > 0 for amount in counts.values()):
            patterns.append(counts)
    return patterns
import os
def lookup_dir(root, name):
    """Find a directory called `name` by walking down from `root`.

    At each level, if `name` is among the entries its full path is
    returned. Otherwise the search descends into the single child entry;
    a level with several children (none of them `name`) or none at all
    is a dead end.

    :param root: directory to start the search from
    :param name: directory name to locate
    :return: full path to the found directory
    :raises EnvironmentError: when the search dead-ends or `root`
        does not exist
    """
    top = root
    while osp.exists(root):
        content = os.listdir(root)
        for subdir in content:
            if subdir == name:
                return osp.join(root, subdir)
        if len(content) != 1:
            # Zero children (dead end) or several (ambiguous descent);
            # the old `> 1` check crashed with IndexError on empty dirs.
            raise EnvironmentError('Could not find unique sub-dir in ' + root)
        root = osp.join(root, content[0])
    # Bug fix: the message used to hard-code "html" instead of `name`.
    raise EnvironmentError('Could not find "' + name + '" directory in ' + top)
def single_stat_request(player, code, stat):
    """Retrieve a single stat for `player` and return it formatted.

    :param player: display name of the account to look up
    :param code: query mode -- 1: aggregate stat from `stat_dict`;
        2: summed medal stat from `medal_dict`; 3: same as 2 but divided
        by the number of PvP activities entered; 4: per-character maximum
        from `character_dict`
    :param stat: key into the dict selected by `code`
    :return: a Markdown code-block string with the stat description and
        value (the discord.Embed variant is commented out below)
    """
    session = Session()
    message = ""
    if code == 1:
        # Single aggregate stat joined against the account row.
        (table, col, message) = stat_dict[stat]
        columns = [col]
        res = session.query(*(getattr(table, column) for column in columns), Account.display_name).join(Account).filter(Account.display_name == player).first()
        #Returns a tuple containing the stat, but only the first element is defined for some reason.
        num = truncate_decimals(res[0])
        name = res[1]
    elif code == 2 or code == 3:
        # Medal stat summed across the account's medal rows.
        (table, col, message) = medal_dict[stat]
        columns = [col]
        res = session.query(func.sum(*(getattr(table, column) for column in columns))).join(Account).filter(Account.display_name == player).group_by(AccountMedals.id).first()
        num = float(res[0])
        name = player
        if message != "Activities Entered" and message != "Total Number of Medals" and message != "Total Medal Score":
            message = f"Total {message} Medals"
        if code == 3:
            # Normalize the total by the number of PvP activities entered.
            denominator = session.query(PvPAggregate.activitiesEntered).join(Account).filter(Account.display_name == player).first()
            act = denominator[0]
            num = num/act
            if message != "Activities Entered" and message != "Total Number of Medals" and message != "Total Medal Score":
                message = f"{message} Medals per Game"
    elif code == 4:
        # Maximum value across the account's characters.
        (table, col, message) = character_dict[stat]
        columns = [col]
        res = session.query(func.max(*(getattr(table, column) for column in columns)), Account.display_name).join(Account).filter(Account.display_name == player).first()
        #Returns a tuple containing the stat, but only the first element is defined for some reason.
        num = truncate_decimals(res[0])
        name = res[1]
    #em = discord.Embed(title = f"{author}{message}{result}", colour=0xADD8E6)
    em = f"```{message} for {name}: {num}```"
return em | b7eef445b3580eff93768394a0f0632a6174e1a8 | 3,629,554 |
import os
def download_uniprot(myid, path=".TPSdownloader_cache" + os.path.sep + 'uniprot' + os.path.sep):
    """Fetch a UniProt XML entry (https://www.uniprot.org/uniprot/<id>.xml)
    via the cached downloader and return the cached file's path.

    Note that some UniProt entries carry multiple <accession> values
    (e.g. TPSD2_PICSI lists both Q6XDB5 and C0PT91), so `myid` may be
    any accession UniProt serves the page under.

    NOTE(review): the `path` parameter is currently unused -- both the
    download and the returned path hard-code the default cache
    directory. Confirm intent before passing a custom `path`.

    :param myid: UniProt accession to fetch
    :param path: cache directory (see the note above)
    :return: path to the cached '<myid>.xml' file
    """
    cache_root = ".TPSdownloader_cache" + os.path.sep
    downloader_wrapper(myid, 'uniprot', cache_root, "https://www.uniprot.org/uniprot/")
    return cache_root + 'uniprot' + os.path.sep + myid + '.xml'
from typing import List
def random_string_list(num: int = 10) -> List[str]:
    """
    Build a list of ``num`` freshly generated random strings.

    >>> type(random_string_list())
    <class 'list'>
    >>> all([True if type(obj) is str else False for obj in random_string_list()])
    True
    >>> len(random_string_list())
    10
    >>> len(random_string_list(num=0))
    0
    >>> len(random_string_list(num=1))
    1
    """
    strings: List[str] = [random_string() for _ in range(num)]
    return strings
def get_entity_description(entity):
"""
Realiza o mapeamento de uma entidade padrão da extracão de entidades
(named entity) retornando de forma explícita o equivalente em português
para a entidade extraída.
param : entity : <str>
return : <str>
"""
ent_map = {
'PERSON': 'pessoa',
'PER': 'pessoa',
'NORP': 'nacionalidade ou grupos religiosos/políticos.',
'FAC': 'prédios, estradas, aeroportos, pontes...',
'ORG': 'empresas, agências, instituições...',
'GPE': 'países, cidades, estados.',
'LOC': 'Locais sem classificação geopolitica.',
'PRODUCT': 'objetos, veículos, alimentos...',
'EVENT': 'batalhas, guerras, eventos esportivos...',
'WORK_OF_ART': 'títulos de livros, canções...',
'LAW': 'documentos nomeados que virarm leis.',
'LANGUAGE': 'idioma',
'DATE': 'datas ou períodos absolutos ou relativos.',
'TIME': 'períodos de tempo menores que um dia.',
'PERCENT': 'percentual.',
'MONEY': 'valores monetários.',
'QUANTITY': 'medidas.',
'ORDINAL': 'primeiro, segundo, terceiro...',
'CARDINAIS': 'outros numerais.'
}
return ent_map.get(entity, entity) | 21fe671419ba00436070ec49cc0433fabfb0c597 | 3,629,557 |
def duration(utter: Utterance) -> int:
"""Get the duration of an utterance in milliseconds
Args:
utter: The utterance we are finding the duration of
"""
return utter.end_time - utter.start_time | bfa1cc9139134a435b9ab39bc7dad9e504abef2d | 3,629,558 |
import more_itertools as mit
def label_sequential_regions(inlist):
    """Input a list of labeled tuples and return a dictionary of sequentially labeled regions.

    Args:
        inlist (list): A list of tuples with the first number representing
            the index and the second the index label.

    Returns:
        dict: Maps '<label><run#>' to the list of consecutive indices of
        that run; runs are numbered per label, labels appear in order of
        first occurrence.

    Examples:
        >>> label_sequential_regions([(1, 'O'), (2, 'O'), (3, 'O'), (4, 'M'), (5, 'M'), (6, 'I'), (7, 'M'), (8, 'O'), (9, 'O')])
        {'O1': [1, 2, 3], 'M1': [4, 5], 'I1': [6], 'M2': [7], 'O2': [8, 9]}
    """
    # Stdlib replacement for pandas + more_itertools.consecutive_groups;
    # also handles empty input (the old DataFrame code raised KeyError).
    from itertools import groupby

    labeled = {}
    # Unique labels in order of first appearance (mirrors pd.unique).
    for label in dict.fromkeys(lab for _, lab in inlist):
        indices = [idx for idx, lab in inlist if lab == label]
        # Consecutive integers share a constant (value - position) key.
        runs = [
            [idx for _, idx in group]
            for _, group in groupby(enumerate(indices), lambda t: t[1] - t[0])
        ]
        labeled.update({'{}{}'.format(label, i + 1): run
                        for i, run in enumerate(runs)})
    return labeled
def lines_edit_renderer(widget, data):
    """Render a textarea whose list value is serialized one item per line.
    """
    area_attrs = textarea_attributes(widget, data)
    value = fetch_value(widget, data)
    text = u'' if value is None else u'\n'.join(value)
    return data.tag('textarea', text, **area_attrs)
def variantsFromAlignment(refWindow, refSeq, cssSeq,
                          cssQV=None, refCoverage=None):
    """
    Extract the variants implied by a pairwise alignment of cssSeq to
    refSeq reference. If cssQV, refCoverage are provided, they will
    be used to decorate the variants with those attributes.
    Arguments:
    - cssQV: QV array, same length as css
    - refCoverage: coverage array, sample length as reference window
    This is trickier than in the haploid case. We have to break out
    diploid variants as single bases, in order to avoid implying
    phase.
    """
    variants = []
    refId, refStart, refEnd = refWindow
    aln = cc.AlignAffineIupac(refSeq, cssSeq);
    alnTarget = aln.Target()
    alnQuery = aln.Query()
    assert (cssQV is None) == (refCoverage is None) # Both or none
    assert len(refSeq) == refEnd - refStart
    assert cssQV is None or len(cssSeq) == len(cssQV)
    assert refCoverage is None or len(refSeq) == len(refCoverage)
    # Mask transcript positions where either aligned base is "N".
    transcript = [ X if (Q != "N" and T != "N") else "N"
                   for (X, T, Q) in zip(aln.Transcript(),
                                        alnTarget,
                                        alnQuery) ]
    def findPrev(s, pos):
        # Walk backwards from `pos` to the nearest non-gap character,
        # defaulting to "N" when there is none.
        # NOTE(review): `xrange` implies this is Python 2 code.
        for i in xrange(pos - 1, -1, -1):
            if s[i] != '-':
                return s[i]
        return "N"
    # Scan the transcript; each maximal run of a non-match ("M"), non-"N"
    # operation is packaged into one Variant when the run ends.
    # (The `variants = []` above is re-initialized here; the first
    # assignment is dead.)
    variants = []
    runStart = -1
    runStartRefPos = None
    runX = None
    refPos = refStart
    for pos, (X, T, Q) in enumerate(zip(transcript,
                                        alnTarget,
                                        alnQuery)):
        if X != runX or isHeterozygote(Q):
            if runStart >= 0 and runX not in "MN":
                # Package up the run and dump a variant
                ref = alnTarget[runStart:pos].replace("-", "")
                read = alnQuery [runStart:pos].replace("-", "")
                refPrev = findPrev(alnTarget, runStart)
                cssPrev = findPrev(alnQuery, runStart)
                if isHeterozygote(read):
                    # Diploid call: split the IUPAC ambiguity code into the
                    # two underlying alleles.
                    allele1, allele2 = unpackIUPAC(read)
                    var = Variant(refId, runStartRefPos, refPos, ref, allele1, allele2,
                                  refPrev=refPrev, readPrev=cssPrev)
                else:
                    var = Variant(refId, runStartRefPos, refPos, ref, read,
                                  refPrev=refPrev, readPrev=cssPrev)
                variants.append(var)
            runStart = pos
            runStartRefPos = refPos
            runX = X
        if T != "-": refPos += 1
    # NOTE(review): a trailing non-"M"/"N" run at the very end of the
    # alignment is never flushed into `variants` -- confirm this is intended.
    # This might be better handled within the loop above, just keeping
    # track of Qpos, Tpos
    if cssQV is not None:
        cssPosition = cc.TargetToQueryPositions(aln)
        for v in variants:
            # HACK ALERT: we are not really handling the confidence or
            # coverage for variants at last position of the window
            # correctly here.
            refPos_ = min(v.refStart-refStart, len(refCoverage)-1)
            cssPos_ = min(cssPosition[v.refStart-refStart], len(cssQV)-1)
            if refCoverage is not None: v.coverage = refCoverage[refPos_]
            if cssQV is not None: v.confidence = cssQV[cssPos_]
    return variants
import os
def _read_cookie(cookie_path, is_safecookie):
"""
Provides the contents of a given cookie file.
:param str cookie_path: absolute path of the cookie file
:param bool is_safecookie: **True** if this was for SAFECOOKIE
authentication, **False** if for COOKIE
:raises:
* :class:`stem.connection.UnreadableCookieFile` if the cookie file is
unreadable
* :class:`stem.connection.IncorrectCookieSize` if the cookie size is
incorrect (not 32 bytes)
"""
if not os.path.exists(cookie_path):
exc_msg = "Authentication failed: '%s' doesn't exist" % cookie_path
raise UnreadableCookieFile(exc_msg, cookie_path, is_safecookie)
# Abort if the file isn't 32 bytes long. This is to avoid exposing arbitrary
# file content to the port.
#
# Without this a malicious socket could, for instance, claim that
# '~/.bash_history' or '~/.ssh/id_rsa' was its authentication cookie to trick
# us into reading it for them with our current permissions.
#
# https://trac.torproject.org/projects/tor/ticket/4303
auth_cookie_size = os.path.getsize(cookie_path)
if auth_cookie_size != 32:
exc_msg = "Authentication failed: authentication cookie '%s' is the wrong size (%i bytes instead of 32)" % (cookie_path, auth_cookie_size)
raise IncorrectCookieSize(exc_msg, cookie_path, is_safecookie)
try:
with open(cookie_path, 'rb', 0) as f:
return f.read()
except IOError as exc:
exc_msg = "Authentication failed: unable to read '%s' (%s)" % (cookie_path, exc)
raise UnreadableCookieFile(exc_msg, cookie_path, is_safecookie) | fa97c4aab128245809e0459f4cb50c188fe97ef5 | 3,629,562 |
from datetime import datetime


def exception_guard(f):
    """
    Middleware (guard): checks whether the currently authorized user may
    create an exception request for the election in the url.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if 'eid' not in kwargs:
            return F.abort(404)
        election = meta.Election(kwargs['eid'])
        eligible = election.voters()['eligible_voters']
        if election.get()['exception_due'] < datetime.now():
            F.flash('Not accepting any exception request.')
            return F.redirect(F.url_for('elections_single', eid=kwargs['eid']))
        if F.g.user.username in eligible:
            F.flash('You are already eligible to vote in the election.')
            return F.redirect(F.url_for('elections_single', eid=kwargs['eid']))
        return f(*args, **kwargs)
    return decorated_function
def return_last(responses):
    """Return the final element of a list."""
    last_index = len(responses) - 1
    return responses[last_index]
from typing import List
from typing import Tuple


def autolink_replacements(
    what: str,
) -> Tuple[List[Tuple[str, str, str]], List[Tuple[str, str, str]]]:
    """
    Create two lists of replacement tuples of the form
    (``regex``, ``replacement``, ``obj``) for all classes and methods which are
    imported in ``KEDRO_MODULES`` ``__init__.py`` files. The ``replacement``
    is a reStructuredText link to their documentation.

    For example, if the docstring reads:
        This LambdaDataSet loads and saves ...
    Then the word ``LambdaDataSet``, will be replaced by
    :class:`~kedro.io.LambdaDataSet`

    Works for plural as well, e.g:
        These ``LambdaDataSet``s load and save
    Will convert to:
        These :class:`kedro.io.LambdaDataSet` load and save

    Args:
        what: The objects to create replacement tuples for. Possible values
            ["class", "func"].

    Returns:
        Two lists of (regex, replacement, obj) tuples: ``replacements`` for
        names already wrapped in double back-ticks (to be auto-linked), and
        ``suggestions`` for bare occurrences that should be back-ticked
        (these are logged to the terminal).

    Raises:
        ValueError: If ``what`` is neither "class" nor "func".
    """
    # Fix: the original annotated/documented a single list but returned a
    # 2-tuple, and an unknown `what` crashed later with UnboundLocalError.
    if what == "class":
        getter = get_classes
    elif what == "func":
        getter = get_functions
    else:
        raise ValueError(f"`what` must be 'class' or 'func', got {what!r}")

    replacements = []
    suggestions = []
    for module in KEDRO_MODULES:
        objects = getter(module)
        # Look for recognised class names/function names which are
        # surrounded by double back-ticks
        if what == "class":
            # first do plural only for classes
            replacements += [
                (
                    fr"``{obj}``s",
                    f":{what}:`~{module}.{obj}`\\\\s",
                    obj,
                )
                for obj in objects
            ]
        # singular
        replacements += [
            (fr"``{obj}``", f":{what}:`~{module}.{obj}`", obj) for obj in objects
        ]
        # Look for recognised class names/function names which are NOT
        # surrounded by double back-ticks, so that we can log these in the
        # terminal
        if what == "class":
            # first do plural only for classes
            suggestions += [
                (fr"(?<!\w|`){obj}s(?!\w|`{{2}})", f"``{obj}``s", obj)
                for obj in objects
            ]
        # then singular
        suggestions += [
            (fr"(?<!\w|`){obj}(?!\w|`{{2}})", f"``{obj}``", obj) for obj in objects
        ]
    return replacements, suggestions
def flattenDict(dictionary, init = ()):
    """
    Converts nested dicts with numeric or str keys to flat dict with tuple keys.
    For example, x[1][0][2] becomes xx[(1, 0, 2)].
    Based on::
    http://stackoverflow.com/questions/6027558/\
    flatten-nested-python-dictionaries-compressing-keys
    """
    def _tuple(x):
        """
        Returns x as a tuple
        """
        return (x,)
    def flatten(d, _keyAccum = init):
        """
        Recursively flatten `d`, building tuple keys by prefixing each
        level's key onto the keys of the flattened sub-dict.

        NOTE(review): `_keyAccum` is accepted but never read -- the
        recursive call passes `init+_tuple(key)` and the flattened keys
        are built below via `_tuple(key)+k` instead; the parameter is dead.
        """
        #module_logger.debug("flatten: entered with %s",d)
        if type(d) == dict:
            if d == {}:
                # Empty dicts are represented by a sentinel entry.
                return {(0,): None}
            newdict = {}
            for key in d.keys():
                reduced = flatten(d[key], init+_tuple(key))
                if type(reduced) == dict:
                    # Prefix this level's key onto each flattened sub-key.
                    for k,v in reduced.items():
                        newdict[_tuple(key)+k] = v
                else:
                    # Leaf value: keyed by a one-element tuple.
                    newdict[_tuple(key)] = d[key]
            return newdict
        else:
            # Non-dict input is returned unchanged.
            return d
    return flatten(dictionary)
def get_boundary_cell_count(plate_dims, exclude_outer=1):
    """Number of wells lying on the plate's edge ring(s).

    Parameters
    ----------
    plate_dims : array
        (rows, columns) dimensions of the plate
    exclude_outer : int
        1 counts only the outermost ring; 2 also adds the next ring in

    Returns
    -------
    int
        number of wells in the edges
    """
    rows, cols = plate_dims[0], plate_dims[1]
    count = 2 * (rows + cols - 2)
    if exclude_outer == 2:
        # Second ring is the perimeter of the (rows-2) x (cols-2) interior.
        count += 2 * ((rows - 2) + (cols - 2) - 2)
    return count
def _is_false(x):
    """Evaluate false for bool(False) and str("false")/str("False").

    Vectorized over numpy arrays and pandas Series, but also accepts single
    values. Anything that is NA as defined in `is_na()` evaluates to False.
    """
    values = np.array(x).astype(object)
    return __is_false(values)
def compute_border_indices(log2_T, J, i0, i1):
    """Track original-signal boundaries through successive dyadic subsamplings.

    At the finest resolution, ``original_signal = padded_signal[..., i0:i1]``.
    For every subsampling by ``2**j`` (conservatively, up to
    ``j = max(log2_T, J)``), this returns the integers bracketing the
    original signal.

    Parameters
    ----------
    log2_T : int
        Maximal subsampling by low-pass filtering is `2**log2_T`.
    J : int / tuple[int]
        Maximal subsampling by band-pass filtering is `2**J`.
    i0 : int
        start index of the original signal at the finest resolution
    i1 : int
        end index (excluded) of the original signal at the finest resolution

    Returns
    -------
    ind_start, ind_end : dict
        Keyed by j in [0, ..., max(log2_T, J)]; after subsampling by 2**j
        the original signal lives in ``padded_signal[ind_start[j]:ind_end[j]]``.

    References
    ----------
    Modified from
    https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/utils.py
    Kymatio, (C) 2018-present. The Kymatio developers.
    """
    if isinstance(J, tuple):
        J = max(J)
    ind_start, ind_end = {0: i0}, {0: i1}
    for j in range(1, max(log2_T, J) + 1):
        # Ceil-divide by two ((x + 1) // 2 == x // 2 + x % 2), staying
        # conservative so the original span is never cut.
        ind_start[j] = (ind_start[j - 1] + 1) // 2
        ind_end[j] = (ind_end[j - 1] + 1) // 2
    return ind_start, ind_end
def get(data_ids):
    """
    Fetch the Python object(s) stored under `data_ids` in the shared
    object storage.

    Parameters
    ----------
    data_ids : unidist.core.backends.common.data_id.DataID or list
        An ID(s) to object(s) to get data from.

    Returns
    -------
    object
        A Python object.
    """
    store = ObjectStore.get_instance()
    return store.get(data_ids)
def pass_time(grid, minutes):
    """Advance `grid` by `minutes` steps, short-circuiting repeated states.

    Every simulated state is memoised; once a state repeats, the final
    minute is projected into the detected loop instead of simulating the
    remaining steps.

    NOTE(review): the cycle path returns a hashable grid representation
    (whatever `get_grid_for_set` produces) while the fall-through path
    returns the grid itself -- confirm callers accept both forms.
    """
    cur_grid = copy_grid(grid)
    grid_set = get_grid_for_set(cur_grid)
    # state -> first minute seen, and minute -> state, for cycle lookup.
    grid_set_to_minute = {grid_set: 0}
    minute_to_grid_set = {0: grid_set}
    for minute in range(1, minutes + 1):
        cur_grid = pass_minute(cur_grid)
        cur_grid_set = get_grid_for_set(cur_grid)
        if cur_grid_set in grid_set_to_minute:
            # Cycle detected: map the requested ending minute back into
            # the loop and return the memoised state for it.
            prev_minute = grid_set_to_minute[cur_grid_set]
            ending_minute = get_ending_minute_after_loop(
                minutes, minute, prev_minute)
            return minute_to_grid_set[ending_minute]
        grid_set_to_minute[cur_grid_set] = minute
        minute_to_grid_set[minute] = cur_grid_set
    return cur_grid
import os


def njoin(n):
    """Join an iterable of strings with the OS line separator.

    Args:
        n: iterable of strings to join.

    Returns:
        str: the elements of *n* concatenated with ``os.linesep`` between them.
    """
    # Bug fix: the original called ``join(os.linesep, n)``, which is not a
    # valid call in Python 3; ``str.join`` is the intended operation.
    return os.linesep.join(n)
def error_rate(predictions, labels):
    """Return the error rate (percent) based on dense predictions and sparse labels."""
    num_correct = np.sum(np.argmax(predictions, 1) == labels)
    accuracy_pct = 100.0 * num_correct / predictions.shape[0]
    return 100.0 - accuracy_pct
import tqdm


def run_question_generation(trainer, dset, model, tokenizer, device, num_beams):
    """
    Generate a set of questions from a source text and list of answers (named entities)
    :param trainer: HuggingFace trainer
    :param dset: The dataset to generate questions from
    :param model: Question generation model
    :param tokenizer: Tokenizer for the provided model
    :param device: torch device to run on
    :param num_beams: Number of beams for beam search
    :return: A dict of lists (input to the claim generation model), one list per field
    """
    dl = trainer.get_test_dataloader(dset)
    all_samples = []
    # NOTE(review): `tqdm` is imported as a module above but called directly
    # here; this usually requires `from tqdm import tqdm` -- confirm.
    for b in tqdm(dl):
        input_ids = b['input_ids'].to(device)
        # Beam-search decode one question per batch element.
        samples = model.generate(
            input_ids,
            num_beams=num_beams,
            max_length=tokenizer.model_max_length,
            early_stopping=True
        )
        all_samples.extend(list(samples.detach().cpu().numpy()))
    # `defaultdict` is expected to come from a module-level import elsewhere.
    claim_gen_input = defaultdict(list)
    # Pair each generated token sequence with its source sample's metadata.
    for id, con, ans, q, citance, paper_id, evidence in zip(dset['id'], dset['context'], dset['answers'],
                                                            all_samples, dset['citance'], dset['paper_id'],
                                                            dset['evidence']):
        gen_question = tokenizer.decode(q, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        # Only the first answer of each sample is used.
        sample = {'id': id, 'paper_id': paper_id, 'context': con, 'answer': ans[0], 'generated_question': gen_question,
                  'citance': citance, 'evidence': evidence}
        for k in sample:
            claim_gen_input[k].append(sample[k])
    return claim_gen_input
def get_queue_ids(client: Client) -> list:
    """
    Collect the ids of every queue known to the system.

    Args:
        client: Client for the api.

    Returns:
        list of queue ids, each rendered as a string.
    """
    response = client.queues_list_request()
    return [str(queue.get('id')) for queue in response.get('Queues', [])]
def get_image_palette(img, nclusters):
    """
    Extract a tuple of (Image, Palette) in LAB space.

    The image is converted to LAB and its palette derived by k-means with
    `nclusters` clusters.
    """
    lab_image = rgb2lab(np.array(img))
    return lab_image, kmeans_get_palette(lab_image, nclusters)
from .model_store import get_model_file
import os


def get_diaresnet_cifar(classes,
                        blocks,
                        bottleneck,
                        model_name=None,
                        pretrained=False,
                        ctx=cpu(),
                        root=os.path.join("~", ".mxnet", "models"),
                        **kwargs):
    """
    Create DIA-ResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    CIFARDIAResNet
        The constructed network (with weights loaded when `pretrained`).
    """
    # CIFAR variants only support 10 or 100 classes.
    assert (classes in [10, 100])
    # Three equal stages; depth must satisfy (blocks - 2) % 9 == 0 for
    # bottleneck units, (blocks - 2) % 6 == 0 for simple blocks.
    if bottleneck:
        assert ((blocks - 2) % 9 == 0)
        layers = [(blocks - 2) // 9] * 3
    else:
        assert ((blocks - 2) % 6 == 0)
        layers = [(blocks - 2) // 6] * 3
    channels_per_layers = [16, 32, 64]
    init_block_channels = 16
    # Repeat each stage's channel width once per unit in that stage.
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if bottleneck:
        # Bottleneck units output four times as many channels.
        channels = [[cij * 4 for cij in ci] for ci in channels]
    net = CIFARDIAResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        classes=classes,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def isbuffer(obj) -> bool:
    """
    Test whether `obj` supports the buffer API, like a bytes or bytearray
    object.
    """
    try:
        view = memoryview(obj)
    except TypeError:
        return False
    view.release()
    return True
import csv
import json


def _strtobool(value):
    """Parse a truthy/falsy string exactly like ``distutils.util.strtobool``
    (distutils was removed from the standard library in Python 3.12)."""
    value = value.lower()
    if value in ('y', 'yes', 't', 'true', 'on', '1'):
        return 1
    if value in ('n', 'no', 'f', 'false', 'off', '0'):
        return 0
    raise ValueError('invalid truth value %r' % (value,))


def _get_mqtt_data(file_name):
    """
    Read MQTT fake data and expected results from a ';'-separated CSV file.

    Each row (keyed by the ``MqttCVS`` column names) is normalised in place:
    topic/name fields are stripped, the payload is encoded to bytes (as in a
    real MQTT message), labels are parsed from JSON, the expected value is
    coerced to float then JSON (else left as-is), the delay falls back to 0
    when not numeric, and the expected assertion is parsed as a boolean
    (default True).

    :param file_name: path of the CSV file to read
    :return: list of normalised row dicts
    """
    mqtt_data = []
    with open(file_name, newline='') as mqtt_data_csv:
        csv_reader = csv.DictReader(mqtt_data_csv, quotechar="'", delimiter=';')
        for row in csv_reader:
            row[MqttCVS.in_topic] = row[MqttCVS.in_topic].strip()
            row[MqttCVS.out_name] = row[MqttCVS.out_name].strip()
            # Convert payload to bytes, as in a MQTT Message.
            row[MqttCVS.in_payload] = row[MqttCVS.in_payload].encode('UTF-8')
            # Parse labels to a Python object.
            row[MqttCVS.out_labels] = json.loads(row.get(MqttCVS.out_labels, '{}'))
            # Value could be a JSON document, a float or anything else.
            try:
                row[MqttCVS.out_value] = float(row.get(MqttCVS.out_value))
            except ValueError:
                try:
                    # Fix: reference the exception through the ``json``
                    # module instead of a bare (undefined here) name.
                    row[MqttCVS.out_value] = json.loads(row.get(MqttCVS.out_value))
                except (json.JSONDecodeError, TypeError):
                    pass  # leave as it is
            # Set delay to 0 if not a number.
            try:
                row[MqttCVS.delay] = float(row.get(MqttCVS.delay, 0))
            except ValueError:
                row[MqttCVS.delay] = 0
            # Convert string to bool for the expected assertion; replaces the
            # deprecated/removed distutils.util.strtobool.
            row[MqttCVS.expected_assert] = bool(
                _strtobool(row.get(MqttCVS.expected_assert, "True").strip()))
            mqtt_data.append(row)
    return mqtt_data
def yes_maybe_condition_true(x: dict) -> bool:
    """
    The yes maybe condition is true if more than 35% of users have
    classified as 'yes' or 'maybe'. This threshold also covers the
    2-of-3, 2-of-4 and 2-of-5 user splits (all above 35%).

    :param x: dict with float 'yes_share' and 'maybe_share' entries.
    :return: True when the combined share exceeds 0.35.
    """
    # Return the comparison directly instead of the redundant
    # ``if ...: return True / else: return False`` branches.
    return x["yes_share"] + x["maybe_share"] > 0.35
def load_texture(filename):
    """ Load an image file with pygame and upload it as an OpenGL 2D
    texture; returns the generated texture id."""
    textureSurface = pygame.image.load(filename)
    # Serialize as RGBA; the third argument (1) flips the rows vertically
    # to match OpenGL's bottom-up image layout.
    textureData = pygame.image.tostring(textureSurface,"RGBA",1)
    width = textureSurface.get_width()
    height = textureSurface.get_height()
    ID = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D,ID)
    # Linear filtering for both magnification and minification.
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    # Upload the pixel data into the bound texture object.
    glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,width,height,0,GL_RGBA,GL_UNSIGNED_BYTE,textureData)
    return ID
import string
import re


def search(pattern, doc, flags=0):
    """Scan through *doc* looking for a match to *pattern*, returning
    a match object, or None if no match was found."""
    # Bug fix: the original searched the imported ``string`` module instead
    # of the ``doc`` argument, and used the private ``_compile`` helper;
    # use the public ``re.compile`` and search the actual argument.
    return re.compile(pattern, flags).search(doc)
from typing import Concatenate
# NOTE(review): `Concatenate` is used below as a merge layer
# (`Concatenate()([...])`); `typing.Concatenate` is not callable that way.
# This most likely should be `tensorflow.keras.layers.Concatenate` -- confirm.


def yolo4_mobilenetv3small_body(inputs, num_anchors, num_classes, alpha=1.0):
    """Create YOLO_V4 MobileNetV3Small model CNN body in Keras.

    Builds a PANet-style head (SPP on the deepest feature map, upsample
    merges, then downsample merges) on top of an ImageNet-pretrained
    MobileNetV3Small backbone, producing three detection outputs.
    """
    mobilenetv3small = MobileNetV3Small(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
    # input: 416 x 416 x 3
    # activation_31(layer 165, final feature map): 13 x 13 x (576*alpha)
    # expanded_conv_10/Add(layer 162, end of block10): 13 x 13 x (96*alpha)
    # activation_22(layer 117, middle in block8) : 26 x 26 x (288*alpha)
    # expanded_conv_7/Add(layer 114, end of block7) : 26 x 26 x (48*alpha)
    # activation_7(layer 38, middle in block3) : 52 x 52 x (96*alpha)
    # expanded_conv_2/Add(layer 35, end of block2): 52 x 52 x (24*alpha)
    # f1 :13 x 13 x (576*alpha)
    # NOTE: activation layer name may different for TF1.x/2.x, so we
    # use index to fetch layer
    f1 = mobilenetv3small.layers[165].output
    #feature map 1 head (13 x 13 x (288*alpha) for 416 input)
    x1 = make_yolo_spp_head(f1, int(288*alpha))
    #upsample fpn merge for feature map 1 & 2
    x1_upsample = compose(
            DarknetConv2D_BN_Leaky(int(144*alpha), (1,1)),
            UpSampling2D(2))(x1)
    f2 = mobilenetv3small.layers[117].output
    # f2: 26 x 26 x (288*alpha) for 416 input
    x2 = DarknetConv2D_BN_Leaky(int(144*alpha), (1,1))(f2)
    x2 = Concatenate()([x2, x1_upsample])
    #feature map 2 head (26 x 26 x (144*alpha) for 416 input)
    x2 = make_yolo_head(x2, int(144*alpha))
    #upsample fpn merge for feature map 2 & 3
    x2_upsample = compose(
            DarknetConv2D_BN_Leaky(int(48*alpha), (1,1)),
            UpSampling2D(2))(x2)
    f3 = mobilenetv3small.layers[38].output
    # f3 : 52 x 52 x (96*alpha)
    x3 = DarknetConv2D_BN_Leaky(int(48*alpha), (1,1))(f3)
    x3 = Concatenate()([x3, x2_upsample])
    #feature map 3 head & output (52 x 52 x (96*alpha) for 416 input)
    #x3, y3 = make_last_layers(x3, int(48*alpha), num_anchors*(num_classes+5))
    x3 = make_yolo_head(x3, int(48*alpha))
    y3 = compose(
            DarknetConv2D_BN_Leaky(int(96*alpha), (3,3)),
            DarknetConv2D(num_anchors*(num_classes+5), (1,1)))(x3)
    #downsample fpn merge for feature map 3 & 2
    x3_downsample = compose(
            ZeroPadding2D(((1,0),(1,0))),
            DarknetConv2D_BN_Leaky(int(144*alpha), (3,3), strides=(2,2)))(x3)
    x2 = Concatenate()([x3_downsample, x2])
    #feature map 2 output (26 x 26 x (288*alpha) for 416 input)
    #x2, y2 = make_last_layers(x2, int(144*alpha), num_anchors*(num_classes+5))
    x2 = make_yolo_head(x2, int(144*alpha))
    y2 = compose(
            DarknetConv2D_BN_Leaky(int(288*alpha), (3,3)),
            DarknetConv2D(num_anchors*(num_classes+5), (1,1)))(x2)
    #downsample fpn merge for feature map 2 & 1
    x2_downsample = compose(
            ZeroPadding2D(((1,0),(1,0))),
            DarknetConv2D_BN_Leaky(int(288*alpha), (3,3), strides=(2,2)))(x2)
    x1 = Concatenate()([x2_downsample, x1])
    #feature map 1 output (13 x 13 x (576*alpha) for 416 input)
    #x1, y1 = make_last_layers(x1, int(288*alpha), num_anchors*(num_classes+5))
    x1 = make_yolo_head(x1, int(288*alpha))
    y1 = compose(
            DarknetConv2D_BN_Leaky(int(576*alpha), (3,3)),
            DarknetConv2D(num_anchors*(num_classes+5), (1,1)))(x1)
    return Model(inputs, [y1, y2, y3])
import cgi
from io import StringIO
import os


def main():
    """Handle a CGI request for RWIS observations and write the response.

    Reads stations, variables, time range and output format from the CGI
    form, queries the database, and emits the result through ``ssw`` as
    delimited text, HTML, or an Excel attachment.
    """
    form = cgi.FieldStorage()
    include_latlon = (form.getfirst('gis', 'no').lower() == 'yes')
    myvars = form.getlist('vars')
    # Station and observation time always lead the output columns.
    myvars.insert(0, 'station')
    myvars.insert(1, 'obtime')
    delimiter = DELIMITERS.get(form.getfirst('delim', 'comma'))
    what = form.getfirst('what', 'dl')
    tzname = form.getfirst('tz', 'UTC')
    src = form.getfirst('src', 'atmos')
    sts, ets = get_time(form, tzname)
    stations = form.getlist('stations')
    if not stations:
        ssw("Content-type: text/plain\n\n")
        ssw("Error, no stations specified for the query!")
        return
    if len(stations) == 1:
        # Pad with a dummy id so the SQL "in %s" tuple remains valid.
        stations.append('XXXXXXX')
    tbl = ''
    if src in ['soil', 'traffic']:
        tbl = '_%s' % (src,)
    # `tbl` is restricted to the whitelist above; the %s placeholders are
    # bound through `params` below.
    sql = """SELECT *, valid at time zone %s as obtime from
    alldata"""+tbl+"""
    WHERE station in %s and valid BETWEEN %s and %s ORDER by valid ASC
    """
    df = read_sql(sql, PGCONN, params=(tzname, tuple(stations), sts, ets))
    if df.empty:
        ssw("Content-type: text/plain\n\n")
        ssw("Sorry, no results found for query!")
        return
    if include_latlon:
        network = form.getfirst('network')
        nt = NetworkTable(network)
        myvars.insert(2, 'longitude')
        myvars.insert(3, 'latitude')
        def get_lat(station):
            """hack"""
            return nt.sts[station]['lat']
        def get_lon(station):
            """hack"""
            return nt.sts[station]['lon']
        df['latitude'] = [get_lat(x) for x in df['station']]
        df['longitude'] = [get_lon(x) for x in df['station']]
    sio = StringIO()
    if what == 'txt':
        ssw('Content-type: application/octet-stream\n')
        ssw(('Content-Disposition: attachment; filename=rwis.txt\n\n'))
        df.to_csv(sio, index=False, sep=delimiter, columns=myvars)
    elif what == 'html':
        ssw("Content-type: text/html\n\n")
        df.to_html(sio, columns=myvars)
    elif what == 'excel':
        # Written through a temp file, streamed back, then removed.
        writer = pd.ExcelWriter('/tmp/ss.xlsx')
        df.to_excel(writer, 'Data', index=False, columns=myvars)
        writer.save()
        ssw("Content-type: application/vnd.ms-excel\n")
        ssw(("Content-Disposition: attachment; Filename=rwis.xlsx\n\n"))
        ssw(open('/tmp/ss.xlsx', 'rb').read())
        os.unlink('/tmp/ss.xlsx')
        return
    else:
        ssw("Content-type: text/plain\n\n")
        df.to_csv(sio, sep=delimiter, columns=myvars)
    sio.seek(0)
    ssw(sio.getvalue())
def get_all_effects(fname):
    """
    Load and combine indirect and direct effect results.

    :param fname: path of the *indirect* effect results CSV; the matching
        direct file is derived by swapping "_indirect_" for "_direct_" in
        the name.
    :return: DataFrame with layer/neuron, both odds ratios, their combined
        `total_causal_effect`, and the direct run's `correctness_cat`.
    """
    # Step 1: Load results for current file
    print(fname)
    indirect_result_df = pd.read_csv(fname)
    analyze_effect_results(
        results_df=indirect_result_df, effect="indirect"
    )
    fname = fname.replace("_indirect_", "_direct_")
    direct_result_df = pd.read_csv(fname)
    analyze_effect_results(
        results_df=direct_result_df, effect="direct"
    )
    # Step 2: Join the two DF's
    total_df = direct_result_df.join(
        indirect_result_df, lsuffix="_direct", rsuffix="_indirect"
    )[
        [
            "layer_direct",
            "neuron_direct",
            "odds_ratio_indirect",
            "odds_ratio_direct"
        ]
    ]
    # Combined effect: sum of both odds ratios minus one.
    total_df["total_causal_effect"] = (
        total_df["odds_ratio_indirect"] + total_df["odds_ratio_direct"] - 1
    )
    total_df["correctness_cat"] = direct_result_df["correctness_cat"]
    return total_df
def cart_key(user_id=DEFAULT_USER_ID):
    """Build the ndb ancestor key of kind 'addSong2Cart' for the given user."""
    return ndb.Key('addSong2Cart', user_id)
from contextlib import suppress
from datetime import datetime


def format_datetime(date_str):
    """
    Convert Twitter's date time format ("Thu Jul 20 19:34:20 +0000 2017")
    to ISO 8601 International Standard Date and Time format.

    :param date_str: date string in Twitter's format (or None/invalid)
    :return: ISO 8601 string, or None when *date_str* cannot be parsed
    """
    with suppress(TypeError, ValueError):
        # Bug fix: with `from datetime import datetime` in scope, the
        # original `datetime.datetime.strptime` raised an AttributeError
        # (not suppressed above); call strptime on the class directly.
        dt = datetime.strptime(date_str, '%a %b %d %H:%M:%S +0000 %Y')
        return dt.strftime('%Y-%m-%dT%H:%M:%SZ')
    return None
from pathlib import Path
from typing import Dict


def git_named_refs(git_hash: str, git_dir: Path) -> Dict[str, str]:
    """
    Return every named tag or branch reference pointing at *git_hash*,
    plus the hash itself under the ``hash`` key.

    Works directly on the ``refs`` directory layout, so no git client
    installation is needed or used.
    """
    refs = {'hash': git_hash}
    ref_root = git_dir / 'refs'
    for entry in ref_root.glob('**/*'):
        if not entry.is_file():
            continue
        if entry.read_text(encoding='ascii').strip() == git_hash:
            refs[entry.parent.relative_to(ref_root).as_posix()] = entry.name
    return refs
def check_string(seq):
    """Check that *seq* is a string.

    Args:
        seq: value to validate.

    Returns:
        None: when *seq* is a string.

    Raises:
        AssertionError: when *seq* is not a string. Raised explicitly
            (instead of via ``assert``) so the check still runs under
            ``python -O``, which strips assert statements.
    """
    if not isinstance(seq, str):
        raise AssertionError("Input is not a string.")
    return None
from typing import Sized


def shift_right(sized: Sized, n: int) -> Sized:
    """Return a copy of sized with its elements shifted n places to the
    right, keeping the same size.

    The first ``n`` slots are filled with ``None`` and the last ``n``
    elements fall off the end; shifts of ``n >= len(sized)`` yield all
    ``None``.

    sized: A sized, sliceable object whose elements to shift
    n: How many places to shift sized's items to the right

    Note: the previous implementation returned
    ``[None] * n + sized[n:len(sized)]``, which left every surviving
    element at its original index (merely blanking the first ``n``) and
    so matched neither the function's name nor its docstring.
    """
    length = len(sized)
    n = min(n, length)
    return [None] * n + list(sized[:length - n])
import io


def backport_makefile(
    self, mode="r", buffering=None, encoding=None, errors=None, newline=None
):
    """
    Backport of ``socket.makefile`` from Python 3.5.

    Wraps this socket in a raw ``SocketIO`` object and layers buffering
    (and, for text modes, decoding) on top, mirroring the stdlib behavior.
    """
    # Only combinations of "r", "w" and "b" are valid modes.
    if not set(mode) <= {"r", "w", "b"}:
        raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
    writing = "w" in mode
    # Absent an explicit "w", default to reading.
    reading = "r" in mode or not writing
    assert reading or writing
    binary = "b" in mode
    rawmode = ""
    if reading:
        rawmode += "r"
    if writing:
        rawmode += "w"
    # `SocketIO` (provided elsewhere) exposes the socket as a raw file.
    raw = SocketIO(self, rawmode)
    # Keep the socket alive while file objects still reference it.
    self._makefile_refs += 1
    if buffering is None:
        buffering = -1
    if buffering < 0:
        buffering = io.DEFAULT_BUFFER_SIZE
    if buffering == 0:
        # Unbuffered access is only meaningful for binary streams.
        if not binary:
            raise ValueError("unbuffered streams must be binary")
        return raw
    if reading and writing:
        buffer = io.BufferedRWPair(raw, raw, buffering)
    elif reading:
        buffer = io.BufferedReader(raw, buffering)
    else:
        assert writing
        buffer = io.BufferedWriter(raw, buffering)
    if binary:
        return buffer
    # Text mode: layer encoding/decoding on top of the buffered stream.
    text = io.TextIOWrapper(buffer, encoding, errors, newline)
    text.mode = mode
    return text
from datetime import datetime


def epoch_seconds(d: datetime = None) -> int:
    """
    Return the number of seconds since the Unix epoch for the given
    datetime. If no date is supplied, the current UTC time is used.

    :param d: timestamp datetime object
    :return: timestamp in epoch
    """
    moment = d if d else datetime.utcnow()
    epoch = datetime.utcfromtimestamp(0)
    return int((moment - epoch).total_seconds())
import json


def parse_json(json_file):
    """JSON poem parser for 'Gongocorpus'.

    We read the data and find elements like title, author, year, etc. Then
    we iterate over the poem text and we look for each stanza, line, word
    and syllable data.

    :param json_file: Path for the json file
    :return: Dict with the data obtained from the poem
    :rtype: dict
    """
    corpus_poem = json.loads(json_file.read_text())
    # The corpus name is taken from the file's path, five components from
    # the end.
    corpus_name = json_file.parts[-5]
    poem = {}
    title = corpus_poem["incipit"]
    author = corpus_poem["author"]
    year = corpus_poem["year"]
    authorship = corpus_poem["authorship"]
    manually_checked = False
    scanned_poem = corpus_poem["scanned_poem"]
    poem_text = corpus_poem["text"]
    stanza_list = []
    # Running line counter across the whole poem; `scanned_poem` is indexed
    # by it, not per stanza.
    line_number = 0
    # Stanzas are separated by blank lines in the raw poem text.
    for stanza_number, stanza in enumerate(poem_text.split("\n\n")):
        # NOTE(review): "".join on a str is a no-op copy; presumably just
        # keeps the raw stanza text.
        stanza_text = "".join(stanza)
        line_list = []
        for line_text in stanza.split("\n"):
            scanned_line = scanned_poem[line_number]
            rythym_info = scanned_line["rhythm"]
            metrical_pattern = rythym_info["stress"]
            line_length = rythym_info["length"]
            word_list = []
            for token in scanned_line["tokens"]:
                # Non-word tokens (e.g. punctuation) are skipped.
                if "word" in token:
                    word = token["word"]
                    stress_position = token["stress_position"]
                    syllables_text = [syl["syllable"] for syl in word]
                    word_text = "".join(syllables_text)
                    has_synalepha = [True for syl in word
                                     if "has_synalepha" in syl]
                    word_dict = {
                        "word_text": word_text,
                        "stress_position": stress_position,
                        "syllables": syllables_text
                    }
                    if True in has_synalepha:
                        word_dict.update({
                            "has_synalepha": True,
                        })
                    word_list.append(word_dict)
            line_list.append({
                "line_number": line_number + 1,
                "line_text": line_text,
                "metrical_pattern": metrical_pattern,
                "line_length": line_length,
                "words": word_list,
            })
            line_number += 1
        stanza_list.append({
            "stanza_number": stanza_number + 1,
            "stanza_type": "",
            "lines": line_list,
            "stanza_text": stanza_text,
        })
    poem.update({
        "poem_title": title,
        "author": author,
        "authorship": authorship,
        "year": year,
        "manually_checked": manually_checked,
        "stanzas": stanza_list,
        "corpus": corpus_name,
    })
    return poem
def chou_pseudo_aa_composition(*sequences):
    """
    M.K. Gupta , R. Niyogi & M. Misra (2013) An alignment-free method to find
    similarity among protein sequences via the general form of Chou's pseudo amino acid composition,
    SAR and QSAR in Environmental Research, 24:7, 597-609,
    DOI: 10.1080/1062936X.2013.773378

    Args:
        *sequences: amino acid sequences

    Returns:
        list of Chou's pseudo amino acid composition for each sequence;
        per sequence this is the ordered AA counts, distances-to-first and
        distributions concatenated into a single feature vector.
    """
    # first the aa count
    aa_count_dict = [aa_composition(seq) for seq in sequences]
    # distance to first
    aa_distance_to_first_dict = [distance_to_first(x) for x in sequences]
    # Per-sequence distribution derived from the counts and distances above.
    aa_distribution_dict = [aa_distribution(seq, aa_c, aa_dist) for seq, aa_c, aa_dist in
                            zip(sequences, aa_count_dict, aa_distance_to_first_dict)]
    # create lists with amino acids in the right order
    aa_count = [order_seq(aa_count_dict_i) for aa_count_dict_i in aa_count_dict]
    aa_distance_to_first = [order_seq(aa_distance_to_first_i) for aa_distance_to_first_i in aa_distance_to_first_dict]
    aa_dist = [order_seq(aa_distribution_dict_i) for aa_distribution_dict_i in aa_distribution_dict]
    # Concatenate the three ordered feature lists for each sequence.
    return [x + y + z for x, y, z in zip(aa_count, aa_distance_to_first, aa_dist)]
def calculate_wire_sweeping_const(h_over_d):
    """Calculate the wire-sweeping constant for the Upgraded
    Cheng-Todreas friction factor constant calculation.

    Returns a dict with identical 'turbulent' and 'laminar' entries;
    both are zero when `h_over_d` is zero.
    """
    if h_over_d == 0.0:
        factor = 0.0
    else:
        factor = -11.0 * np.log10(h_over_d) + 19.0
    return {'turbulent': factor, 'laminar': factor}
import json


def create_subscription_definition(subscription_definition_dict):
    """Create a subscription definition via the AWS Greengrass CLI.

    :param subscription_definition_dict: payload serialized and passed as
        ``--cli-input-json``.
    :return: the new definition's ``LatestVersionArn``.
    """
    cli_input = json.dumps(subscription_definition_dict)
    cmd = [
        "aws",
        "greengrass",
        "create-subscription-definition",
        "--cli-input-json",
        cli_input
    ]
    # `execute_command` (defined elsewhere) runs the CLI and returns stdout
    # as bytes.
    output = execute_command(cmd)
    output_json = json.loads(output.decode("utf-8"))
    subscription_definition_version_arn = output_json["LatestVersionArn"]
    return subscription_definition_version_arn
def compile(element, compiler, **_kw):  # pylint: disable=function-redefined
    """
    Render the SQL for getting the length of an array defined in a
    JSONB column, via jsonb_typeof.
    """
    clause_sql = compiler.process(element.clauses)
    return "jsonb_typeof(%s)" % clause_sql
def guid(fn):
    """Server mock; pull the object guid out of the request url and pass
    it to the wrapped handler as the ``guid`` keyword argument."""
    @wraps(fn)
    def wrapper(self, request, context, *args, **kwargs):
        matched_guid = uuid_url_matcher('.+').match(request.path).group(1)
        return fn(self, request=request, context=context, guid=matched_guid, *args, **kwargs)
    return wrapper
def pil_to_numpy(img: Image.Image) -> np.ndarray:
    """
    Args:
        img: an Image.Image from `read_img()`

    Returns: np.ndarray, RGB-style, with shape: [3, H, W], scale: [0, 255], dtype: float32
    """
    rgb = img.convert('RGB')
    hwc = np.asarray(rgb, dtype='float32')
    # Move channels first: (H, W, C) -> (C, H, W).
    return hwc.transpose((2, 0, 1))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.