| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def get_env(env_name: str) -> str:
"""
Safely read an environment variable.
Raises errors if it is not defined or it is empty.
:param env_name: the name of the environment variable
:return: the value of the environment variable
"""
if env_name not in os.environ:
raise KeyError(f"{env_name} not defined")
env_value: str = os.environ[env_name]
if not env_value:
raise ValueError(f"{env_name} has yet to be configured")
return env_value
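A minimal usage sketch (the variable name below is hypothetical; `os` must be imported alongside the function):
import os
os.environ['API_TOKEN'] = 'abc123'
print(get_env('API_TOKEN'))  # abc123
# get_env('MISSING_VAR') raises KeyError; an empty value raises ValueError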
| 5,337,800
|
def image_resize_and_sharpen(image, size, preserve_aspect_ratio=False, factor=2.0):
"""
Create a thumbnail by resizing while keeping ratio.
A sharpen filter is applied for a better looking result.
:param image: PIL.Image.Image()
:param size: 2-tuple(width, height)
:param preserve_aspect_ratio: boolean (default: False)
:param factor: Sharpen factor (default: 2.0)
"""
origin_mode = image.mode
if image.mode != 'RGBA':
image = image.convert('RGBA')
image.thumbnail(size, Image.ANTIALIAS)
if preserve_aspect_ratio:
size = image.size
sharpener = ImageEnhance.Sharpness(image)
resized_image = sharpener.enhance(factor)
# create a transparent image for background and paste the image on it
image = Image.new('RGBA', size, (255, 255, 255, 0))
image.paste(resized_image, ((size[0] - resized_image.size[0]) // 2, (size[1] - resized_image.size[1]) // 2))
if image.mode != origin_mode:
image = image.convert(origin_mode)
return image
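A usage sketch with a synthetic input image, assuming an older Pillow release where `Image.ANTIALIAS` (used above) still exists:
from PIL import Image, ImageEnhance
img = Image.new('RGB', (400, 300), (120, 180, 200))  # hypothetical source image
thumb = image_resize_and_sharpen(img, (128, 128), preserve_aspect_ratio=True)
print(thumb.size, thumb.mode)  # (128, 96) RGB -- aspect ratio kept, original mode restored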
| 5,337,801
|
def callHgsql(database, command):
""" Run hgsql command using subprocess, return stdout data if no error."""
cmd = ["hgsql", database, "-Ne", command]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
cmdout, cmderr = p.communicate()
if p.returncode != 0:
# keep command arguments nicely quoted
cmdstr = " ".join([pipes.quote(arg) for arg in cmd])
raise Exception("Error from: " + cmdstr + ": " + cmderr)
return cmdout
| 5,337,802
|
def validate_mash(seq_list, metadata_reports, expected_species):
"""
Takes a species name as a string (e.g. 'Salmonella enterica') and creates a dictionary keyed by Seq ID,
with boolean values indicating whether the value pulled from MASH_ReferenceGenome matches that string
:param seq_list: List of OLC Seq IDs
:param metadata_reports: Dictionary retrieved from get_combined_metadata()
:param expected_species: String containing expected species
:return: Dictionary with Seq IDs as keys and True/False as values
"""
seq_status = {}
for seqid in seq_list:
print('Validating MASH reference genome for {} '.format(seqid))
df = metadata_reports[seqid]
observed_species = df.loc[df['SeqID'] == seqid]['MASH_ReferenceGenome'].values[0]
seq_status[seqid] = observed_species == expected_species
return seq_status
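A minimal sketch with a hand-built metadata report (the Seq ID and values are hypothetical):
import pandas as pd
report = pd.DataFrame({'SeqID': ['2021-SEQ-0001'], 'MASH_ReferenceGenome': ['Salmonella enterica']})
print(validate_mash(['2021-SEQ-0001'], {'2021-SEQ-0001': report}, 'Salmonella enterica'))
# {'2021-SEQ-0001': True}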
| 5,337,803
|
def style_strokes(svg_path: str, stroke_color: str='#ff0000',
stroke_width: float=0.07559055) -> etree.ElementTree:
"""Modifies a svg file so that all black paths become laser cutting paths.
Args:
svg_path: a file path to the svg file to modify and overwrite.
stroke_color: the color, as a hex code, to set paths to.
stroke_width: the stroke width, in pixels (at 96 pixels per inch), to
set paths to.
Returns:
The modified XML tree.
"""
xml = etree.parse(svg_path)
svg = xml.getroot()
paths = svg.findall('.//{http://www.w3.org/2000/svg}path'
'[@style="stroke:#000000;fill:none"]')
for path in paths:
path.set('style', (
'fill:none;stroke:{};stroke-opacity:1;stroke-width:{};'
'stroke-miterlimit:4;stroke-dasharray:none'
).format(stroke_color, stroke_width))
return xml
| 5,337,804
|
def preview_pipeline(
pipeline: Pipeline, domain_retriever: DomainRetriever, limit: int = 50, offset: int = 0
) -> str:
"""
Execute a pipeline but return only a slice of the results, determined by the `limit` and `offset` parameters, as JSON.
The return format follows the 'table' JSON table schema used by pandas (see
https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#orient-options), with a few additions related to
pagination.
Note: it is required to use the pandas `to_json` method, as it converts NaN and dates to an appropriate format.
"""
df, _ = execute_pipeline(pipeline, domain_retriever)
return json.dumps(
{
'schema': build_table_schema(df, index=False),
'offset': offset,
'limit': limit,
'total': df.shape[0],
'data': json.loads(df[offset : offset + limit].to_json(orient='records')),
}
)
| 5,337,805
|
def redirect_to_url(url):
"""
Return a bcm dictionary with a command to redirect to 'url'
"""
return {'mode': 'redirect', 'url': url}
| 5,337,806
|
def get_xml_tagged_data(buffer, include_refstr=True):
"""
Figure out which format the file is in and call the
respective function to return data for training.
:param buffer: list of file lines
:param include_refstr: the refstr is not needed during training
:return:
"""
if len(buffer) > 1 and 'http://www.elsevier.com/xml/document' in buffer[1]:
return get_elsevier_tagged_data(REGEX_XML_TAG_FORMAT.sub('', ' '.join(buffer)))
if len(buffer) > 1 and 'ADSBIBCODE' in buffer[0] and 'citation_list' in buffer[1]:
buffer = '<?xml version="1.0"?>' + ' '.join(buffer[1:])
return get_crossref_tagged_data(buffer, include_refstr)
if len(buffer) > 1 and 'ADSBIBCODE' in buffer[0] and 'Citation ID' in buffer[1]:
selected_buffer = ['<?xml version="1.0"?>', '<CitationList>']
for line in buffer:
line = line.strip()
if line.startswith('<Citation ID='):
selected_buffer.append(line)
selected_buffer.append('</CitationList>')
return get_springer_tagged_data('\n'.join(selected_buffer), include_refstr)
return None
| 5,337,807
|
async def test_successful_config_flow(hass, mqtt_mock):
"""Test a successful config flow."""
# Initialize a config flow
result = await _flow_init(hass)
# Check that the config flow shows the user form as the first step
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await _flow_configure(hass, result)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == USER_INPUT[CONF_NAME]
assert result["data"] == USER_INPUT
assert result["result"]
| 5,337,808
|
async def helppanel(ctx):
"""
Send out the student leadership panel.
:param ctx: Context
:return: None
"""
await helpdirectory.send_help_panel(ctx)
| 5,337,809
|
def de_dup(
data: pd.DataFrame,
drop_duplicates_kwargs: Dict[str, Any] = {},
) -> pd.DataFrame:
"""Drop duplicate rows
"""
return data.drop_duplicates(**drop_duplicates_kwargs)
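For example (a small sketch with pandas; column names are arbitrary):
import pandas as pd
df = pd.DataFrame({'a': [1, 1, 2], 'b': ['x', 'y', 'y']})
print(len(de_dup(df)))                     # 3 -- all rows are distinct
print(len(de_dup(df, {'subset': ['a']})))  # 2 -- duplicates judged on column 'a' only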
| 5,337,810
|
def get_task_defs(workspace: str, num_validators: int, num_fullnodes: int) -> dict:
"""
Builds a dictionary of:
family -> current_task_def
task_def can be used to get the following when updating a service:
- containerDefinitions
- volumes
- placementConstraints
NOTE: it is only possible to get the currently running list of task definitions, so a `tf apply` is needed afterwards.
We also need a way to restore to a steady state if an ECS update makes a node boot-loop.
"""
ret = {}
print("Fetching ECS tasks")
def_fams = []
def_fams += [f"{workspace}-validator-{i}" for i in range(num_validators)]
def_fams += [f"{workspace}-fullnode-{i}" for i in range(num_fullnodes)]
for fam in def_fams:
print(f"Fetching task definition for {fam}")
task_def = execute_cmd_with_json_output(
["aws", "ecs", "describe-task-definition", "--task-definition", fam,],
err=f"could not get task definition for {fam}",
)
key = task_def.get("taskDefinition").get("family")
ret[key] = task_def.get("taskDefinition")
# put the tags separately
tags = task_def.get("tags")
ret[key]["tags"] = tags if tags else []
print()
return ret
| 5,337,811
|
def gruml(source_code_path, **kwargs):
"""driver function of GRUML.
"""
gruml = GRUML()
print('Generating RUML for source code at: {}'.format(source_code_path))
gruml.get_source_code_path_and_modules(source_code_path)
gruml.get_driver_path_and_driver_name(
kwargs.get('use_case', None),
kwargs.get('driver_name', None),
kwargs.get('driver_path', None),
kwargs.get('driver_function', None),
)
gruml.generate_dependency_data()
if gruml.use_case:
gruml.generate_sequential_function_calls()
| 5,337,812
|
def get_terms(cmd, urn=None, publisher=None, offer=None, plan=None):
"""
Get the details of Azure Marketplace image terms.
:param cmd:cmd
:param urn:URN, in the format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted
:param publisher:Image publisher
:param offer:Image offer
:param plan:Image billing plan
:return:
"""
from azure.mgmt.marketplaceordering.models import OfferType
publisher, offer, plan = _terms_prepare(cmd, urn, publisher, offer, plan)
op = cf_vm_image_term(cmd.cli_ctx, '')
terms = op.get(offer_type=OfferType.VIRTUALMACHINE,
publisher_id=publisher,
offer_id=offer,
plan_id=plan)
return terms
| 5,337,813
|
def encode_payload( result ):
"""JSON encodes a dictionary, named tuple, or object for sending
to the server
"""
try:
return tornado.escape.json_encode( result )
except TypeError:
if type( result ) is list:
return [ tornado.escape.json_encode( r ) for r in result ]
d = { k: getattr( result, k ) for k in result.__dict__ }
return tornado.escape.json_encode( d )
| 5,337,814
|
def relu(shape) -> np.ndarray:
"""
Creates a normally distributed numpy array with a mean of 0 and a standard deviation of sqrt(2/m),
where m = shape[1] (He initialization for layers with ReLU activations).
Arguments:
shape : tuple : A tuple with 2 numbers, specifying the size of the numpy array.
Returns:
output : np.ndarray : A normally distributed numpy array.
"""
return np.random.normal(0, np.sqrt(2/shape[1]), shape)
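A quick sanity check of the draw (seed fixed only for repeatability):
import numpy as np
np.random.seed(0)
W = relu((4, 100))
print(W.shape, W.std())  # (4, 100), sample std close to sqrt(2/100) ~= 0.141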
| 5,337,815
|
def reset_all():
"""Batch reset of batch records."""
_url = request.args.get("url") or request.referrer
task_id = request.form.get("task_id")
task = Task.get(task_id)
try:
count = utils.reset_all_records(task)
except Exception as ex:
flash(f"Failed to reset the selected records: {ex}")
else:
flash(f"{count} {task.task_type.name} records were reset for batch processing.", "info")
return redirect(_url)
| 5,337,816
|
def update_to_report(db, data, section_name, img_path,id):
"""
Update data of report
"""
query = '''UPDATE report
SET data = "{}" ,
section_name = "{}",
image_path = "{}"
WHERE id = "{}" '''.format(data, section_name, img_path, id)
result = get_db_with_actions(db, query)
return(result)
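The string formatting above is open to SQL injection; a hedged sketch of a parameterized alternative, assuming the underlying DB-API cursor is accessible (which `get_db_with_actions` may not expose):
query = ('UPDATE report SET data = %s, section_name = %s, image_path = %s '
         'WHERE id = %s')
# cursor.execute(query, (data, section_name, img_path, id))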
| 5,337,817
|
def keyboard(table, day=None):
"""Handler for showing the keyboard statistics page."""
cols, group = "realkey AS key, COUNT(*) AS count", "realkey"
where = (("day", day),) if day else ()
counts_display = counts = db.fetch(table, cols, where, group, "count DESC")
if "combos" == table:
counts_display = db.fetch(table, "key, COUNT(*) AS count", where,
"key", "count DESC")
events = db.fetch(table, where=where, order="stamp")
for e in events: e["dt"] = datetime.datetime.fromtimestamp(e["stamp"])
stats, collatedevents = stats_keyboard(events, table)
days, input = db.fetch("counts", order="day", type=table), "keyboard"
return bottle.template("heatmap.tpl", locals(), conf=conf)
| 5,337,818
|
def fetch(uri, username='', password=''):
"""Can fetch with Basic Authentication"""
headers = {}
if username and password:
headers['Authorization'] = 'Basic ' + base64.b64encode('%s:%s' % (username, password))
headers['User-Agent'] = 'Twimonial'
f = urlfetch.fetch(uri, headers=headers)
logging.debug('Fetching %s (%s): %d' % (uri, username, f.status_code))
return f
| 5,337,819
|
def update_identity_provider(UserPoolId=None, ProviderName=None, ProviderDetails=None, AttributeMapping=None, IdpIdentifiers=None):
"""
Updates identity provider information for a user pool.
See also: AWS API Documentation
:example: response = client.update_identity_provider(
UserPoolId='string',
ProviderName='string',
ProviderDetails={
'string': 'string'
},
AttributeMapping={
'string': 'string'
},
IdpIdentifiers=[
'string',
]
)
:type UserPoolId: string
:param UserPoolId: [REQUIRED]
The user pool ID.
:type ProviderName: string
:param ProviderName: [REQUIRED]
The identity provider name.
:type ProviderDetails: dict
:param ProviderDetails: The identity provider details to be updated, such as MetadataURL and MetadataFile .
(string) --
(string) --
:type AttributeMapping: dict
:param AttributeMapping: The identity provider attribute mapping to be changed.
(string) --
(string) --
:type IdpIdentifiers: list
:param IdpIdentifiers: A list of identity provider identifiers.
(string) --
:rtype: dict
:return: {
'IdentityProvider': {
'UserPoolId': 'string',
'ProviderName': 'string',
'ProviderType': 'SAML',
'ProviderDetails': {
'string': 'string'
},
'AttributeMapping': {
'string': 'string'
},
'IdpIdentifiers': [
'string',
],
'LastModifiedDate': datetime(2015, 1, 1),
'CreationDate': datetime(2015, 1, 1)
}
}
:returns:
(string) --
(string) --
"""
pass
| 5,337,820
|
def _handle_file(args):
"""
Handle the --file argument.
"""
filename = args.filename
_parse_and_output(filename, args)
| 5,337,821
|
def tsCrossValidationScore(params, series,loss_function=mean_squared_error, nsplits=3, slen=1):
"""
Parameters:
params : vector of parameters for optimization (e.g. three parameters:
alpha, beta, gamma)
series : dataset with timeseries
slen : season length for the Holt-Winters model
Returns:
error on CrossValidation
"""
# errors array
errors = []
values = series.values
alpha, beta, gamma = params
# set the number of folds for cross-validation
tscv = TimeSeriesSplit(n_splits=nsplits)
# iterating over folds, train model on each, forecast and calculate error
for train, test in tscv.split(values):
model = HoltWinters(series=values[train], slen=slen,
alpha=alpha, beta=beta, gamma=gamma, n_preds=len(test))
model.triple_exponential_smoothing()
predictions = model.result[-len(test):]
actual = values[test]
error = loss_function(predictions, actual)
errors.append(error)
return np.mean(np.array(errors))
| 5,337,822
|
def test_empty_file():
"""
Test empty file
"""
empty_file = 'test/empty.json'
ephemeral = TinyDB(empty_file, storage=EphemeralJSONStorage)
assert ephemeral.get(where('name') == 'Value A') is None
ephemeral.close()
| 5,337,823
|
def handle_exhibition(data: List[Dict[str, str]]):
"""
Iterates over all provided exhibition items and checks the http.StatusOK for
every valid url item.
:param data: list containing exhibition dictionary items.
"""
print("Processing exhibition item links ...")
fails = ""
for idx, item in enumerate(data):
item_num = item['company_name']
info_rune = "•"
# exhibition has a number of 'material_' keys where urls can be referenced
materials = list(filter(lambda cur: cur.startswith("material_"), item.keys()))
materials.append("website")
for url_key in materials:
# check link availability
if check_url := handle_link_item(item, url_key):
# check url and process resulting error messages
if err := handle_url_check(check_url):
info_rune = "x"
fails += f"{err} | Exhibition {item_num} | {url_key}({check_url})\n"
if idx and not idx % 100:
print(f" {idx}")
print(info_rune, end="", flush=True)
print(f"\n{fails}")
| 5,337,824
|
def get_psf_fwhm(psf_template: np.ndarray) -> float:
"""
Fit a symmetric 2D Gaussian to the given ``psf_template`` to
estimate the full width half maximum (FWHM) of the central "blob".
Args:
psf_template: A 2D numpy array containing the unsaturated
PSF template.
Returns:
The FWHM of the PSF template (in pixels).
"""
# Crop PSF template: too large templates (which are mostly zeros) can
# cause problems when fitting them with a 2D Gauss function
psf_cropped = np.copy(psf_template)
if psf_template.shape[0] >= 33 or psf_template.shape[1] >= 33:
psf_cropped = crop_center(psf_cropped, (33, 33))
# Define the grid for the fit
x, y = np.meshgrid(
np.arange(psf_cropped.shape[0]), np.arange(psf_cropped.shape[1])
)
# Create a new Gaussian2D object
center = get_center(psf_cropped.shape)
gaussian = models.Gaussian2D(x_mean=center[0], y_mean=center[1])
# Define auxiliary function for tieing the standard deviations
def tie_stddev(gaussian: Any) -> Any:
return gaussian.y_stddev
# Enforce symmetry: tie standard deviation parameters to same value to
# ensure that the resulting 2D Gaussian is always circular
gaussian.x_stddev.tied = tie_stddev
# Fix the position (= mean) of the 2D Gaussian
gaussian.x_mean.fixed = True
gaussian.y_mean.fixed = True
# Fit the model to the data
fit_p = fitting.LevMarLSQFitter()
gaussian_model = fit_p(gaussian, x, y, np.nan_to_num(psf_cropped))
# Make sure the returned FWHM is positive
return abs(float(gaussian_model.x_fwhm))
| 5,337,825
|
def test_structure_linecache():
"""Linecaching for structuring should work."""
@define
class A:
a: int
c = GenConverter()
try:
c.structure({"a": "test"}, A)
except ValueError:
res = format_exc()
assert "'a'" in res
| 5,337,826
|
def sqrt(number):
"""
Calculate the floored square root of a number
Args:
number(int): Number whose floored square root to find
Returns:
(int): Floored Square Root
"""
assert number >= 0, 'Only square roots of non-negative numbers are valid'
start = 0
end = number
res = None
while start <= end:
middle = (start + end) // 2
square = middle ** 2
next_square = (middle + 1) ** 2
if square <= number and next_square > number:
res = middle
break
if square > number:
end = middle - 1
else:
start = middle + 1
return res
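For example:
print(sqrt(16))  # 4
print(sqrt(27))  # 5 (floor of 5.196...)
print(sqrt(0))   # 0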
| 5,337,827
|
def add_roi_shared_config(cfg):
"""
Add config for roi shared head
"""
_C = cfg
_C.MODEL.ROI_BOX_HEAD.IN_FEATURES = []
_C.MODEL.ROI_SHARED_HEAD = CN()
_C.MODEL.ROI_SHARED_HEAD.IN_FEATURES = ["p2"]
_C.MODEL.ROI_SHARED_HEAD.POOLER_RESOLUTION = 32
_C.MODEL.ROI_SHARED_HEAD.POOLER_TYPE = "ROIAlignV2"
_C.MODEL.ROI_SHARED_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_SHARED_BLOCK = CN()
_C.MODEL.ROI_SHARED_BLOCK.NAME = "ParsingSharedBlock"
_C.MODEL.ROI_SHARED_BLOCK.ASPP_ON = True
_C.MODEL.ROI_SHARED_BLOCK.ASPP_DIM = 256
_C.MODEL.ROI_SHARED_BLOCK.ASPP_DILATION = (6, 12, 18)
_C.MODEL.ROI_SHARED_BLOCK.NONLOCAL_ON = True
_C.MODEL.ROI_SHARED_BLOCK.NONLOCAL_REDUCTION_RATIO = 0.5
_C.MODEL.ROI_SHARED_BLOCK.NONLOCAL_USE_MAXPOOL = True
_C.MODEL.ROI_SHARED_BLOCK.NONLOCAL_USE_BN = True
_C.MODEL.ROI_SHARED_BLOCK.NUM_CONVS_BEFORE_ASPP_NL = 0
_C.MODEL.ROI_SHARED_BLOCK.NUM_CONVS_AFTER_ASPP_NL = 4
_C.MODEL.ROI_SHARED_BLOCK.CONV_HEAD_DIM = 512
_C.MODEL.ROI_SHARED_BLOCK.CONV_HEAD_KERNEL = 3
_C.MODEL.ROI_DENSEPOSE_PREDICTOR = CN()
_C.MODEL.ROI_DENSEPOSE_PREDICTOR.NAME = "Default"
| 5,337,828
|
def test_cluster_representatives_criterion_flag(analysis, criterion, expected):
"""
Tests the user-defined method of selecting cluster representatives.
Parameters
----------
analysis : Analysis object
Created by a fixture.
criterion : str
cluster_representatives_criterion flag defined by the user.
expected : str
Expected value in the dataframe.
TODO: Manually check expected values and then add them to the test to make sure we're getting the right stuff!
"""
output_folder = "cluster_rep_selection"
csv = os.path.join(output_folder, "top_selections.csv")
analysis.generate_clusters(
path=output_folder,
clustering_type="meanshift",
bandwidth=2.5,
max_top_clusters=1,
representatives_criterion=criterion,
)
df = pd.read_csv(csv)
assert all(
[x in df.columns
for x in [
"Cluster label",
"epoch",
"trajectory",
"Step",
"currentEnergy",
"Binding Energy",
"sasaLig",
]]
)
assert not df.isnull().values.any()
check_remove_folder(output_folder)
| 5,337,829
|
def check_child_update_request_from_parent(
command_msg,
leader_data=CheckType.OPTIONAL,
network_data=CheckType.OPTIONAL,
challenge=CheckType.OPTIONAL,
tlv_request=CheckType.OPTIONAL,
active_timestamp=CheckType.OPTIONAL,
):
"""Verify a properly formatted Child Update Request(from parent) command message.
"""
check_secure_mle_key_id_mode(command_msg, 0x02)
command_msg.assertMleMessageContainsTlv(mle.SourceAddress)
check_mle_optional_tlv(command_msg, leader_data, mle.LeaderData)
check_mle_optional_tlv(command_msg, network_data, mle.NetworkData)
check_mle_optional_tlv(command_msg, challenge, mle.Challenge)
check_mle_optional_tlv(command_msg, tlv_request, mle.TlvRequest)
check_mle_optional_tlv(command_msg, active_timestamp, mle.ActiveTimestamp)
| 5,337,830
|
def repetition_sigmoid(M):
"""
Used to model repetition-driven effects of STDP. More repetitions result in a stronger increase/decrease.
"""
return 1.0/(1+np.exp(-0.2*M+10))
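For instance, the curve is centred at M = 50 repetitions:
import numpy as np
for M in (0, 50, 100):
    print(M, repetition_sigmoid(M))  # ~0.0, 0.5, ~1.0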
| 5,337,831
|
def generate_features(ids0, ids1, forcefield, system, param):
"""
This function performs a minimization of the energy and computes the matrix features.
:param ids0: ids of the atoms for the 1st protein
:param ids1: ids of the atoms for the 2nd protein
:param forcefield: forcefield for OpenMM simulation
:param system: system for OpenMM simulation
:param param: OpenMM parameters
:return: features - to be used by ML models
"""
# sources
# https://en.wikipedia.org/wiki/Electrostatics
# https://en.wikipedia.org/wiki/Lennard-Jones_potential
# https://en.wikipedia.org/wiki/Combining_rules
# constants
eps0 = 8.8541878128e-12 * su.farad * su.meter**-1
e = 1.60217662e-19 * su.coulomb
N = 6.02214179e23 * su.mole**-1 # Avogadro
# scaling factors
k0 = (N * (e*e) / (4.0 * np.pi * eps0))
# get nonbonded interactions parameters for all atoms
# (Lennard-Jones and electrostatics)
epsilon = np.array([a.epsilon for a in param.atoms])
sigma = np.array([a.sigma for a in param.atoms])
charge = np.array([a.charge for a in param.atoms])
# pairwise epsilon with units
E = np.sqrt(epsilon[ids0].reshape(-1, 1) * epsilon[ids1].reshape(1, -1)) * param.atoms[0].uepsilon.unit
# pairwise sigma with units
S = 0.5 * (sigma[ids0].reshape(-1, 1) + sigma[ids1].reshape(1, -1)) * param.atoms[0].usigma.unit
# pairwise partial charges
Q = charge[ids0].reshape(-1, 1) * charge[ids1].reshape(1, -1)
# setup MD engine
integrator = so.LangevinIntegrator(300*su.kelvin, 1/su.picosecond, 0.002*su.picoseconds)
try:
platform = so.Platform.getPlatformByName('CUDA')
except Exception:
platform = so.Platform.getPlatformByName('CPU')
simulation = so.app.Simulation(param.topology, system, integrator, platform)
# set atom coordinates
simulation.context.setPositions(param.get_coordinates()[0] * su.angstrom)
# minimize energy
simulation.minimizeEnergy()
# get atom coordinates and compute distance matrix between subunits
state = simulation.context.getState(getPositions=True)
xyz = state.getPositions(asNumpy=True)
D = np.linalg.norm(np.expand_dims(
xyz[ids0], 1) - np.expand_dims(xyz[ids1], 0), axis=2) * su.angstrom
# To choose the most relevant residues, we will first choose the pair of atoms with the lowest distance, and then
# extract a submatrix around it. This way we preserve the chain order of the distance matrix.
min_i = np.argmin(D)
min_r, min_c = int(min_i/D.shape[1]), min_i % D.shape[1]
# Number of interacting residues/particles considered relevant to be stored in the features
n_interactions = 256
ids0_min, ids0_max = min_r - n_interactions // 2, min_r + n_interactions // 2
ids1_min, ids1_max = min_c - n_interactions // 2, min_c + n_interactions // 2
if ids0_min < 0:
ids0_max -= ids0_min
ids0_min = 0
elif ids0_max >= D.shape[0]:
ids0_min -= ids0_max - D.shape[0] + 1
ids0_max = D.shape[0]-1
if ids1_min < 0:
ids1_max -= ids1_min
ids1_min = 0
elif ids1_max >= D.shape[1]:
ids1_min -= ids1_max - D.shape[1] + 1
ids1_max = D.shape[1]-1
ids0_interacting = np.arange(ids0_min, ids0_max, dtype=np.int32)
ids1_interacting = np.arange(ids1_min, ids1_max, dtype=np.int32)
D = D[np.ix_(ids0_interacting, ids1_interacting)]
S = S[np.ix_(ids0_interacting, ids1_interacting)]
Q = Q[np.ix_(ids0_interacting, ids1_interacting)]
E = E[np.ix_(ids0_interacting, ids1_interacting)]
# compute nonbonded potential energies
U_LJ = (4.0 * E * (np.power(S/D, 12) - np.power(S/D, 6))).value_in_unit(su.kilojoule / su.mole)
U_el = (k0 * Q / D).value_in_unit(su.kilojoule / su.mole)
features = {'U_LJ': U_LJ, 'U_el': U_el, 'D_mat': D}
return features
| 5,337,832
|
def extract_bow_feature_vectors(reviews, dictionary):
"""
Takes a list of review strings and the dictionary of words as given by bag_of_words.
Returns the bag-of-words feature matrix representation of the data.
The returned matrix is of shape (n, m), where n is the number of reviews
and m is the total number of entries in the dictionary.
Feel free to change this code as guided by Problem 9
"""
num_reviews = len(reviews)
feature_matrix = np.zeros([num_reviews, len(dictionary)])
stopwords = np.loadtxt('stopwords.txt', dtype = str)
for i, text in enumerate(reviews):
word_list = extract_words(text)
for word in stopwords:
word_list = list(filter((word).__ne__, word_list))
word_list_pairs = [(word1, word2) for (word1, word2) in zip(word_list[:-1], word_list[1:])]
for (word1, word2) in word_list_pairs:
if (word1, word2) in dictionary:
# This code gets a non-binary indicator
feature_matrix[i, dictionary[(word1, word2)]] += 1
return feature_matrix
| 5,337,833
|
def model_wcs_header(datamodel, get_sip=False, order=4, step=32):
"""
Make a header with approximate WCS for use in DS9.
Parameters
----------
datamodel : `jwst.datamodels.ImageModel`
Image model with full `~gwcs` in `with_wcs.meta.wcs`.
get_sip : bool
If True, fit a `astropy.modeling.models.SIP` distortion model to the
image WCS.
order : int
Order of the SIP polynomial model.
step : int
For fitting the SIP model, generate a grid of detector pixels every
`step` pixels in both axes for passing through
`datamodel.meta.wcs.forward_transform`.
Returns
-------
header : `~astropy.io.fits.Header`
Header with simple WCS definition: CD rotation but no distortion.
"""
from astropy.io.fits import Header
from scipy.optimize import least_squares
sh = datamodel.data.shape
try:
pipe = datamodel.meta.wcs.pipeline[0][1]
if 'offset_2' in pipe.param_names:
# NIRISS WCS
c_x = pipe.offset_2.value
c_y = pipe.offset_3.value
else:
# Simple WCS
c_x = pipe.offset_0.value
c_y = pipe.offset_1.value
crpix = np.array([-c_x+1, -c_y+1])
except Exception:
crpix = np.array(sh)/2.+0.5
crval = datamodel.meta.wcs.forward_transform(crpix[0], crpix[1])
cdx = datamodel.meta.wcs.forward_transform(crpix[0]+1, crpix[1])
cdy = datamodel.meta.wcs.forward_transform(crpix[0], crpix[1]+1)
header = Header()
header['RADESYS'] = 'ICRS'
header['CTYPE1'] = 'RA---TAN'
header['CTYPE2'] = 'DEC--TAN'
header['CUNIT1'] = header['CUNIT2'] = 'deg'
header['CRPIX1'] = crpix[0]
header['CRPIX2'] = crpix[1]
header['CRVAL1'] = crval[0]
header['CRVAL2'] = crval[1]
cosd = np.cos(crval[1]/180*np.pi)
header['CD1_1'] = (cdx[0]-crval[0])*cosd
header['CD1_2'] = (cdy[0]-crval[0])*cosd
header['CD2_1'] = cdx[1]-crval[1]
header['CD2_2'] = cdy[1]-crval[1]
cd = np.array([[header['CD1_1'], header['CD1_2']], [header['CD2_1'], header['CD2_2']]])
if not get_sip:
return header
#### Fit a SIP header to the gwcs transformed coordinates
v, u = np.meshgrid(np.arange(1,sh[0]+1,step), np.arange(1,sh[1]+1,step))
x, y = datamodel.meta.wcs.forward_transform(u, v)
y -= crval[1]
x = (x-crval[0])*np.cos(crval[1]/180*np.pi)
a_names = []
b_names = []
#order = 4
for i in range(order+1):
for j in range(order+1):
ext = '{0}_{1}'.format(i,j)
if (i+j) > order:
continue
if ext in ['0_0', '0_1','1_0']:
continue
a_names.append('A_'+ext)
b_names.append('B_'+ext)
p0 = np.zeros(4+len(a_names)+len(b_names))
p0[:4] += cd.flatten()
args = (u.flatten(), v.flatten(), x.flatten(), y.flatten(), crpix, a_names, b_names, cd, 0)
# Fit the SIP coeffs
fit = least_squares(_objective_sip, p0, jac='2-point', bounds=(-np.inf, np.inf),
                    method='lm', ftol=1e-08, xtol=1e-08, gtol=1e-08,
                    x_scale=1.0, loss='linear', f_scale=1.0, diff_step=None,
                    tr_solver=None, tr_options={}, jac_sparsity=None,
                    max_nfev=1000, verbose=0, args=args, kwargs={})
# Get the results
args = (u.flatten(), v.flatten(), x.flatten(), y.flatten(), crpix, a_names, b_names, cd, 1)
cd_fit, a_coeff, b_coeff = _objective_sip(fit.x, *args)
# Put in the header
for i in range(2):
for j in range(2):
header['CD{0}_{1}'.format(i+1, j+1)] = cd_fit[i,j]
header['CTYPE1'] = 'RA---TAN-SIP'
header['CTYPE2'] = 'DEC--TAN-SIP'
header['A_ORDER'] = order
for k in a_coeff:
header[k] = a_coeff[k]
header['B_ORDER'] = order
for k in b_coeff:
header[k] = b_coeff[k]
return header
| 5,337,834
|
def create_session():
"""Return a session to be used for database connections
Returns:
Session: SQLAlchemy session object
"""
# Produces integrity errors!
# return _Session()
# db.session is managed by Flask-SQLAlchemy and bound to a request
return db.session
| 5,337,835
|
def meta_to_indexes(meta, table_name=None, model_name=None):
"""Find all the indexes (primary keys) based on the meta data
"""
indexes = []
for meta_model_name, model_meta in meta.items():
if (table_name or model_name) and not (table_name == model_meta['Meta'].get('db_table', '') or model_name == meta_model_name):
continue
field_name, field_infodict, score = find_index(model_meta)
indexes.append(('%s.%s' % (meta_model_name, field_name), field_infodict, score))
return indexes
| 5,337,836
|
def index():
"""Render and return the index page.
This is a informational landing page for non-logged-in users, and the corp
homepage for those who are logged in.
"""
success, _ = try_func(auth.is_authenticated)
if success:
module = config.get("modules.home")
if module:
return config.modules[module].home()
return render_template("default_home.mako")
return render_template("landing.mako")
| 5,337,837
|
def test_user_tries_deleting_his_profile_but_it_fails_partially(
rf, user_gql_client, youth_service, berth_service, mocker
):
"""Test an edge case where dry runs passes for all connected services, but the
proper service connection delete fails for a single connected service. All other
connected services should still get deleted.
"""
def mock_delete_gdpr_data(self, api_token, dry_run=False):
if self.service.service_type == ServiceType.BERTH and not dry_run:
raise requests.HTTPError("Such big fail! :(")
mocker.patch.object(
ServiceConnection,
"delete_gdpr_data",
autospec=True,
side_effect=mock_delete_gdpr_data,
)
mocker.patch.object(
TunnistamoTokenExchange, "fetch_api_tokens", return_value=GDPR_API_TOKENS
)
profile = ProfileFactory(user=user_gql_client.user)
ServiceConnectionFactory(profile=profile, service=youth_service)
ServiceConnectionFactory(profile=profile, service=berth_service)
request = rf.post("/graphql")
request.user = user_gql_client.user
executed = user_gql_client.execute(DELETE_MY_PROFILE_MUTATION, context=request)
expected_data = {"deleteMyProfile": None}
assert ServiceConnection.objects.count() == 1
assert ServiceConnection.objects.first().service.service_type == ServiceType.BERTH
assert dict(executed["data"]) == expected_data
assert_match_error_code(executed, CONNECTED_SERVICE_DELETION_FAILED_ERROR)
| 5,337,838
|
def _(origin, category="", default=None):
"""
This function returns the localized string.
"""
return LOCALIZED_STRINGS_HANDLER.translate(origin, category, default)
| 5,337,839
|
def next_month(month: datetime) -> datetime:
"""Find the first day of the next month given a datetime.
:param month: the date
:type month: datetime
:return: The first day of the next month.
:rtype: datetime
"""
dt = this_month(month)
return datetime((dt+_A_MONTH).year, (dt+_A_MONTH).month, 1)
| 5,337,840
|
def walk_json(d, func):
""" Walk over a parsed JSON nested structure `d`, apply `func` to each leaf element and replace it with result
"""
if isinstance(d, Mapping):
return OrderedDict((k, walk_json(v, func)) for k, v in d.items())
elif isinstance(d, list):
return [walk_json(v, func) for v in d]
else:
return func(d)
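A small sketch, assuming the `Mapping` and `OrderedDict` names used by the function are imported as below:
from collections import OrderedDict
from collections.abc import Mapping
doc = {'a': 1, 'b': [2, {'c': 3}]}
print(walk_json(doc, lambda v: v * 10))
# OrderedDict([('a', 10), ('b', [20, OrderedDict([('c', 30)])])])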
| 5,337,841
|
def console_map_string_to_font(s, fontCharX, fontCharY):
"""Remap a string of codes to a contiguous set of tiles.
Args:
s (AnyStr): A string of character codes to map to new values.
The null character `'\\x00'` will prematurely end this
function.
fontCharX (int): The starting X tile coordinate on the loaded tileset.
0 is the leftmost tile.
fontCharY (int): The starting Y tile coordinate on the loaded tileset.
0 is the topmost tile.
"""
lib.TCOD_console_map_string_to_font_utf(_unicode(s), fontCharX, fontCharY)
| 5,337,842
|
def getUser():
"""This method will be called if a GET request is made to the /user/ route
It will get the details of a specified user
Parameters
----------
username
the name of the user to get info about
Raises
------
DoesNotExist
Raised if the username provided does not match a user in the database
Returns
-------
String: Either the json of the user, or an error message saying the user doesn't exist
"""
try:
user = User.objects(username=request.args["username"]).get()
return str(user.json())
except DoesNotExist:
return "That user does not exist, please try again :)"
| 5,337,843
|
def _create_ppo_agent(
time_step_spec: types.NestedTensorSpec, action_spec: types.NestedTensorSpec,
preprocessing_layers: types.NestedLayer,
policy_network: types.Network) -> tfa.agents.TFAgent:
"""Creates a ppo_agent."""
actor_network = policy_network(
time_step_spec.observation,
action_spec,
preprocessing_layers=preprocessing_layers,
name='ActorDistributionNetwork')
critic_network = constant_value_network.ConstantValueNetwork(
time_step_spec.observation, name='ConstantValueNetwork')
return ppo_agent.PPOAgent(
time_step_spec,
action_spec,
actor_net=actor_network,
value_net=critic_network)
| 5,337,844
|
def stiffness_matrix_CST(element=tetra_4()):
"""Calculate stiffness matrix for linear elasticity"""
element.volume()
B = strain_matrix_CST(element)
D = material()
print('B')
print(B)
print('V',element.V)
return element.V * np.dot(np.dot(np.transpose(B),D),B)
| 5,337,845
|
def count_down(print_text, count, sleep):
"""
Print text and animation count down.
Args:
print_text (str): Enter the text to be printed.
count (int): Counter number.
sleep (float): Delay in seconds between counts.
Returns:
print: Count down animation.
Usage:
.. code::
count_down('Your text ', 20, 0.1)
"""
animation = '\\|/-'
while (count >= 0):
print('\r', print_text, count, animation[count % len(animation)], end = ' ', flush = True)
count -= 1
time.sleep(sleep)
| 5,337,846
|
def test_handling_core_messages(hass):
"""Test handling core messages."""
cloud = MagicMock()
cloud.logout.return_value = mock_coro()
yield from iot.async_handle_cloud(hass, cloud, {
'action': 'logout',
'reason': 'Logged in at two places.'
})
assert len(cloud.logout.mock_calls) == 1
| 5,337,847
|
async def can_action_member(bot, ctx: SlashContext, member: discord.Member) -> bool:
""" Stop mods from doing stupid things. """
# Stop mods from actioning on the bot.
if member.id == bot.user.id:
return False
# Stop mods from actioning one another, people higher ranked than them or themselves.
if member.top_role >= ctx.author.top_role:
role_muted = discord.utils.get(member.guild.roles, id=settings.get_value("role_muted"))
role_restricted = discord.utils.get(member.guild.roles, id=settings.get_value("role_restricted"))
# Enable mods to use /unmute and /unrestrict on others since the roles "Muted" and "Restricted" are placed higher than "Staff".
if role_muted in member.roles or role_restricted in member.roles:
return True
return False
# Checking if Bot is able to even perform the action
if member.top_role >= member.guild.me.top_role:
return False
# Allow owner to override all limitations.
if member.id == ctx.guild.owner_id:
return True
# Otherwise, the action is probably valid, return true.
return True
| 5,337,848
|
def CountDatasetStatistics():
"""
Count the subgraphs for the published datasets
"""
# the datasets
input_filenames = {
'Hemi-Brain': 'graphs/hemi-brain-minimum.graph.bz2',
'C. elegans D1': 'graphs/C-elegans-timelapsed-01-minimum.graph.bz2',
'C. elegans D2': 'graphs/C-elegans-timelapsed-02-minimum.graph.bz2',
'C. elegans D3': 'graphs/C-elegans-timelapsed-03-minimum.graph.bz2',
'C. elegans D4': 'graphs/C-elegans-timelapsed-04-minimum.graph.bz2',
'C. elegans D5': 'graphs/C-elegans-timelapsed-05-minimum.graph.bz2',
'C. elegans D6': 'graphs/C-elegans-timelapsed-06-minimum.graph.bz2',
'C. elegans D7': 'graphs/C-elegans-timelapsed-07-minimum.graph.bz2',
'C. elegans D8': 'graphs/C-elegans-timelapsed-08-minimum.graph.bz2',
'C. elegans SH': 'graphs/C-elegans-sex-hermaphrodite-minimum.graph.bz2',
'C. elegans SM': 'graphs/C-elegans-sex-male-minimum.graph.bz2',
}
for dataset, input_filename in input_filenames.items():
# write the dataset
sys.stdout.write('{} '.format(dataset))
# iterate over k
for k in [3, 4, 5, 6, 7]:
# go through all possibilities
nsubgraphs, _ = ReadSummaryStatistics(input_filename, k, False, False, False)
# skip if this doesn't exist
if not nsubgraphs:
sys.stdout.write('& N/A ')
else:
sys.stdout.write('& {} '.format(nsubgraphs))
sys.stdout.write('\\\\ \n')
| 5,337,849
|
def trim_waveform_signal(
tr: obspy.Trace,
cfg: types.ModuleType = config
) -> obspy.Trace:
"""Cut the time series to signal window
Args:
tr: time series
cfg: configuration file
Returns:
tr: trimmed time series
"""
starttime, endtime = signal_window(tr, cfg)
tr.trim(starttime=starttime, endtime=endtime)
return tr
| 5,337,850
|
def animation_plot(
x,
y,
z_data,
element_table,
ani_fname,
existing_fig,
ani_funcargs=None,
ani_saveargs=None,
kwargs=None,
):
"""
Tricontourf animation plot.
Resulting file will be saved to MP4
"""
global tf
# Subtract 1 from element table to align with Python indexing
t = tri.Triangulation(x, y, element_table - 1)
# Preassign fig and ax
if existing_fig is None:
fig, ax, tf = filled_mesh_plot(x, y, z_data[:, 0], element_table, **kwargs)
else:
fig, ax, tf = existing_fig
# animation function
def animate(i):
global tf
z = z_data[:, i]
for c in tf.collections:
c.remove() # removes only the contours, leaves the rest intact
tf = ax.tricontourf(t, z, **kwargs)
anim = animation.FuncAnimation(fig, animate, frames=z_data.shape[1], repeat=True,)
anim.save(ani_fname, writer=animation.FFMpegWriter(**ani_funcargs), **ani_saveargs)
return fig, ax, tf
| 5,337,851
|
def preprocess_adj(adj):
"""Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
# adj_appr = np.array(sp.csr_matrix.todense(adj))
# # adj_appr = dense_lanczos(adj_appr, 100)
# adj_appr = dense_RandomSVD(adj_appr, 100)
# if adj_appr.sum(1).min()<0:
# adj_appr = adj_appr- (adj_appr.sum(1).min()-0.5)*sp.eye(adj_appr.shape[0])
# else:
# adj_appr = adj_appr + sp.eye(adj_appr.shape[0])
# adj_normalized = normalize_adj(adj_appr)
# adj_normalized = normalize_adj(adj+sp.eye(adj.shape[0]))
# adj_appr = np.array(sp.coo_matrix.todense(adj_normalized))
# # adj_normalized = dense_RandomSVD(adj_appr,100)
# adj_normalized = dense_lanczos(adj_appr, 100)
adj_normalized = normalize_adj(sp.eye(adj.shape[0]) + adj)
# adj_normalized = sp.eye(adj.shape[0]) + normalize_adj(adj)
return sparse_to_tuple(adj_normalized)
| 5,337,852
|
def plot_figure_one_input_resource_2(style_label=""):
"""
Plot two bar graphs side by side, with letters as x-tick labels.
latency_dev_num_non_reuse.log
"""
prng = np.random.RandomState(96917002)
#plt.set_cmap('Greys')
#plt.rcParams['image.cmap']='Greys'
# Tweak the figure size to be better suited for a row of numerous plots:
# double the width and halve the height. NB: use relative changes because
# some styles may have a figure size different from the default one.
(fig_width, fig_height) = plt.rcParams['figure.figsize']
fig_size = [fig_width * 1.8, fig_height / 2]
fig, ax = plt.subplots(ncols=1, nrows=1, num=style_label, figsize=fig_size, squeeze=True)
plt.set_cmap('Greys')
ax.set_ylabel("Latency (s)", fontsize=larger_size)
ax.set_xlabel("Number of devices", fontsize=larger_size)
grid = "3x3"
config = ["MR-BODPv2", "WST-FGP-R"]
#np.array(load_data_vector(config[0]+"/" + grid + "/single_resource/commu_size_steal.log"))+
#np.array(load_data_vector(config[0] + "/" + grid + "/single_resource/commu_size_gateway.log"))
y1 = load_data_vector(config[0] + "/single_resource/latency.log")
y2 = load_data_vector(config[1] + "/" + grid + "/single_resource/latency.log")
x = np.arange(len(y1))
print(x)
width = 0.2
latency1 = ax.bar(x-0.5*width, y1, width, label='MR-BODP', color=[0.4, 0.4, 0.4], edgecolor =[0, 0, 0])
latency2 = ax.bar(x+0.5*width, y2, width, label='WST-FGP (Shuffle)', color=[0.8, 0.8, 0.8], edgecolor =[0, 0, 0], hatch='//')
ax.set_xticks(x)
ax.set_xticklabels(['1','2','3','4','5','6'])
ax.set_xlim([-0.5,len(x)-0.3])
ax.set_ylim([0, 30])
plt.tick_params(labelsize=larger_size)
y1 = np.array(load_data_vector(config[0]+ "/single_resource/commu_size.log"))
y2 = np.array(load_data_vector(config[1]+"/" + grid + "/single_resource/commu_size_steal.log"))+np.array(load_data_vector(config[1] + "/" + grid + "/single_resource/commu_size_gateway.log"))
ax2 = ax.twinx()
comm1 = ax2.plot(x-width, y1, label='MR-BODP', linestyle='-.', linewidth=4, color=[0.4, 0.4, 0.4], marker="s", markersize=16)
comm2 = ax2.plot(x+width, y2, label='WST-FGP (Shuffle)', linestyle='-.', linewidth=4, color=[0.8, 0.8, 0.8], marker="<", markersize=16)
ax2.set_ylabel("Commu. size (MB)", fontsize=larger_size)
ax2.set_xticklabels(['1','2','3','4','5','6'])
ax2.set_ylim([-30, 25])
ax2.set_yticks([0, 10, 20])
plt.tick_params(labelsize=larger_size)
#plt.legend(loc=9, ncol=4, bbox_to_anchor=(0.5, 1.16), framealpha=1, prop={'size': larger_size})
plt.figlegend((latency1[0], comm1[0], latency2[0], comm2[0]), ('MR-BODP',' ', 'WST-FGP ('+grid+' Shuffle)', ' '), loc=9, ncol=2, bbox_to_anchor=(0.5, 1), framealpha=1, prop={'size': larger_size})
#fig.tight_layout()
return fig
| 5,337,853
|
def comp_rot_dir(self):
"""Compute the rotation direction of the winding
Parameters
----------
self : LamSlotWind
A LamSlotWind object
Returns
-------
rot_dir : int
-1 or +1
"""
MMF = self.comp_mmf_unit()
p = self.get_pole_pair_number()
# Compute rotation direction from unit mmf
results = MMF.get_harmonics(1, "freqs", "wavenumber")
H1 = results[MMF.symbol]
return sign(H1[0])
| 5,337,854
|
def search_pk(uuid):
"""uuid can be pk."""
IterHarmonicApprox = WorkflowFactory("phonopy.iter_ha")
qb = QueryBuilder()
qb.append(IterHarmonicApprox, tag="iter_ha", filters={"uuid": {"==": uuid}})
PhonopyWorkChain = WorkflowFactory("phonopy.phonopy")
qb.append(PhonopyWorkChain, with_incoming="iter_ha")
qb.order_by({PhonopyWorkChain: {"ctime": "asc"}})
pks = [n[0].pk for n in qb.all() if n[0].is_finished_ok]
return pks
| 5,337,855
|
def _region_bulk(mode='full', scale=.6):
"""
Estimate of the temperature dependence of bulk viscosity zeta/s.
"""
plt.figure(figsize=(scale*textwidth, scale*aspect*textwidth))
ax = plt.axes()
def zetas(T, zetas_max=0, zetas_width=1):
return zetas_max / (1 + ((T - Tc)/zetas_width)**2)
chain = mcmc.Chain()
keys, ranges = map(list, zip(*(
i for i in zip(chain.keys, chain.range)
if i[0].startswith('zetas')
)))
T = Tc*np.linspace(.5, 1.5, 1000)
maxdict = {k: r[1] for k, r in zip(keys, ranges)}
ax.fill_between(
T, zetas(T, **maxdict),
label='Prior range',
**region_style
)
ax.set_xlim(T[0], T[-1])
ax.set_ylim(0, 1.05*maxdict['zetas_max'])
auto_ticks(ax, minor=2)
ax.set_xlabel('Temperature [GeV]')
ax.set_ylabel(r'$\zeta/s$')
if mode == 'empty':
return
if mode == 'examples':
for args in [
(.025, .01),
(.050, .03),
(.075, .05),
]:
ax.plot(T, zetas(T, *args), color=plt.cm.Blues(.7))
return
# use a Gaussian mixture model to classify zeta/s parameters
samples = chain.load(*keys, thin=10)
gmm = GaussianMixture(n_components=3, covariance_type='full').fit(samples)
labels = gmm.predict(samples)
for n in range(gmm.n_components):
params = dict(zip(
keys,
(mcmc.credible_interval(s)[1] for s in samples[labels == n].T)
))
if params['zetas_max'] > .05:
cmap = 'Blues'
elif params['zetas_width'] > .03:
cmap = 'Greens'
else:
cmap = 'Oranges'
curve = zetas(T, **params)
color = getattr(plt.cm, cmap)(.65)
ax.plot(T, curve, color=color, zorder=-10)
ax.fill_between(T, curve, color=color, alpha=.1, zorder=-20)
ax.legend(loc='upper left')
| 5,337,856
|
def training(args):
"""Train the model and frequently print the loss and save checkpoints."""
dataset = tf.data.TextLineDataset([args.corpus])
dataset = dataset.map(
lambda line: tf.decode_raw(line, tf.uint8))
dataset = dataset.flat_map(
lambda line: chunk_sequence(line, args.chunk_length))
dataset = dataset.cache().shuffle(buffer_size=1000).batch(args.batch_size)
dataset = dataset.repeat().prefetch(1)
chunks, length = dataset.make_one_shot_iterator().get_next()
model = RecurrentLanguageModel(args.num_layers, args.num_units)
optimize = model.optimize(chunks, length, args.learning_rate)
step = tf.train.get_or_create_global_step()
increment_step = step.assign_add(1)
with initialize_session(args.logdir) as (sess, saver):
while True:
if sess.run(step) >= args.total_steps:
print('Training complete.')
break
loss_value, step_value = sess.run([optimize, increment_step])
if step_value % args.log_every == 0:
print('Step {} loss {}.'.format(step_value, loss_value))
if step_value % args.checkpoint_every == 0:
print('Saving checkpoint.')
saver.save(sess, os.path.join(args.logdir, 'model.ckpt'), step_value)
| 5,337,857
|
def bias(struct,subover=True,trim=True, subbias=False, bstruct=None,
median=False, function='polynomial',order=3,rej_lo=3,rej_hi=3,niter=10,
plotover=False, log=None, verbose=True):
"""Bias subtracts the bias levels from a frame. It will fit and subtract the overscan
region, trim the images, and subtract a master bias if required.
struct--image structure
subover--subtract the overscan region
trim--trim the image
subbias--subtract master bias
bstruct--master bias image structure
median--use the median instead of mean in image statistics
function--form to fit to the overscan region
order--order for the function
rej_lo--sigma of low points to reject in the fit
rej_hi--sigma of high points to reject in the fit
niter--number of iterations
log--saltio log for recording information
verbose--whether to print to stdout
"""
infile=saltkey.getimagename(struct[0])
# how many extensions?
nsciext = saltkey.get('NSCIEXT',struct[0])
nextend = saltkey.get('NEXTEND',struct[0])
nccd = saltkey.get('NCCDS',struct[0])
# how many amplifiers?--this is hard wired
amplifiers = 2 * nccd
#log the process
if subover and log:
message = '%28s %7s %5s %4s %6s' % \
('HDU','Overscan','Order','RMS','Niter')
log.message('\n --------------------------------------------------',
with_header=False, with_stdout=verbose)
log.message(message, with_header=False, with_stdout=verbose)
log.message(' --------------------------------------------------',
with_header=False, with_stdout=verbose)
if (plotover):
plt.figure(1)
plt.axes([0.1,0.1,0.8,0.8])
plt.xlabel('CCD Column')
plt.ylabel('Pixel Counts (e-)')
plt.ion()
#loop through the extensions and subtract the bias
for i in range(1,nsciext+1):
if struct[i].name=='SCI':
#get the bias section
biassec = saltkey.get('BIASSEC',struct[i])
y1,y2,x1,x2 = saltio.getSection(biassec, iraf_format=True)
#get the data section
datasec = saltkey.get('DATASEC',struct[i])
dy1,dy2, dx1, dx2 = saltio.getSection(datasec, iraf_format=True)
#setup the overscan region
if subover:
yarr=np.arange(y1,y2, dtype=float)
data=struct[i].data
odata=struct[i].data[y1:y2,x1:x2]
if median:
odata=np.median((struct[i].data[y1:y2,x1:x2]),axis=1)
olevel=np.median((struct[i].data[y1:y2,x1:x2]))
saltkey.new('OVERSCAN','%f' % (olevel),'Overscan median value', struct[i])
else:
odata=np.mean((struct[i].data[y1:y2,x1:x2]),axis=1)
olevel=np.mean((struct[i].data[y1:y2,x1:x2]))
saltkey.new('OVERSCAN','%f' % (olevel),'Overscan mean value', struct[i])
#fit the overscan region
ifit=saltfit.interfit(yarr, odata, function=function, \
order=order, thresh=rej_hi, niter=niter)
try:
ifit.interfit()
coeffs=ifit.coef
ofit=ifit(yarr)
omean, omed, osigma=saltstat.iterstat((odata-ofit), sig=3, niter=5)
except ValueError:
#catch the error if it is a zero array
ofit=np.array(yarr)*0.0
osigma=0.0
except TypeError:
#catch the error if it is a zero array
ofit=np.array(yarr)*0.0
osigma=0.0
#subtract the overscan region
for j in range(len(struct[i].data[0])):
struct[i].data[y1:y2,j] -= ofit
#report the information
if log:
message = '%25s[%1d] %8.2f %3d %7.2f %3d' % \
(infile, i, olevel, order, osigma, niter)
log.message(message, with_stdout=verbose, with_header=False)
#add the statistics to the image header
saltkey.new('OVERRMS','%f' % (osigma),'Overscan RMS value', struct[i])
#update the variance frame
if saltkey.found('VAREXT', struct[i]):
vhdu=saltkey.get('VAREXT', struct[i])
try:
vdata=struct[vhdu].data
#The bias level should not be included in the noise from the signal
for j in range(len(struct[i].data[0])):
vdata[y1:y2,j] -= ofit
#add a bit to make sure that the minimum error is the rednoise
rdnoise= saltkey.get('RDNOISE',struct[i])
vdata[vdata<rdnoise**2]=rdnoise**2
struct[vhdu].data=vdata+osigma**2
except Exception as e:
msg='Cannot update the variance frame in %s[%i] because %s' % (infile, vhdu, e)
raise SaltError(msg)
#plot the overscan region
if plotover:
plt.plot(yarr, odata)
plt.plot(yarr, ofit)
#trim the data and update the headers
if trim:
struct[i].data=struct[i].data[dy1:dy2,dx1:dx2]
datasec = '[1:'+str(dx2-dx1)+',1:'+str(dy2-dy1)+']'
saltkey.put('DATASEC',datasec,struct[i])
#update the variance frame
if saltkey.found('VAREXT', struct[i]):
vhdu=saltkey.get('VAREXT', struct[i])
struct[vhdu].data=struct[vhdu].data[dy1:dy2,dx1:dx2]
datasec = '[1:'+str(dx2-dx1)+',1:'+str(dy2-dy1)+']'
saltkey.put('DATASEC',datasec,struct[vhdu])
#update the BPM frame
if saltkey.found('BPMEXT', struct[i]):
bhdu=saltkey.get('BPMEXT', struct[i])
struct[bhdu].data=struct[bhdu].data[dy1:dy2,dx1:dx2]
datasec = '[1:'+str(dx2-dx1)+',1:'+str(dy2-dy1)+']'
saltkey.put('DATASEC',datasec,struct[bhdu])
#subtract the master bias if necessary
if subbias and bstruct:
struct[i].data -= bstruct[i].data
#update the variance frame
if saltkey.found('VAREXT', struct[i]):
vhdu=saltkey.get('VAREXT', struct[i])
try:
vdata=struct[vhdu].data
struct[vhdu].data=vdata+bstruct[vhdu].data
except Exception as e:
msg='Cannot update the variance frame in %s[%i] because %s' % (infile, vhdu, e)
raise SaltError(msg)
if plotover:
plt.ioff()
plt.show()
return struct
| 5,337,858
|
def _add_centered_square(ax, xy, area, lablz, text_size, **kwargs):
"""
create hinton diagram element square with variable size according to weight matrix element value
"""
size = np.sqrt(area)
textz = fto_texts(lablz)
loc = np.asarray(xy) - size/2.
rect = mpatches.Rectangle(loc, size, size, **kwargs)
label = ax.annotate(textz, xy=loc + size/2., fontsize=text_size, ha='center', va='center')
ax.add_patch(rect)
| 5,337,859
|
def main(args={}, version=False, arguments=False):
"""set up user2edd
set up config file(s)
set up logger
:param checkOnto_: running checkOntology or not
"""
# init default
_default_logger()
# setup package path
_setup_path()
# read configuration file(s)
config = _setup_cfg()
# cli.py
# # read command line arguments
# args = _parse()
if args:
version = args.version
arguments = args.arguments
if version:
_show_version()
# overwrite configuration file parameter with parser arguments
config.set_args(args, dots=True)
# read logging configuration file
_setup_logger(config)
# check configuration file
_chk_config(config)
# print parameters use from config file and/or inline command
_show_arguments(config, arguments)
| 5,337,860
|
def help_command(update: Update, context: CallbackContext) -> None:
"""Send a message when the command /help is issued."""
update.message.reply_text('Тебе уже не помочь')  # Russian: "You're already beyond help"
| 5,337,861
|
def main(input_dir: Path, resample: bool):
"""Process the BAVED dataset at location INPUT_DIR."""
paths = list(input_dir.glob("?/*.wav"))
resample_dir = Path("resampled")
if resample:
resample_audio(paths, resample_dir)
write_filelist(resample_dir.glob("*.wav"), "files_all")
levels = {}
speakers = {}
genders = {}
ages = {}
words = {}
for path in paths:
spk, gen, age, word, level, _ = path.stem.split("-")
levels[path.stem] = emotion_level[int(level)]
speakers[path.stem] = spk
genders[path.stem] = gen.upper()
ages[path.stem] = int(age)
words[path.stem] = word_map[int(word)]
write_annotations(levels, "level")
write_annotations(speakers, "speaker")
write_annotations(genders, "gender")
write_annotations(ages, "age")
write_annotations(words, "word")
write_annotations({p.stem: "ar" for p in paths}, "language")
| 5,337,862
|
def _BAIL( functionName, message ):
"""
Universal failure mode message.
"""
print( "= = ERROR in function %s : %s" % ( functionName, message), file=sys.stderr )
sys.exit(1)
| 5,337,863
|
def getItemStatus(selected_item, store_id, num_to_average):
""" Method pulls the stock status of the selected item in the given store
:param selected_item: current item being processed (toilet paper or hand sanitizer)
:param store_id: id of the current store
:param num_to_average: number of recent status updates to include in the cumulative moving average status update
:return: returns the status of the item (integer between 1-5)
"""
db = mysql.connect()
cursor = db.cursor()
query = "SELECT rating FROM status_list WHERE id = '" + str(store_id) + "' AND item = " + str(selected_item) +";"
cursor.execute(query)
status_values = []
status = cursor.fetchall()
moving_average = 0
for i in range(len(status)):
status_values.append(5-(status[i][0])+1)
if len(status_values) != 0:
for i in range(min(len(status_values),num_to_average)):
moving_average += status_values[i]
moving_average = moving_average/min(num_to_average, len(status_values))
cursor.close()
db.close()
return round(moving_average)
| 5,337,864
|
def iter_file(file_path, options={}):
"""Opens a file at the given file_path and iterates over its
contents, yielding FecItem instances, which consist of data and
data_type attributes. The data_type attribute can be one of "header",
"summary", "itemization", "text", or "F99_text". The data attribute is a
dictionary for all data types except for "F99_text", for which it is a
string. This method avoids loading the entire filing into memory, as the
from_file method does.
"""
with open(file_path, 'r') as file:
for item in fecparser.iter_lines(file, options=options):
yield item
| 5,337,865
|
def assert_equal(obj1, obj2, save_if_different=False, msg='', name1='value1', name2='value2', ignore_keys=None,
**kwargs):
"""
This will assert obj1 and obj2 are the same and it will attempt to report the difference.
NOTE: if the objects differ and are complicated, use save_if_different and compare the dumps with a diff tool such as Beyond Compare.
The files will be saved in the _scratch folder.
Use ignore_keys to list the keys that you don't want to compare.
:param msg:
:param obj1: obj of comparison 1
:param obj2: obj of comparison 2
:param save_if_different: bool if to save to a file the differences
:param name1: str name of the first object,
:param name2: str name of the second object,
:param ignore_keys: list of parameters to exclude from the comparison
:param kwargs: dict of args for diff obj
:return: None
"""
global _index_count
diff = diff_obj(obj1, obj2, ignore_keys=ignore_keys, **kwargs)
if diff:
report = '\n\n'.join(
['obj%s\n %s = %s\n %s = %s' % (key, name1, repr(value1), name2, repr(value2))
for key, value1, value2 in diff])
if save_if_different:
try:
assert os.path.exists(FOLDER), 'Scratch Folder %s does not exist' % FOLDER
with open('%s/%s-%s.json' % (FOLDER, _index_count, name1), 'w') as fn:
pprint(obj1, stream=fn, indent=2, depth=10, width=80)
with open('%s/%s-%s.json' % (FOLDER, _index_count, name2), 'w') as fn:
pprint(obj2, stream=fn, indent=2, depth=10, width=80)
_index_count += 1
except Exception as e:
log.error('assert_equal exception in save different %s' % e)
if msg:
msg += '\n'
raise Exception(msg + 'Data different\n' + report)
| 5,337,866
|
def filter_and_copy_table(tab, to_remove):
""" Filter and copy a FITS table.
Parameters
----------
tab : FITS Table object
to_remove : [int, ...]
list of indices to remove from the table
returns FITS Table object
"""
nsrcs = len(tab)
mask = np.zeros((nsrcs), '?')
mask[to_remove] = True
inv_mask = np.invert(mask)
out_tab = tab[inv_mask]
return out_tab
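The same boolean-mask logic works on any maskable table; a sketch with an astropy Table and made-up values (the original function expects a FITS table object):
import numpy as np
from astropy.table import Table
tab = Table({'ra': [10.0, 11.0, 12.0], 'dec': [0.1, 0.2, 0.3]})
print(len(filter_and_copy_table(tab, [1])))  # 2 -- row index 1 removed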
| 5,337,867
|
def _archive_logs(conn, node_type, logger, node_ip):
"""Creates an archive of all logs found under /var/log/cloudify plus
journalctl.
"""
archive_filename = 'cloudify-{node_type}-logs_{date}_{ip}.tar.gz'.format(
node_type=node_type,
date=get_host_date(conn),
ip=node_ip
)
archive_path = '/tmp/{}'.format(archive_filename)
journalctl_destination_path = '/var/log/cloudify/journalctl.log'
conn.sudo(
'bash -c "journalctl > /tmp/jctl && mv /tmp/jctl {0}"'
.format(journalctl_destination_path),
)
logger.info('Creating logs archive in {0}: {1}'.format(node_type,
archive_path))
conn.sudo(
'tar -czf {0} -C /var/log cloudify '
'--warning=no-file-changed'.format(archive_path),
warn=True
)
conn.run('test -e {0}'.format(archive_path))
conn.sudo('rm {0}'.format(journalctl_destination_path))
return archive_path
| 5,337,868
|
def _check_n_dim(x, n_dims):
"""Raise error if the number of dimensions of the input data is not consistent with the expected value.
Args:
x (array-like tensor): input data, shape (n_samples, I_1, I_2, ..., I_N)
n_dims (int): number of dimensions expected, i.e. N+1
"""
if not x.ndim == n_dims:
error_msg = "The expected number of dimensions is %s but it is %s for given data" % (n_dims, x.ndim)
logging.error(error_msg)
raise ValueError(error_msg)
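For example:
import numpy as np
x = np.zeros((10, 28, 28))
_check_n_dim(x, 3)    # passes silently
# _check_n_dim(x, 4)  # would raise ValueError (and log the mismatch)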
| 5,337,869
|
def reset():
"""Reset your Twitter auth information.
"""
app_dir = click.get_app_dir(APP_NAME)
path = os.path.join(app_dir, 'config.ini')
if os.path.exists(path): os.remove(path)
click.echo('Configuration has been reset.')
| 5,337,870
|
def get_scripts():
"""Returns the list of available scripts
Returns:
A dict holding the result message
"""
return Response.ok("Script files successfully fetched.", {
"scripts": list_scripts()
})
| 5,337,871
|
async def invoke(
fn: callbacks.BaseFn,
*args: Any,
settings: Optional[configuration.OperatorSettings] = None,
cause: Optional[causation.BaseCause] = None,
**kwargs: Any,
) -> Any:
"""
Invoke a single function, but safely for the main asyncio process.
Used mostly for handler functions, and potentially slow & blocking code.
Other callbacks are called directly, and are expected to be synchronous
(such as handler-selecting (lifecycles) and resource-filtering (``when=``)).
A full set of the arguments is provided, expanding the cause to some easily
usable aliases. The function is expected to accept ``**kwargs`` for the args
that it does not use -- for forward compatibility with the new features.
The synchronous methods are executed in the executor (threads or processes),
thus making it non-blocking for the main event loop of the operator.
See: https://pymotw.com/3/asyncio/executors.html
"""
if is_async_fn(fn):
kwargs = build_kwargs(cause=cause, _sync=False, **kwargs)
result = await fn(*args, **kwargs) # type: ignore
else:
kwargs = build_kwargs(cause=cause, _sync=True, **kwargs)
# Not that we want to use functools, but for executors kwargs, it is officially recommended:
# https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor
real_fn = functools.partial(fn, *args, **kwargs)
        # Copy the asyncio context from the current thread to the handler's thread.
# It can be copied 2+ times if there are sub-sub-handlers (rare case).
context = contextvars.copy_context()
real_fn = functools.partial(context.run, real_fn)
# Prevent orphaned threads during daemon/handler cancellation. It is better to be stuck
# in the task than to have orphan threads which deplete the executor's pool capacity.
# Cancellation is postponed until the thread exits, but it happens anyway (for consistency).
# Note: the docs say the result is a future, but typesheds say it is a coroutine => cast()!
loop = asyncio.get_event_loop()
executor = settings.execution.executor if settings is not None else None
future = cast(aiotasks.Future, loop.run_in_executor(executor, real_fn))
cancellation: Optional[asyncio.CancelledError] = None
while not future.done():
try:
await asyncio.shield(future) # slightly expensive: creates tasks
except asyncio.CancelledError as e:
cancellation = e
if cancellation is not None:
raise cancellation
result = future.result()
return result
| 5,337,872
|
def test_null_count():
""" method to test null count, should return 0 nulls """
assert test_null.null_count(s1) == 0
| 5,337,873
|
def getLatestFare(_origin, _destination, _date):
"""
_origin and _destination take airport codes , e.g. BLR for Bangalore
_date in format YYYY-MM-DD e.g.2016-10-30
    Returns either:
    the 10 latest results from the results page, or
    the single latest result from the results page.
"""
try:
_url = base_url.format(_origin, _destination, _date)
soup = getSoup(_url)
fare_list = soup.find('ul').find_all('li',{"class":["active","not-active"]})
fares = []
        for fare in fare_list:
            fares.append({
                'price': getPrice(fare.find('a').find('div').findChildren()[2].string),
                'date': fare.find('a').find('div').findChildren()[0].string,
            })
except Exception:
sys.exit("No Route found.")
return fares
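# Illustrative call: the airport codes and date are placeholders, the call performs a
# live scrape via base_url/getSoup, and the entry layout shown below is schematic only.
fares = getLatestFare('BLR', 'DEL', '2016-10-30')
# e.g. [{'price': '4,321', 'date': '30 Oct'}, ...]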
| 5,337,874
|
def plot_noisy_linear_1d(axes, num_samples, weights, sigma, limits, rng):
"""
Generate and plot points from a noisy single-feature linear model,
along with a line showing the true (noiseless) relationship.
# Arguments
axes: a Matplotlib Axes object into which to plot
num_samples: number of samples to generate
(ie, the number of rows in the returned X
and the length of the returned y)
weights: vector defining the model
(including a bias term at index 0)
sigma: standard deviation of the additive noise
limits: a tuple (low, high) specifying the value
range of all the input features x_i
rng: an instance of numpy.random.Generator
from which to draw random numbers
# Returns
None
"""
assert(len(weights)==2)
X, y = generate_noisy_linear(num_samples, weights, sigma, limits, rng)
# TODO: do the plotting
utils.plot_unimplemented ( axes, 'Noisy 1D Linear Model' )
| 5,337,875
|
def home():
"""Renders the home page."""
return render_template(
'index.html',
title='Rococal',
year=datetime.now().year,
)
| 5,337,876
|
def Dx(x):
"""Nombre de survivants actualisés.
Args:
x: l'âge.
Returns:
Nombre de survivants actualisés.
"""
return lx(x)*v**x
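# Worked illustration of the commutation value Dx = lx(x) * v**x; lx and v are
# module-level objects not shown here, so the stand-in values below are assumptions.
v_demo = 1 / 1.02             # discount factor for a 2% interest rate
lx_demo = 80_000              # hypothetical number of survivors at age 65
print(lx_demo * v_demo**65)   # ~22,084 discounted survivors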
| 5,337,877
|
def tracek(k,aee,aii,see,sii,tau=1,alpha=0):
""" Trace of recurrently connected network of E,I units, analytically determined
input:
k: spatial frequency
    aee: amplitude of E to E connectivity
    aii: amplitude of I to I connectivity
see: standard deviation/width of E to E connectivity
sii: standard deviation/width of I to I connectivity
tau: ratio of excitatory to inhibitory time constant, default is 1
alpha: float, 0<=alpha<=1, strength of self-inhibitory connections
"""
aii_s = aii*(1-alpha)
aii_a = aii*alpha
return -1 - (1 + aii_s*H(k,sii) + aii_a)/tau + aee*H(k,see)
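# Hedged check of tracek at k = 0, assuming the module-level kernel H is normalised
# so that H(0, s) == 1 (H is defined elsewhere in this module and not shown here).
# With alpha = 0 the expression collapses to -1 - (1 + aii) / tau + aee, so e.g.
# aee = 2.0, aii = 1.0, tau = 1.0 gives tracek(0, 2.0, 1.0, see, sii) == -1.0.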
| 5,337,878
|
def set_working_dir_repo_root(func):
"""
    Decorator that checks whether the current working directory is the root of
    the git repository and, if not, changes the working directory to that root.
    Returns
    -------
    The wrapped function, which changes to the repository root before calling the decorated function.
"""
def inner(*args, **kwargs):
git_repo = git.Repo(".", search_parent_directories=True)
git_root = git_repo.working_tree_dir
if os.getcwd() != git_root:
logger.info(
f"current working dir: {os.getcwd()},"
f"is not correctly set as repo root, "
f"so changing to {git_root}"
)
os.chdir(git_root)
else:
logger.info(
f"current working dir correctly set as" f" repo root {os.getcwd()}"
)
result = func(*args, **kwargs)
return result
return inner
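# Hedged usage sketch of the decorator; the decorated function, its body and the
# config path are illustrative and assume the code runs inside a git checkout.
@set_working_dir_repo_root
def load_default_config():
    # Relative paths now resolve against the repository root.
    with open("configs/default.yaml") as fh:
        return fh.read()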
| 5,337,879
|
def get_verbose_name(model_or_queryset, field):
"""
    Returns the value of the ``verbose_name`` of a field;
    typically used in templates where the queryset is dynamic.
:param model_or_queryset: target object
:type model_or_queryset: :class:`django.db.models.Model`, :class:`django.db.query.Queryset`
:param field: field to get the verbose name
:type field: :class:`django.db.models.Field`, basestring
:return: translated field verbose name
:rtype: unicode
Valid uses:
>>> from django.contrib.auth.models import User, Permission
>>> user = User()
>>> p = Permission()
>>> print unicode(get_verbose_name(user, 'username'))
username
>>> print unicode(get_verbose_name(User, 'username'))
username
>>> print unicode(get_verbose_name(User.objects.all(), 'username'))
username
>>> print unicode(get_verbose_name(User.objects, 'username'))
username
>>> print unicode(get_verbose_name(User.objects, user._meta.get_field_by_name('username')[0]))
username
>>> print unicode(get_verbose_name(p, 'content_type.model'))
python model class name
>>> get_verbose_name(object, 'aaa')
Traceback (most recent call last):
...
ValueError: `get_verbose_name` expects Manager, Queryset or Model as first parameter (got <type 'type'>)
"""
if isinstance(model_or_queryset, models.Manager):
model = model_or_queryset.model
elif isinstance(model_or_queryset, QuerySet):
model = model_or_queryset.model
elif isinstance(model_or_queryset, models.Model):
model = model_or_queryset
elif type(model_or_queryset) is models.base.ModelBase:
model = model_or_queryset
else:
raise ValueError('`get_verbose_name` expects Manager, Queryset or Model as first parameter (got %s)' % type(
model_or_queryset))
if isinstance(field, basestring):
field = get_field_by_path(model, field)
elif isinstance(field, models.Field):
field = field
else:
raise ValueError('`get_verbose_name` field_path must be string or Field class')
return field.verbose_name
| 5,337,880
|
def if_then_else(cond, t, f, span=None):
"""Conditional selection expression.
Parameters
----------
cond : PrimExpr
The condition
t : PrimExpr
The result expression if cond is true.
f : PrimExpr
The result expression if cond is false.
span : Optional[Span]
The location of this operator in the source.
Returns
-------
result : Node
The result of conditional expression.
Note
----
Unlike Select, if_then_else will not execute
the branch that does not satisfy the condition.
You can use it to guard against out of bound access.
Unlike Select, if_then_else cannot be vectorized
if some lanes in the vector have different conditions.
"""
return _ffi_api._OpIfThenElse(convert(cond), convert(t), convert(f), span)
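# Hedged usage sketch: guarding an out-of-bounds read inside a te.compute body.
# The buffer name and size are illustrative; tvm.tir.if_then_else is assumed to
# be the public entry point for the operator defined above.
import tvm
from tvm import te

n = 16
A = te.placeholder((n,), name="A")
# Shift left by one, padding the last element with zero instead of reading past the end.
B = te.compute((n,), lambda i: tvm.tir.if_then_else(i + 1 < n, A[i + 1], 0.0), name="B")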
| 5,337,881
|
def get_args() -> argparse.Namespace:
"""Get arguments."""
parser = argparse.ArgumentParser(description="Dump Instance")
parser.add_argument(
"network_state_path", type=str, help="File path to network state dump JSON."
)
parser.add_argument("--host", type=str, help="Host to bind to", default="127.0.0.1")
parser.add_argument(
"--port", type=int, help="Port to run on (defaults to 3000)", default=3000
)
parser.add_argument(
"--log-level",
type=str.upper,
help="Log level",
default="INFO",
choices=["DEBUG", "INFO", "WARNING", "ERROR"],
)
parser.add_argument(
"--events-to-replay-path",
type=str,
help=(
"File path to events to replay JSON. Events provided by "
"--combined-replay-dump-path option will be first, followed by events "
"from this file."
),
default=None,
)
parser.add_argument(
"--command-results-path",
type=str,
help=(
"File path to command result JSON. Command results provided by "
"--combined-replay-dump-path option will be first, followed by results "
"from this file."
),
default=None,
)
parser.add_argument(
"--combined-replay-dump-path",
type=str,
help=(
"File path to the combined event and command result dump JSON. Events and "
"command results will be extracted in the order they were received."
),
default=None,
)
return parser.parse_args()
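# Illustrative invocation from the command line; the script name is a placeholder:
#   python dump_instance_server.py network_state.json --port 3001 --log-level debug
# (--log-level is upper-cased by type=str.upper before the choices check, so
# lower-case values are accepted.)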
| 5,337,882
|
def get_resources(filetype):
"""Find all HTML template or JavaScript files in the package.
Caches the results for quick access.
Parameters
----------
filetype : {'templates', 'js'}
The type of file resource needed.
Returns
-------
:class:`dict`
A dictionary mapping filename to the contents of the file.
Raises
------
ValueError
If `filetype` is unknown.
"""
global _resource_cache
if filetype not in _resource_cache:
raise ValueError("Unknown filetype '{0}' for get_resources()!".format(filetype))
if _resource_cache[filetype] is None:
_resource_cache[filetype] = dict()
for f in resource_listdir('prospect', filetype):
if not f.startswith("."):
_resource_cache[filetype][f] = resource_string('prospect', filetype + '/' + f).decode('utf-8')
return _resource_cache[filetype]
| 5,337,883
|
def sum_fn(xnum, ynum):
""" A function which performs a sum """
return xnum + ynum
| 5,337,884
|
def _process_gamemap_intents(intent: InputIntentType):
"""
Process intents for the player turn game state.
"""
player = world.get_player()
position = world.get_entitys_component(player, Position)
possible_move_intents = [InputIntent.DOWN, InputIntent.UP, InputIntent.LEFT, InputIntent.RIGHT]
possible_skill_intents = [InputIntent.SKILL0, InputIntent.SKILL1, InputIntent.SKILL2, InputIntent.SKILL3,
InputIntent.SKILL4, InputIntent.SKILL5]
## Player movement
if intent in possible_move_intents and position:
direction = _get_pressed_direction(intent)
target_tile = world.get_tile((position.x, position.y))
possible_moves = [Direction.UP, Direction.DOWN, Direction.LEFT, Direction.RIGHT]
if direction in possible_moves:
_process_skill_use(player, Move, target_tile, direction)
## Use a skill
elif intent in possible_skill_intents and position:
skill_name = _get_pressed_skills_name(intent)
current_tile = world.get_tile((position.x, position.y))
if skill_name:
# is skill ready to use
if world.can_use_skill(player, skill_name):
skill = world.get_known_skill(player, skill_name)
if skill:
# if auto targeting use the skill
if skill.targeting_method == TargetingMethod.AUTO:
                        # pass centre as it doesn't matter, the skill will pick the right direction
_process_skill_use(player, skill, current_tile, Direction.CENTRE)
else:
# trigger targeting overlay
state.set_new(GameState.TARGETING)
state.set_active_skill(skill_name)
ui.update_targeting_overlay(True, skill_name)
    ## Show actor info - we're in GAMEMAP so it can't be visible
elif intent == InputIntent.ACTOR_INFO_TOGGLE:
# show
state.set_new(GameState.MENU)
ui.set_element_visibility(UIElement.ACTOR_INFO, True)
## Exit game
elif intent == InputIntent.EXIT:
state.set_new(GameState.EXIT_GAME)
| 5,337,885
|
def categorical_iou(y_true, y_pred, target_classes=None, strict=True):
"""画像ごとクラスごとのIoUを算出して平均するmetric。
Args:
target_classes: 対象のクラスindexの配列。Noneなら全クラス。
strict: ラベルに無いクラスを予測してしまった場合に減点されるようにするならTrue、ラベルにあるクラスのみ対象にするならFalse。
"""
axes = list(range(1, K.ndim(y_true)))
y_classes = K.argmax(y_true, axis=-1)
p_classes = K.argmax(y_pred, axis=-1)
active_list = []
iou_list = []
for c in target_classes or range(y_true.shape[-1]):
with tf.name_scope(f"class_{c}"):
y_c = K.equal(y_classes, c)
p_c = K.equal(p_classes, c)
inter = K.sum(K.cast(tf.math.logical_and(y_c, p_c), "float32"), axis=axes)
union = K.sum(K.cast(tf.math.logical_or(y_c, p_c), "float32"), axis=axes)
active = union > 0 if strict else K.any(y_c, axis=axes)
iou = inter / (union + K.epsilon())
active_list.append(K.cast(active, "float32"))
iou_list.append(iou)
return K.sum(iou_list, axis=0) / (K.sum(active_list, axis=0) + K.epsilon())
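# Hedged usage sketch: exposing categorical_iou as a named Keras metric for an
# already-built segmentation model; the class indices and the model are assumptions.
import functools

fg_iou = functools.partial(categorical_iou, target_classes=[1, 2], strict=False)
fg_iou.__name__ = "fg_iou"  # Keras shows this name in training logs
# model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=[fg_iou])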
| 5,337,886
|
def convert_string(string: str, type: str) -> str:
"""Convert the string by [e]ncrypting or [d]ecrypting.
:param type: String 'e' for encrypt or 'd' for decrypt.
:return: [en/de]crypted string.
"""
hash_string = hash_()
map_ = mapping(hash_string)
if type.lower() == 'e':
output = encrypt(string, map_)
elif type.lower() == 'd':
map_ = {v: k for k, v in map_.items()}
output = decrypt(string, map_)
else:
output = ''
return output
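# Round-trip sketch; the plaintext is illustrative and the round trip assumes the
# module-level hash_()/mapping() pair is deterministic between the two calls.
secret = convert_string('hello world', 'e')
assert convert_string(secret, 'd') == 'hello world'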
| 5,337,887
|
def create_saml_security_context(token, private_key):
"""
Create a security context for SAML token based
authentication scheme
:type token: :class:`str`
:param token: SAML Token
:type private_key: :class:`str`
:param private_key: Absolute file path of the private key of the user
:rtype: :class:`vmware.vapi.core.SecurityContext`
:return: Newly created security context
"""
private_key_data = None
with open(private_key, 'r') as fp:
private_key_data = fp.read()
return SecurityContext({SCHEME_ID: SAML_SCHEME_ID,
PRIVATE_KEY: private_key_data,
SAML_TOKEN: token,
SIGNATURE_ALGORITHM: DEFAULT_ALGORITHM_TYPE})
| 5,337,888
|
def test_augment_angles():
"""
X_all1,y_all1,es_all1 = load_boxworld_data('lab2_big_60v_10t')
X_all2,y_all2,es_all2 = load_boxworld_data('lab2_big_60v_30t')
X,y,es = augment_angles(X_all1,y_all1,es_all1,36,60)
assert np.array_equal(X,X_all1)
assert np.array_equal(y,y_all1)
assert es == es_all1
X,y,es = augment_angles(X_all1,y_all1,es_all1,12,60)
assert np.array_equal(X,X_all2)
assert np.array_equal(y,y_all2)
assert es == es_all2
X,y,es = augment_angles(X_all2,y_all2,es_all2,12,60)
assert np.array_equal(X,X_all2)
assert np.array_equal(y,y_all2)
assert es == es_all2
"""
"""
X,y,es = augment_angles(X_all3,y_all3,es_all3,36,60)
assert np.array_equal(X,X_all1)
assert np.array_equal(y,y_all1)
assert es == es_all1
"""
"""
X,y,es = augment_angles(X_all3,y_all3,es_all3,12,60)
assert np.array_equal(X,X_all2)
assert np.array_equal(y,y_all2)
assert es == es_all2
"""
for ds in ['lab2_big_20v_10t','lab2_big_20v_10t_t']:
X_all3,y_all3,es_all3 = load_boxworld_data(ds)
X,y,es = augment_angles(X_all3,y_all3,es_all3,36,20)
assert np.array_equal(X,X_all3)
assert np.array_equal(y,y_all3)
assert es == es_all3
Xs,ys,ess = augment_angles(X_all3,y_all3,es_all3,36,60)
X,y,es = augment_angles(X_all3,y_all3,es_all3,36,20)
X,y,es = augment_angles(X,y,es,36,60)
assert np.array_equal(X,Xs)
assert np.array_equal(y,ys)
assert es == ess
X2,y2,es2 = augment_angles(X_all3,y_all3,es_all3,36,20)
for a in range(40,361,20):
X1,y1,es1 = X2,y2,es2
X2,y2,es2 = augment_angles(X_all3,y_all3,es_all3,36,a)
assert np.sum(X1)<np.sum(X2)
assert np.all(np.sum(X1,axis=1)<=np.sum(X2,axis=1))
assert np.all(y1==y2)
assert es1['view_angle']<es2['view_angle']
#this is only relevant for _t
X,_,_ = augment_angles(X_all3,y_all3,es_all3,36,360)
assert np.sum(X)==np.prod(X.shape)
| 5,337,889
|
def _save_txt(data, file_path):
"""
将一个list的数组写入txt文件里
:param data:
:param file_path:
:return:
"""
if not isinstance(data, list):
data = [data]
with open(file_path, mode='w', encoding='utf8') as f:
f.write('\n'.join(data))
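# Minimal illustration: writes the three lines below to a UTF-8 text file.
_save_txt(['first line', 'second line', 'third line'], '/tmp/example.txt')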
| 5,337,890
|
def cli():
"""Command Line Tool to clone and restore RDS DB instance
    or cluster for Blue-Green deployments. Please see the sub commands
    below. You can also use the options below to get more help.
NOTE: Please ensure the RDS instance ID is stored in your environment
variable as DBINSTANCEID
"""
pass
| 5,337,891
|
def predictIsDeviceLeftRunning():
"""
Returns if the device is presumed left running without a real need
---
parameters:
name: -device_id
in: query
description: the device id for which the prediction is made
required: false
style: form
explode: true
schema:
type: integer
format: int32
responses:
"200":
description: A boolean, True or False, if the device is left running or not
content:
application/json:
schema:
type: boolean
"400":
description: Bad argument
"""
device_id = int(request.args.get("id", -1))
if device_id == -1:
        return jsonify("Bad argument"), 400
device = DeviceObject(UserObject.getMockUser(), device_id)
return jsonify(device.predictDeviceLeftRunning())
| 5,337,892
|
def load(
source: AnyPath,
wordnet: Wordnet,
get_synset_id: Optional[Callable] = None,
) -> Freq:
"""Load an Information Content mapping from a file.
Arguments:
source: A path to an information content weights file.
wordnet: A :class:`wn.Wordnet` instance with synset
identifiers matching the offsets in the weights file.
get_synset_id: A callable that takes a synset offset and part
of speech and returns a synset ID valid in *wordnet*.
Raises:
:class:`wn.Error`: If *wordnet* does not have exactly one
lexicon.
Example:
>>> import wn, wn.ic
>>> pwn = wn.Wordnet('pwn:3.0')
>>> path = '~/nltk_data/corpora/wordnet_ic/ic-brown.dat'
>>> freq = wn.ic.load(path, pwn)
"""
source = Path(source).expanduser().resolve(strict=True)
assert len(wordnet.lexicons()) == 1
lexid = wordnet.lexicons()[0].id
if get_synset_id is None:
get_synset_id = synset_id_formatter(prefix=lexid)
freq = _initialize(wordnet, 0.0)
with source.open() as icfile:
for offset, pos, weight, is_root in _parse_ic_file(icfile):
ssid = get_synset_id(offset=offset, pos=pos)
# synset = wordnet.synset(ssid)
freq[pos][ssid] = weight
if is_root:
freq[pos][None] += weight
return freq
| 5,337,893
|
def save_depth_png(depth, png, png_scale):
"""save depth map
Args:
depth (array, [HxW]): depth map
png (str): path for saving depth map PNG file
png_scale (float): scaling factor for saving PNG file
"""
depth = np.clip(depth, 0, 65535 / png_scale)
depth = (depth * png_scale).astype(np.uint16)
cv2.imwrite(png, depth)
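# Hedged usage sketch: a synthetic depth map written as a 16-bit PNG; the scale of
# 256 (clipping depths above ~256 m) is a common convention, not required here.
import numpy as np

demo_depth = np.random.uniform(0.5, 80.0, size=(192, 640)).astype(np.float32)
save_depth_png(demo_depth, 'depth_000000.png', png_scale=256.0)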
| 5,337,894
|
def iso_time_str() -> str:
"""Return the current time as ISO 8601 format
e.g.: 2019-01-19T23:20:25.459Z
"""
now = datetime.datetime.utcnow()
return now.isoformat()[:-3]+'Z'
| 5,337,895
|
def mass(snap: Snap) -> Quantity:
"""Particle mass."""
massoftype = snap._file_pointer['header/massoftype'][()]
particle_type = np.array(
np.abs(get_dataset('itype', 'particles')(snap)).magnitude, dtype=int
)
return massoftype[particle_type - 1] * snap._array_code_units['mass']
| 5,337,896
|
def nothing():
"""DO nothing."""
pass
| 5,337,897
|
def _build_pytest_test_results_path(cmake_build_path):
"""
Build the path to the Pytest test results directory.
:param cmake_build_path: Path to the CMake build directory.
:return: Path to the Pytest test results directory.
"""
pytest_results_path = os.path.join(cmake_build_path, TEST_RESULTS_DIR, 'Pytest')
return pytest_results_path
| 5,337,898
|
async def create_object_detection_training(
train_object_detection_model_request: TrainImageModel,
token: str = Depends(oauth2_scheme),
):
"""[API router to train AutoML object detection model]
Args:
train_object_detection_model_request (TrainImageModel): [Train AutoML Object detection model request]
token (str, optional): [Bearer token for authentication]. Defaults to Depends(oauth2_scheme).
Raises:
HTTPException: [Unauthorized exception when invalid token is passed]
error: [Exception in underlying controller]
Returns:
[TrainModelResponse]: [AutoML train object detection model response]
"""
try:
logging.info("Calling /gcp/automl/train_object_detection_model endpoint")
logging.debug(f"Request: {train_object_detection_model_request}")
if decodeJWT(token=token):
response = TrainModelController().train_object_detection_model_controller(
request=train_object_detection_model_request
)
return TrainModelResponse(**response)
else:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid access token",
headers={"WWW-Authenticate": "Bearer"},
)
except Exception as error:
logging.error(
f"Error in /gcp/automl/train_object_detection_model endpoint: {error}"
)
raise error
| 5,337,899
|