content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import io
import mimetypes
import os
import time
from zlib import adler32

from .wrappers import Response
def send_file(request, filepath_or_fp, mimetype=None, as_attachment=False,
              attachment_filename=None, add_etags=True, cache_timeout=60 * 60 * 12,
              conditional=False, use_x_sendfile=False, response_class=None):
    """Sends the contents of a file to the client.

    This will use the most efficient method available and configured.  By
    default it will try to use the WSGI server's file_wrapper support.
    Alternatively you can set the `use_x_sendfile` parameter to `True` to
    directly emit an `X-Sendfile` header.  This however requires support of
    the underlying webserver for `X-Sendfile`.

    By default it will try to guess the mimetype for you, but you can
    also explicitly provide one.  For extra security you probably want
    to send certain files as attachment (HTML for instance).  The mimetype
    guessing requires a `filename` or an `attachment_filename` to be
    provided.

    Please never pass filenames to this function from user sources without
    checking them first.  Something like this is usually sufficient to
    avoid security problems::

        if '..' in filename or filename.startswith('/'):
            raise NotFound()

    :param request: the current request; its WSGI ``environ`` is used by the
        file wrapper.
    :param filepath_or_fp: the absolute path of the file to send, or an open
        file object.  With a file object `X-Sendfile` might not work and the
        traditional method is used; make sure the file pointer is positioned
        at the start of the data to send.
    :param mimetype: the mimetype of the file if provided, otherwise auto
        detection happens.
    :param as_attachment: set to `True` to send this file with a
        `Content-Disposition: attachment` header.
    :param attachment_filename: the filename for the attachment if it differs
        from the file's filename.
    :param add_etags: set to `False` to disable attaching of etags.
    :param cache_timeout: the timeout in seconds for the headers.
    :param conditional: set to `True` to enable conditional responses.
    :param use_x_sendfile: set to `True` to directly emit an `X-Sendfile`
        header.  This however requires support of the underlying webserver.
    :param response_class: set to overwrite the default Response class.

    --------------------------------
    Copied almost verbatim from Flask <http://flask.pocoo.org/>
    Copyright © 2010 by Armin Ronacher.
    Used under the modified BSD license.
    """
    mtime = None
    # FIX: `basestring` is a Python 2 name; on Python 3 paths are `str`.
    if isinstance(filepath_or_fp, str):
        filepath = filepath_or_fp
        file = None
    else:
        # A file object was given: auto-detection may be impossible, so a
        # mimetype or attachment filename is mandatory.
        assert bool(mimetype or attachment_filename)
        add_etags = False
        file = filepath_or_fp
        filepath = getattr(file, 'name', None)
        if filepath is not None:
            filepath = os.path.abspath(filepath)
    if mimetype is None and (filepath or attachment_filename):
        mimetype = mimetypes.guess_type(filepath or attachment_filename)[0]
    if mimetype is None:
        mimetype = 'application/octet-stream'
    # NOTE(review): `Headers` and `wrap_file` are not imported in the visible
    # part of this file — presumably provided by a sibling module; verify.
    headers = Headers()
    if as_attachment:
        if attachment_filename is None:
            if filepath is None:
                raise TypeError('filename unavailable, required for '
                                'sending as attachment')
            attachment_filename = os.path.basename(filepath)
        headers.add('Content-Disposition', 'attachment',
                    filename=attachment_filename)
    if use_x_sendfile and filepath:
        if file is not None:
            file.close()
        headers['X-Sendfile'] = filepath
        data = None
    else:
        if file is None:
            file = io.open(filepath, 'rb')
            mtime = os.path.getmtime(filepath)
        data = wrap_file(request.environ, file)
    response_class = response_class or Response
    resp = response_class(data, mimetype=mimetype, headers=headers,
                          direct_passthrough=True)
    # If we know the file modification date, store it as the time of the
    # last modification.
    if mtime is not None:
        resp.last_modified = int(mtime)
    resp.cache_control.public = True
    if cache_timeout:
        resp.cache_control.max_age = cache_timeout
        # FIX: `time` is the module imported at the top of this file; the
        # original called `time()` which raises TypeError.
        resp.expires = int(time.time() + cache_timeout)
    if add_etags and filepath is not None:
        # FIX: `unicode` does not exist on Python 3; `filepath` is always a
        # str here, so encode unconditionally (adler32 requires bytes).
        resp.set_etag('shake-%s-%s-%s' % (
            os.path.getmtime(filepath),
            os.path.getsize(filepath),
            adler32(filepath.encode('utf8')) & 0xffffffff
        ))
    if conditional:
        resp = resp.make_conditional(request)
        # Make sure we don't send x-sendfile for servers that ignore the
        # 304 status code for x-sendfile.
        if resp.status_code == 304:
            resp.headers.pop('x-sendfile', None)
    return resp
import json
def getInfo(ID):
    """Load a meter's configuration from ``config/<ID>.json``.

    The JSON file contains geometry/calibration data for one meter, e.g.::

        {
            "distance": 10, "horizontal": 10, "vertical": 20,
            "name": "1_1", "type": "SF6", "template": "template.jpg",
            "ROI": {"x": 200, "y": 200, "w": 1520, "h": 680},
            "startPoint": {"x": -1, "y": -1},
            "endPoint": {"x": -1, "y": -1},
            "centerPoint": {"x": -1, "y": -1},
            "startValue": 0, "totalValue": 2
        }

    The string "type" field is replaced with the matching handler object
    (``SF6`` or ``youwen``) and the meter's template image is loaded from
    ``template/<ID>.jpg`` into the "template" key.

    :param ID: meter ID (used as the config/template file stem)
    :return: the info dict described above, with "type" and "template"
        resolved to objects
    """
    # FIX: use a context manager so the config file handle is always
    # closed (the original left it open).
    with open("config/" + ID + ".json") as file:
        info = json.load(file)
    # Map the type name string to the actual handler object.
    if info["type"] == "SF6":
        info["type"] = SF6
    elif info["type"] == "youwen":
        info["type"] = youwen
    info["template"] = cv2.imread("template/" + ID + ".jpg")
    return info
from typing import Union
import os
def DO_run(wb, args: Union[list, None] = None, external: bool = False) -> RET:
    """run shell_command lfn|alien: tagged lfns :: download lfn(s) as a temporary file and run shell command on the lfn(s)"""
    if args is None: args = []
    if not args: return RET(1, '', 'No shell command specified')
    # Help requested, or only the command itself was given (no lfn to act on).
    if is_help(args) or len(args) == 1:
        msg_last = ('Command format: shell_command arguments lfn\n'
                    'N.B.!! the lfn must be the last element of the command!!\n'
                    'N.B.! The output and error streams will be captured and printed at the end of execution!\n'
                    'for working within application use <edit> or -noout argument\n'
                    'additiona arguments recognized independent of the shell command:\n'
                    '-force : will re-download the lfn even if already present\n'
                    '-noout : will not capture output, the actual application can be used')
        if external:
            # Show the wrapped command's own help first, then append our notes.
            ret_obj = runShellCMD(f'{args[0]} -h', captureout = True, do_shell = True)
            return ret_obj._replace(out = f'{ret_obj.out}\n{msg_last}')
        msg = ('Command format: run shell_command arguments lfn\n'
               'the lfn must be the last element of the command\n'
               'N.B.! The output and error streams will be captured and printed at the end of execution!\n'
               'for working within application use <edit>\n'
               'additiona arguments recognized independent of the shell command:\n'
               '-force : will re-download the lfn even if already present\n'
               '-noout : will not capture output, the actual application can be used')
        return RET(0, msg)
    # NOTE: get_arg removes the matched flag from `args` as a side effect,
    # so the order of these two calls relative to the lfn scan matters.
    overwrite = get_arg(args, '-force')
    capture_out = get_arg(args, '-noout')
    list_of_lfns = [arg for arg in args if 'alien:' in arg]
    # No "alien:"-tagged lfn found: by convention the LAST argument is the lfn.
    if not list_of_lfns: list_of_lfns = [args.pop(-1)]
    tmp_list = [download_tmp(wb, lfn, overwrite) for lfn in list_of_lfns]  # list of temporary downloads
    new_args = [arg for arg in args if arg not in list_of_lfns]  # command arguments without the files
    args = list(new_args)
    cmd = " ".join(args)
    files = " ".join(tmp_list)
    # Only run if every lfn actually landed on disk; otherwise report failure.
    if tmp_list and all(os.path.isfile(tmp) for tmp in tmp_list):
        return runShellCMD(f'{cmd} {files}', capture_out, do_shell = True)
    return RET(1, '', f'There was an error downloading the following files:\n{chr(10).join(tmp_list)}')
import os
import subprocess
import re
def get_keychain_pass(account=None, server=None):
    """
    Gets the password for a given account from Apple's keychain.

    :param account: keychain account name
    :param server: keychain server/host name
    :return: the stored password as a str
    :raises subprocess.CalledProcessError: if the `security` lookup fails
    :raises IndexError: if no "password:" line is present in the output
    """
    params = {
        'user': os.environ['USER'],
        'security': '/usr/bin/security',
        'command': 'find-internet-password',
        'account': account,
        'server': server,
        'keychain': os.environ['HOME'] + '/Library/Keychains/login.keychain'
    }
    # NOTE(review): account/server are interpolated into a shell string;
    # only call this with trusted values (shell-injection risk otherwise).
    command = "sudo -u %(user)s %(security)s -v %(command)s -g -a %(account)s -s %(server)s %(keychain)s" % params
    # FIX: check_output returns bytes on Python 3; decode so the str-based
    # startswith/regex below work instead of raising TypeError.
    output = subprocess.check_output(
        command, shell=True, stderr=subprocess.STDOUT).decode('utf-8')
    outtext = [line for line in output.splitlines() if line.startswith('password: ')][0]
    return re.match(r'password: "(.*)"', outtext).group(1)
def cast_tensor_by_spec(_input, spec):
    """
    Transform ``_input``'s dtype (and wrap plain values) to follow ``spec``.

    :param _input: a tensor, or any value convertible to one
    :param spec: expected spec; only acted on if it is a TensorSpec
        (checked via ``_isinstance_wrapper``)
    :return: ``_input`` unchanged when ``spec`` is not a TensorSpec,
        otherwise a tensor cast/created with the spec's dtype and name
    :raises MissingDependencyException: if TensorFlow is not installed
    """
    # FIX: the original had an empty ``try:`` body (a SyntaxError); the
    # guard was clearly meant to protect the TensorFlow import, so the
    # import now lives inside it.
    try:
        import tensorflow as tf
    except ImportError:
        raise MissingDependencyException(
            "Tensorflow package is required to use TfSavedModelArtifact"
        )
    if not _isinstance_wrapper(spec, "TensorSpec"):
        return _input
    if _isinstance_wrapper(_input, ["Tensor", "EagerTensor"]):
        # TensorFlow issue #43038
        # pylint: disable=unexpected-keyword-arg, no-value-for-parameter
        return tf.cast(_input, dtype=spec.dtype, name=spec.name)
    else:
        return tf.constant(_input, dtype=spec.dtype, name=spec.name)
def StopRequestHook(ref, args, request):
    """Declarative request hook for TPU Stop command.

    Ignores ``ref`` and ``args`` (required by the hook signature), attaches
    an empty ``StopNodeRequest`` message to ``request`` and returns it.
    """
    del ref, args  # unused, but the hook interface passes them
    request.stopNodeRequest = GetMessagesModule().StopNodeRequest()
    return request
def get_bucket(
    storage_bucket_name: str,
    **kwargs,
) -> Bucket:
    """Fetch the storage bucket with the given name.

    Any extra keyword arguments are forwarded to ``Client.get_bucket``.
    """
    storage_client = get_client()
    bucket = storage_client.get_bucket(storage_bucket_name, **kwargs)
    return bucket
def furl_for(endpoint: str, filename: str=None, **kwargs: dict) -> str:
    """Replacement for url_for that prefixes the site base URL.

    With a filename, resolves the endpoint for that file; an empty endpoint
    maps to "/"; otherwise forwards the extra kwargs to url_for.
    """
    if filename is not None:
        path = url_for(endpoint, filename=filename)
    elif endpoint == "":
        path = "/"
    else:
        path = url_for(endpoint, **kwargs)
    return URL() + path
def image_max_value(img, region=None, scale=None):
    """Retrieves the maximum value of an image.

    Args:
        img (object): The image to calculate the maximum value.
        region (object, optional): The region over which to reduce data. Defaults to the footprint of the image's first band.
        scale (float, optional): A nominal scale in meters of the projection to work in. Defaults to None.

    Returns:
        object: ee.Number
    """
    reduce_args = {
        'reducer': ee.Reducer.max(),
        'geometry': img.geometry() if region is None else region,
        'scale': image_scale(img) if scale is None else scale,
        'maxPixels': 1e12,
    }
    return img.reduceRegion(**reduce_args)
def getblock(lst, limit):
    """Remove and return the LAST ``limit`` entries of ``lst``.

    The original docstring claimed the *first* entries, but the
    implementation has always popped from the tail; callers may rely on
    that, so only the documentation is corrected here.

    If ``limit`` exceeds ``len(lst)`` the whole list is returned and
    ``lst`` is emptied.

    NOTE(review): ``limit == 0`` also returns/consumes the whole list
    (``lst[-0:]`` is the full slice) — confirm no caller passes 0.
    """
    tail = lst[-limit:]
    del lst[-limit:]
    return tail
def electrode_neighborhoods(mea='hidens', neighborhood_radius=HIDENS_NEIGHBORHOOD_RADIUS, x=None, y=None):
    """
    Boolean adjacency matrix of electrodes closer than the given radius.

    :param mea: (optional) type of the micro electrode array, default: 'hidens'
    :param neighborhood_radius: (optional) depends on mea type
    :param x, y: (optional) electrode coordinates
    :return: neighbors: square boolean matrix (True where distance < radius)
    """
    return electrode_distances(mea, x, y) < neighborhood_radius
def lat_from_meta(meta):
    """
    Obtains a latitude coordinates array from rasterio metadata.

    :param meta: dict rasterio metadata; must contain "transform" (affine
        coefficients, indexable) and "height" (row count).
    :return: numpy array of latitudes
    :raises KeyError: if "transform" or "height" is missing from meta
    """
    # A missing key simply propagates as KeyError (the original caught it
    # only to re-raise the same exception).
    t, h = meta["transform"], meta["height"]
    lat = np.arange(t[5], t[5] + (t[4] * h), t[4])
    # In rare cases the coords come out too short or too long (e.g. due to
    # rounding).  The original applied the shorten/enlarge pair three times
    # "to be sure"; that exact sequence is preserved here as a loop.
    for _ in range(3):
        lat = shorten_coords_array(lat, t[5], t[4], h)
        lat = enlarge_coords_array(lat, t[5], t[4], h)
    return lat
def get_test_class(dbcase):
    """Return the implementation class of a TestCase, or None if not found.

    Only automated, valid cases with a registered implementation are
    resolved; the named object must be a class derived from core.Test,
    otherwise InvalidTestError is raised.
    """
    if not (dbcase.automated and dbcase.valid):
        return None
    impl = dbcase.testimplementation
    if not impl:
        return None
    obj = module.get_object(impl)
    if type(obj) is type and issubclass(obj, core.Test):
        return obj
    raise InvalidTestError("%r is not a Test class object." % (obj,))
def calc_dp(t_c, rh):
    """Calculate the dew point in Celsius.

    Arguments:
    t_c - the temperature in °C.
    rh - the relative humidity as a percent, (0-100)

    Returns:
    The dew point in °C.
    """
    # Actual vapor pressure = saturation vapor pressure scaled by humidity.
    vapor_pressure = vapor_pressure_liquid_water(t_c) * rh / 100.0
    gamma = log(vapor_pressure / 6.1037) / 17.641
    return 243.27 * gamma / (1.0 - gamma)
from typing import Tuple
def getSlackMetar(input: flask.Request) -> Tuple[str, int, dict]:
    """
    Endpoint handler for the slack_metar HTTP function trigger.

    Extracts the station name from the request, fetches its METAR text and
    returns a (body, status, headers) Flask response tuple.
    """
    # (The original declared `global _baseUrl`/`global _logger` but never
    # assigned either, so the statements were no-ops and are dropped.)
    _logger.debug("Entered metar")
    metar_text = _requestData('METAR', _getStationName(input))
    headers = {'Content-type': 'application/json'}
    return (_buildSlackResponse(metar_text), 200, headers)
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
    """conv_block is the block that has a conv layer at shortcut

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layers at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.

    Note that from stage 3, the first conv layer at main path is with strides=(2,2)
    And the shortcut should have strides=(2,2) as well
    """
    filters1, filters2, filters3 = filters
    # BatchNormalization acts on the channel axis, whose position depends on
    # the configured image data format.
    if IMAGE_ORDERING == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # Main path: 1x1 (reduce channels, strided) -> kxk -> 1x1 (expand).
    x = Conv2D(filters1, (1, 1) , data_format=IMAGE_ORDERING , strides=strides,
               name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)
    x = Conv2D(filters2, kernel_size , data_format=IMAGE_ORDERING , padding='same',
               name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)
    x = Conv2D(filters3, (1, 1) , data_format=IMAGE_ORDERING , name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
    # Projection shortcut: 1x1 strided conv so shapes match for the add.
    shortcut = Conv2D(filters3, (1, 1) , data_format=IMAGE_ORDERING , strides=strides,
                      name=conv_name_base + '1')(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)
    x = layers.add([x, shortcut])
    x = Activation('relu')(x)
    return x
from typing import List
from typing import Optional
import tokenize
def _fake_before_lines(first_line: str) -> List[str]:
    """Construct the fake lines that should go before the text.

    One synthetic ``if True:`` is emitted per indent level so the text
    parses at the right depth; a leading ``else``/``elif``/``except``/
    ``finally`` additionally gets a matching opener plus a ``pass`` body.
    """
    indent_levels = _indent_levels(first_line)
    fake_lines = [
        f"{SINGLE_INDENT * level}if True:\n" for level in range(indent_levels)
    ]
    # Tokenize just the first (dedented) line to see which keyword it starts with.
    try:
        token_stream = tokenize.generate_tokens(iter([first_line.lstrip()]).__next__)
        first_token: Optional[tokenize.TokenInfo] = next(token_stream)
    except tokenize.TokenError:
        first_token = None
    if first_token and first_token.type == tokenize.NAME:
        prefix = SINGLE_INDENT * indent_levels
        keyword = first_token.string
        opener = None
        if keyword in {"else", "elif"}:
            opener = f"{prefix}if True:\n"
        elif keyword in {"except", "finally"}:
            opener = f"{prefix}try:\n"
        if opener is not None:
            fake_lines.append(opener)
            fake_lines.append(f"{prefix}{SINGLE_INDENT}pass\n")
    return fake_lines
def normal_logpdf(x, mu, cov):
    """
    Log-density of a multivariate normal, plain numpy implementation.

    :param x: evaluation point (1-D array of length ``len(mu)``)
    :param mu: mean vector
    :param cov: covariance matrix
    :return: the log pdf value as a float
    """
    dim = len(mu)
    # Normalization constant 1 / ((2*pi)^(d/2) * |cov|^(1/2)).
    norm_const = 1 / (((2 * np.pi) ** (dim / 2)) * (np.linalg.det(cov) ** (1 / 2)))
    # Quadratic form -(x-mu)' cov^-1 (x-mu) / 2.
    centered = x - mu
    quad_form = (-1 / 2) * (centered.T.dot(np.linalg.inv(cov))).dot(centered)
    return float(np.log(norm_const) + quad_form)
def criteriarr(criteria):
    """Validate if the iterable only contains MIN (or any alias) and MAX
    (or any alias) values. And also always returns an ndarray representation
    of the iterable.

    Parameters
    ----------
    criteria : Array-like
        Iterable containing all the values to be validated by the function.

    Returns
    -------
    numpy.ndarray :
        Criteria array.

    Raises
    ------
    DataValidationError :
        if some value of the criteria array are not MIN (-1) or MAX (1)
    """
    # ALIASES maps every accepted spelling to -1/1; unknown values map to None.
    mapped = [ALIASES.get(value) for value in criteria]
    if None in mapped:
        msg = (
            "Criteria Array only accept minimize or maximize Values. Found {}")
        raise DataValidationError(msg.format(criteria))
    return np.array(mapped)
import random
def integer_or_rational(entropy, signed, min_abs=0):
"""Returns a rational, with 50% probability of it being an integer."""
if random.choice([False, True]):
return integer(entropy, signed, min_abs=min_abs)
else:
return non_integer_rational(entropy, signed) | 03e11aa082dfdb613f0f1861ff8346b1087b4880 | 3,638,019 |
import functools
import warnings
def ignore_python_warnings(function):
    """
    Decorator that suppresses all *Python* warnings raised by ``function``.

    Parameters
    ----------
    function : object
        Function to decorate.

    Returns
    -------
    object
        The wrapped function.

    Examples
    --------
    >>> @ignore_python_warnings
    ... def f():
    ...     warnings.warn('This is an ignored warning!')
    >>> f()
    """

    @functools.wraps(function)
    def wrapped(*args, **kwargs):
        """Call ``function`` inside a warning-suppressing context."""
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            return function(*args, **kwargs)

    return wrapped
def runMetrics(
    initWorkingSetName,
    stepName,
    requestInfo,
    jobId,
    outputFolder,
    referenceFolder,
    referencePrefix,
    dtmFile,
    dsmFile,
    clsFile,
    mtlFile,
):
    """
    Run a Girder Worker job to compute metrics on output files.

    Requirements:
    - Danesfield Docker image is available on host

    :param initWorkingSetName: The name of the top-level working set.
    :type initWorkingSetName: str
    :param stepName: The name of the step.
    :type stepName: str (DanesfieldStep)
    :param requestInfo: HTTP request and authorization info.
    :type requestInfo: RequestInfo
    :param jobId: Job ID.
    :type jobId: str
    :param outputFolder: Output folder document.
    :type outputFolder: dict
    :param referenceFolder: Reference directory.
    :type referenceFolder: dict
    :param referencePrefix: Reference file prefix.
    :type referencePrefix: str
    :param dtmFile: DTM file document.
    :type dtmFile: dict
    :param dsmFile: DSM file document.
    :type dsmFile: dict
    :param clsFile: CLS file document.
    :type clsFile: dict
    :param mtlFile: MTL file document.
    :type mtlFile: dict
    :returns: Job document.
    """
    gc = createGirderClient(requestInfo)
    if referencePrefix == "STANDARD":
        # We know that there's no reference data with this selection, so run
        # a trivial container that just reports that fact instead of scoring.
        containerArgs = ["echo", "No ground truth selected for scoring"]
        asyncResult = docker_run.delay(
            **createDockerRunArguments(
                image=DockerImage.DANESFIELD,
                containerArgs=containerArgs,
                jobTitle="[%s] Run metrics" % initWorkingSetName,
                jobType=stepName,
                user=requestInfo.user,
            )
        )
    else:
        # Otherwise we assume the reference data exists, and try to
        # run the metrics
        outputVolumePath = VolumePath("__output__")
        # Docker container arguments: the metrics script plus mounted
        # volumes for the reference folder and each input raster.
        containerArgs = [
            "danesfield/tools/run_metrics.py",
            "--output-dir",
            outputVolumePath,
            "--ref-dir",
            GirderFolderIdToVolume(referenceFolder["_id"], gc=gc),
            "--ref-prefix",
            referencePrefix,
            "--dsm",
            GirderFileIdToVolume(dsmFile["_id"], gc=gc),
            "--cls",
            GirderFileIdToVolume(clsFile["_id"], gc=gc),
            "--mtl",
            GirderFileIdToVolume(mtlFile["_id"], gc=gc),
            "--dtm",
            GirderFileIdToVolume(dtmFile["_id"], gc=gc),
        ]
        # Result hooks
        # - Upload output files to output folder
        # - Provide upload metadata
        upload_kwargs = createUploadMetadata(jobId, stepName)
        resultHooks = [
            GirderUploadVolumePathToFolder(
                outputVolumePath,
                outputFolder["_id"],
                upload_kwargs=upload_kwargs,
                gc=gc,
            )
        ]
        asyncResult = docker_run.delay(
            **createDockerRunArguments(
                image=DockerImage.DANESFIELD,
                containerArgs=containerArgs,
                jobTitle="[%s] Run metrics" % initWorkingSetName,
                jobType=stepName,
                user=requestInfo.user,
                resultHooks=resultHooks,
            )
        )
    # Add info for job event listeners
    job = asyncResult.job
    job = addJobInfo(job, jobId=jobId, stepName=stepName)
    return job
import os
def terminal_bg():
    """Guess the terminal background from the COLORFGBG environment variable.

    Returns BGColor.LIGHT for a light background, BGColor.DARK for a dark
    one, and BGColor.UNKNOWN when it cannot be determined.  (The original
    docstring described positional arguments, but this function takes none.)
    """
    colorfgbg = os.environ.get('COLORFGBG', '')
    # "0;15" = black foreground on white background.
    if colorfgbg == '0;15':
        return BGColor.LIGHT
    # "15;0" = white foreground on black background.
    if colorfgbg == '15;0':
        return BGColor.DARK
    return BGColor.UNKNOWN
def matplotlib_kwarg_dealiaser(args, kind):
    """De-aliase the kwargs passed to plots.

    :param args: dict of keyword arguments (possibly using matplotlib
        aliases such as ``c`` for ``color``), or None.
    :param kind: name of the plotting function the kwargs target; must be
        one of the keys of the mapping below ("scatter", "plot", ...).
    :return: dict with aliases normalized to their canonical names.
    """
    if args is None:
        return {}
    # Map each plot kind to the matplotlib artist class whose ``_alias_map``
    # defines the canonical kwarg spellings for that plot type.
    matplotlib_kwarg_dealiaser_dict = {
        "scatter": mpl.collections.PathCollection,
        "plot": mpl.lines.Line2D,
        "hist": mpl.patches.Patch,
        "bar": mpl.patches.Rectangle,
        "hexbin": mpl.collections.PolyCollection,
        "fill_between": mpl.collections.PolyCollection,
        "hlines": mpl.collections.LineCollection,
        "text": mpl.text.Text,
        "contour": mpl.contour.ContourSet,
        "pcolormesh": mpl.collections.QuadMesh,
    }
    # Fall back to an empty alias map if the artist class defines none.
    return normalize_kwargs(args, getattr(matplotlib_kwarg_dealiaser_dict[kind], "_alias_map", {}))
def volume():
    """
    Get volume number.

    :return: the scheduler's stored volume number (``Scheduler.ret_volume``)
    """
    return Scheduler.ret_volume
def grid_grad(input, grid, interpolation='linear', bound='zero',
              extrapolate=False):
    """Sample spatial gradients of an image with respect to a deformation field.

    Notes
    -----
    {interpolation}
    {bound}

    Parameters
    ----------
    input : ([batch], [channel], *inshape) tensor
        Input image.
    grid : ([batch], *inshape, dim) tensor
        Transformation field.
    interpolation : int or sequence[int], default=1
        Interpolation order.
    bound : BoundType, or sequence[BoundType], default='zero'
        Boundary conditions.
    extrapolate : bool or int, default=False
        Extrapolate out-of-bound data.

    Returns
    -------
    output : ([batch], [channel], *shape, dim) tensor
        Sampled gradients.
    """
    # Broadcast: record which optional leading dimensions (batch/channel)
    # were absent so they can be stripped from the output again at the end.
    dim = grid.shape[-1]
    input_no_batch = input.dim() == dim + 1
    input_no_channel = input.dim() == dim
    grid_no_batch = grid.dim() == dim + 1
    if input_no_channel:
        input = input[None, None]
    elif input_no_batch:
        input = input[None]
    if grid_no_batch:
        grid = grid[None]
    # Expand both tensors to a common batch size before applying the op.
    batch = max(input.shape[0], grid.shape[0])
    input = expand(input, [batch, *input.shape[1:]])
    grid = expand(grid, [batch, *grid.shape[1:]])
    out = GridGrad.apply(input, grid, interpolation, bound, extrapolate)
    # Undo the singleton dimensions added above.
    if input_no_channel:
        out = out[:, 0]
    if input_no_batch and grid_no_batch:
        out = out[0]
    return out
def delete_cart_item(quote_id, item_code):
    """Delete given item_codes from Quote; if all items are deleted, delete the Quote.

    :param quote_id: name of the Quotation document
    :param item_code: comma-separated item codes to remove
    :return: frappe._dict response (updated cart details, or a status message)
    """
    try:
        response = frappe._dict()
        # FIX: the original called ``item_code.encode('utf-8')`` (a Python 2
        # leftover); on Python 3 that yields bytes and the str ``split``
        # below raises TypeError.  Operate on the str directly.
        # (A dead ``isinstance(item_code, list)`` branch was also removed:
        # ``item_code`` is never used after ``item_list`` is built.)
        item_list = [i.strip() for i in item_code.split(",")]
        if not frappe.db.exists("Quotation", quote_id):
            response["message"] = "Quotation not found"
            frappe.local.response['http_status_code'] = 404
        else:
            quote = frappe.get_doc("Quotation", quote_id)
            # Keep only the rows whose item_code was not requested for removal.
            new_items = []
            for idx, row in enumerate(quote.get("items")):
                if not row.item_code in item_list:
                    new_items.append(row)
            quote.items = new_items
            quote.flags.ignore_mandatory = True
            quote.save()
            if not len(quote.get("items", [])):
                # Nothing left on the quote: remove the document entirely.
                frappe.delete_doc("Quotation", quote_id)
                response["message"] = "Deleted all items"
                frappe.local.response["http_status_code"] = 200
            else:
                response = get_cart_details(quote_id)
            frappe.db.commit()
    except Exception as e:
        http_status_code = getattr(e, "http_status_code", 500)
        frappe.local.response['http_status_code'] = http_status_code
        response["message"] = "Unable to Delete Quote Item"
        frappe.log_error(message=frappe.get_traceback() , title="Website API: delete_cart_item")
    finally:
        # NOTE: return-in-finally is deliberate here — it always hands back
        # the response dict, even after a handled exception.
        return response
def list_methods(f):
    """Return a list of the multimethods currently registered to `f`.

    The multimethods are returned in the order they would be tested by the dispatcher
    when the generic function is called.

    The return value is a list, where each item is `(callable, type_signature)`.
    Each type signature is in the format returned by `typing.get_type_hints`.

    `f`: a callable that has been declared `@generic` or `@typed`.

    **Interaction with OOP**:

    Bound methods are resolved to the underlying function automatically.
    The `self`/`cls` argument is extracted from the `__self__` attribute of
    the bound method, enabling linked dispatcher lookups in the MRO.

    **CAUTION**:

    Recall that in Python, instance methods when accessed through the *class*
    are just raw functions; the method becomes bound, and thus `self` is set,
    when accessed through *an instance* of that class.

    Let `Cat` be a class with an OOP instance method `meow`, and `cat` an
    instance of that class. If you call `list_methods(cat.meow)`, you get the
    MRO lookup for linked dispatchers, as expected.

    But if you call `list_methods(Cat.meow)` instead, it won't see the MRO,
    because the value of the `self` argument isn't set for an unbound method
    (which is really just a raw function).

    If `Cat` has a `@classmethod` `iscute`, calling `list_methods(Cat.iscute)`
    performs the MRO lookup for linked dispatchers. This is because a class
    method is already bound (to the class, so the `cls` argument already has
    a value) when it is accessed through the class.

    Finally, note that while that is how `list_methods` works, it is not the
    mechanism actually used to determine `self`/`cls` when *calling* the
    generic function. There, the value of `self`/`cls` is extracted from the
    first positional argument of the call. This is because the dispatcher is
    actually installed on the underlying raw function, so it has no access to
    the metadata of the bound method (which, as seen from the dispatcher, is
    on the outside).
    """
    # Resolve a bound method to its underlying raw function first.
    function, _ = getfunc(f)
    if not isgeneric(function):
        raise TypeError(f"{_function_fullname(function)} is not a generic function, it does not have multimethods.")
    # In case of a bound method (either `Foo.classmeth` or `foo.instmeth`),
    # we can get the value for `self`/`cls` argument from its `__self__` attribute.
    #
    # Otherwise we have a regular function, an unbound method, or a `@staticmethod`;
    # in those cases, there's no `self`/`cls`. (Technically, an unbound method has
    # a parameter to receive it, but no value has been set yet.)
    self_or_cls = f.__self__ if hasattr(f, "__self__") else None
    return _list_multimethods(function, self_or_cls)
import os
def load_CIFAR10(file_dir):
    """Load all of CIFAR-10.

    Concatenates every training batch named in ``train_list`` (loaded from
    ``file_dir``) and also loads the single test batch.

    :return: (x_train, y_train, x_test, y_test) numpy arrays
    """
    batches = [
        load_CIFAR_batch(os.path.join(file_dir, filename))
        for filename in train_list
    ]
    x_train = np.concatenate([data for data, _ in batches])
    y_train = np.concatenate([labels for _, labels in batches])
    x_test, y_test = load_CIFAR_batch(os.path.join(file_dir, test_batch))
    return x_train, y_train, x_test, y_test
def get_requirements(extra=None):
    """
    Load the requirements for the given extra from the appropriate
    requirements-extra.txt, or the main requirements.txt if no extra is
    specified.

    :param extra: optional extras name (e.g. "dev" -> requirements-dev.txt)
    :return: list of non-empty, stripped lines from the file
    """
    filename = f"requirements-{extra}.txt" if extra else "requirements.txt"
    # Read with an explicit encoding: requirement files are conventionally
    # UTF-8, and the platform default may differ.  Also renames the
    # ambiguous loop variable ``l`` (easily confused with ``1``).
    with open(filename, encoding="utf-8") as fp:
        # Parse out as one per line
        return [line.strip() for line in fp if line.strip()]
import os
def count_image_files(directory, montage_mode=False):
"""Counts all image files inside the directory.
If montage_mode, counts 1 level deep and returns the minimum count.
Else, counts all child images of directory.
Args:
directory (str): directory to look for child image files
montage_mode (bool): whether ot not to look in subdirs of directory
Returns:
int: the number of image files in the directory
"""
def count_images(d):
valid_extensions = {'.tiff', '.tif', '.png', '.jpg', '.jpeg', '.bmp'}
count = 0
for f in os.listdir(directory):
_, ext = os.path.splitext(f.lower())
if ext in valid_extensions:
count += 1
return count
if not montage_mode:
return count_images(directory)
return min([count_images(d) for d in get_immediate_subdirs(directory)]) | c6b085a04f5777d6d5a30c714eced453e831f02e | 3,638,030 |
def _format_mojang_uuid(uuid):
"""
Formats a non-hyphenated UUID into a whitelist-compatible UUID
:param str uuid: uuid to format
:return str: formatted uuid
Example:
>>> _format_mojang_uuid('1449a8a244d940ebacf551b88ae95dee')
'1449a8a2-44d9-40eb-acf5-51b88ae95dee'
Must have 32 characters:
>>> _format_mojang_uuid('1')
Traceback (most recent call last):
...
ValueError: Expected UUID to have 32 characters
"""
if len(uuid) != 32:
raise ValueError('Expected UUID to have 32 characters')
return uuid[:8] + '-' + uuid[8:12] + '-' + uuid[12:16] + '-' + uuid[16:20] + '-' + uuid[20:] | 517071b28f1e747091e2a539cd5d0b8765bebeba | 3,638,031 |
def generate_uri(graph_base, username):
    """
    Concatenate the graph base URI with the username.

    Args:
        graph_base (): base URI prefix
        username (): value appended directly after the base

    Returns:
        The combined URI string.
    """
    return f"{graph_base}{username}"
from typing import Type
from typing import Optional
from typing import Dict
from typing import Any
def _combine_model_kwargs_and_state(
    generator_run: GeneratorRun,
    model_class: Type[Model],
    model_kwargs: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Produce a combined dict of model kwargs and the post-generation model
    state extracted from the generator run.

    If ``model_kwargs`` is not given, the generator run's own model kwargs
    are used instead.
    """
    kwargs = model_kwargs or generator_run._model_kwargs or {}
    state = generator_run._model_state_after_gen
    if state is None:
        return kwargs
    # Merge into a NEW dict rather than updating `kwargs` in place — the
    # kwargs may belong to a `GenerationStep` and must not be mutated.
    deserialized = model_class.deserialize_state(not_none(state))
    return {**kwargs, **deserialized}
def quick_boxcar(s, M=4, centered=True):
    """Returns a boxcar-filtered version of the input signal.

    Keyword arguments:
    M -- number of averaged samples (default 4)
    centered -- recenter the filtered signal to reduce lag (default True)
    """
    # Sanity check on signal and filter window
    length = s.shape[0]
    if length <= 2*M:
        raise ValueError('signal too short for specified filter window')
    # Set up staggered arrays for vectorized average: row i is the signal
    # shifted by i samples, edge-padded with the first/last sample value.
    z = np.empty((M, length+M-1), 'd')
    for i in range(M):
        z[i] = np.r_[np.zeros(i)+s[0], s, np.zeros(M-i-1)+s[-1]]
    # Center the average if specified (shifts the output window by M/2
    # samples to compensate the filter's group delay).
    start_ix = 0
    end_ix = length
    if centered:
        start_ix += int(M/2)
        end_ix += int(M/2)
    return z.mean(axis=0)[start_ix:end_ix]
import psutil
async def info(request):
    """HTTP method to return node state and host resource stats to the caller."""
    log.debug("info request")
    app = request.app
    answer = {}
    # copy relevant entries from state dictionary to response
    node = {}
    node['id'] = request.app['id']
    node['type'] = request.app['node_type']
    node['start_time'] = app["start_time"]  # unixTimeToUTC(app['start_time'])
    node['state'] = app['node_state']
    node['node_number'] = app['node_number']
    node['node_count'] = app['node_count']
    answer["node"] = node
    # psutil info
    # see: http://pythonhosted.org/psutil/ for description of different fields
    cpu = {}
    cpu["percent"] = psutil.cpu_percent()
    cpu["cores"] = psutil.cpu_count()
    answer["cpu"] = cpu
    diskio = psutil.disk_io_counters()
    disk_stats = {}
    disk_stats["read_count"] = diskio.read_count
    disk_stats["read_time"] = diskio.read_time
    disk_stats["read_bytes"] = diskio.read_bytes
    disk_stats["write_count"] = diskio.write_count
    disk_stats["write_time"] = diskio.write_time
    disk_stats["write_bytes"] = diskio.write_bytes
    answer["diskio"] = disk_stats
    netio = psutil.net_io_counters()
    net_stats = {}
    net_stats["bytes_sent"] = netio.bytes_sent
    # BUG FIX: bytes_recv was previously stored under the "bytes_sent" key,
    # clobbering the sent counter and omitting the received one entirely.
    net_stats["bytes_recv"] = netio.bytes_recv
    net_stats["packets_sent"] = netio.packets_sent
    net_stats["packets_recv"] = netio.packets_recv
    net_stats["errin"] = netio.errin
    net_stats["errout"] = netio.errout
    net_stats["dropin"] = netio.dropin
    net_stats["dropout"] = netio.dropout
    answer["netio"] = net_stats
    mem_stats = {}
    svmem = psutil.virtual_memory()
    mem_stats["phys_total"] = svmem.total
    mem_stats["phys_available"] = svmem.available
    sswap = psutil.swap_memory()
    mem_stats["swap_total"] = sswap.total
    mem_stats["swap_used"] = sswap.used
    mem_stats["swap_free"] = sswap.free
    mem_stats["percent"] = sswap.percent
    answer["memory"] = mem_stats
    disk_stats = {}
    sdiskusage = psutil.disk_usage('/')
    disk_stats["total"] = sdiskusage.total
    disk_stats["used"] = sdiskusage.used
    disk_stats["free"] = sdiskusage.free
    disk_stats["percent"] = sdiskusage.percent
    answer["disk"] = disk_stats
    answer["log_stats"] = app["log_count"]
    answer["req_count"] = app["req_count"]
    answer["s3_stats"] = app["s3_stats"]
    # Cache statistics are only present on DN nodes; the response keys are
    # always emitted (empty dicts otherwise).
    # NOTE(review): "utililization_per" key spelling kept for wire compatibility.
    mc_stats = {}
    if "meta_cache" in app:
        mc = app["meta_cache"]  # only DN nodes have this
        mc_stats["count"] = len(mc)
        mc_stats["dirty_count"] = mc.dirtyCount
        mc_stats["utililization_per"] = mc.cacheUtilizationPercent
        mc_stats["mem_used"] = mc.memUsed
        mc_stats["mem_target"] = mc.memTarget
    answer["meta_cache_stats"] = mc_stats
    cc_stats = {}
    if "chunk_cache" in app:
        cc = app["chunk_cache"]  # only DN nodes have this
        cc_stats["count"] = len(cc)
        cc_stats["dirty_count"] = cc.dirtyCount
        cc_stats["utililization_per"] = cc.cacheUtilizationPercent
        cc_stats["mem_used"] = cc.memUsed
        cc_stats["mem_target"] = cc.memTarget
    answer["chunk_cache_stats"] = cc_stats
    dc_stats = {}
    if "domain_cache" in app:
        dc = app["domain_cache"]  # only DN nodes have this
        dc_stats["count"] = len(dc)
        dc_stats["dirty_count"] = dc.dirtyCount
        dc_stats["utililization_per"] = dc.cacheUtilizationPercent
        dc_stats["mem_used"] = dc.memUsed
        dc_stats["mem_target"] = dc.memTarget
    answer["domain_cache_stats"] = dc_stats
    resp = await jsonResponse(request, answer)
    log.response(request, resp=resp)
    return resp
def read_file(file, assume_complete=False):
    """read_file(filename, assume_complete=False) -> Contest
    Read in a text file describing a contest, and construct a Contest object.
    This adds the ballots (by calling addballots()), but it doesn't do
    any further computation.
    If assume_complete is True, any entries missing from a ballot are assumed
    to be tied for last.
    """
    contents = None
    ballots = []
    # First pass: exactly one "*" line defines the entries; every other
    # non-blank, non-comment line is a ballot.
    while True:
        ln = file.readline()
        if (not ln):
            break
        ln = ln.strip()
        if (not ln):
            continue
        if (ln.startswith('#')):
            continue
        if (ln.startswith('*')):
            if (contents):
                raise Exception('More than one line in the input file begins with *.')
            contents = ln
        else:
            ballots.append(ln)
    if (not contents):
        raise Exception('No line in the input file begins with *.')
    entries = contents[1:].split()
    if (not entries):
        raise Exception('The * line has no contents.')
    # Use a set (rather than a dict of dummy True values) to spot duplicates.
    if (len(set(entries)) != len(entries)):
        raise Exception('Duplicate entry in * line.')
    contest = Contest(entries)
    for ln in ballots:
        # A ballot is whitespace-separated ranks; "/" joins tied entries.
        ls = [val.split('/') for val in ln.split()]
        seen = set()
        for subls in ls:
            for val in subls:
                if (not contest.iskey(val)):
                    raise Exception('Unknown key in ballot: ' + val)
                if (val in seen):
                    raise Exception('Repeated key in ballot: ' + val)
                seen.add(val)
        if (assume_complete):
            # Anything not mentioned on the ballot is tied for last place.
            final = [val for val in contest.entries if val not in seen]
            if (final):
                ls.append(final)
        contest.addballot(ls)
    return contest
import six
def merge_dict(a, b):
    """
    Recursively merges and returns dict a with dict b.
    Any list values will be combined and returned sorted.
    :param a: dictionary object
    :param b: dictionary object
    :return: merged dictionary object (inputs are not mutated)
    """
    if not isinstance(b, dict):
        return b
    result = deepcopy(a)
    # dict.items() is fine on both Python 2.7 and 3; six.iteritems is an
    # unnecessary legacy indirection here.
    for key, val in b.items():
        if key in result and isinstance(result[key], dict):
            # Nested dicts are merged recursively.
            result[key] = merge_dict(result[key], val)
        elif key in result and isinstance(result[key], list):
            # Lists are unioned (deduplicated) and returned sorted.
            result[key] = sorted(set(val) | set(result[key]))
        else:
            result[key] = deepcopy(val)
    return result
def send_message(oc_user, params):
    """Leave a message for another user.

    Returns an ``(error_code, payload)`` tuple; code 0 means success.
    """
    to_uid = params.get("to_uid")
    content = params.get("content", '')
    # Validate the input before touching any storage.
    if not to_uid:
        return 1, {"msg": "please choose user"}
    if not content:
        return 2, {"msg": "please input content"}
    if len(content) > 40:
        return 3, {"msg": "content too long"}
    # Record the message on both sides of the conversation.
    for owner, other in ((oc_user.uid, to_uid), (to_uid, oc_user.uid)):
        UserCompeteMessage.hget(owner, other).set_message(oc_user.uid, to_uid, content)
    return 0, {}
def BDD100K(path: str) -> Dataset:
    """Load the `BDD100K <https://bdd-data.berkeley.edu>`_ dataset.

    The directory pointed to by *path* must be laid out as::

        <path>
            bdd100k_images_100k/
                images/
                    100k/
                        test
                        train
                        val
                labels/
                    det_20/
                        det_train.json
                        det_val.json
                    lane/
                        polygons/
                            lane_train.json
                            lane_val.json
                    drivable/
                        polygons/
                            drivable_train.json
                            drivable_val.json

    Arguments:
        path: The root directory of the dataset.

    Returns:
        Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.
    """
    # Delegate to the shared loader with the "100k" flavour selected.
    return _BDD100K_loader(path, "100k")
def is_utf8(string):
    """Check whether the argument encodes to UTF-8 without error.

    Args:
        string (str): value to test.

    Returns:
        True when ``string.encode('utf-8')`` succeeds; False on encode
        failure or when the object has no ``encode`` method at all.
    """
    try:
        string.encode('utf-8')
    except (UnicodeEncodeError, UnicodeDecodeError, AttributeError):
        return False
    return True
def fixedwidth_bins(delta, xmin, xmax):
    """Return bins of width `delta` that cover `xmin`, `xmax` (or a larger range).
    The bin parameters are computed such that the bin size `delta` is
    guaranteed. In order to achieve this, the range `[xmin, xmax]` can be
    increased.
    Bins can be calculated for 1D data (then all parameters are simple floats)
    or nD data (then parameters are supplied as arrays, with each entry
    correpsonding to one dimension).
    Parameters
    ----------
    delta : float or array_like
        desired spacing of the bins
    xmin : float or array_like
        lower bound (left boundary of first bin)
    xmax : float or array_like
        upper bound (right boundary of last bin)
    Returns
    -------
    dict
        The dict contains 'Nbins', 'delta', 'min', and 'max'; these are either
        floats or arrays, depending on the input.
    Example
    -------
    Use with :func:`numpy.histogram`::
        B = fixedwidth_bins(delta, xmin, xmax)
        h, e = np.histogram(data, bins=B['Nbins'], range=(B['min'], B['max']))
    """
    if not np.all(xmin < xmax):
        raise ValueError('Boundaries are not sane: should be xmin < xmax.')
    # FIX: np.float_ was removed in NumPy 2.0; np.float64 is the equivalent
    # concrete dtype and works on all NumPy versions.
    _delta = np.asarray(delta, dtype=np.float64)
    _xmin = np.asarray(xmin, dtype=np.float64)
    _xmax = np.asarray(xmax, dtype=np.float64)
    _length = _xmax - _xmin
    N = np.ceil(_length / _delta).astype(np.int_)  # number of bins
    dx = 0.5 * (N * _delta - _length)  # add half of the excess to each end
    return {'Nbins': N, 'delta': _delta, 'min': _xmin - dx, 'max': _xmax + dx}
def get_word(path):
    """Extract the word name from a json path: everything before the first '.'."""
    name, _, _ = path.partition('.')
    return name
def produce_edge_image(thresh, img):
    """
    Threshold the image and return ``(edges, thresholded_image)``.

    The input is inverse-binary thresholded at *thresh*, denoised with a
    median blur plus a morphological open, and then Canny edge-detected.
    (Docstring fixed: the function returns a 2-tuple, not only the edges.)
    """
    # cv.threshold's returned threshold value is unused — don't shadow the
    # `thresh` parameter with it.
    _, alpha_img = cv.threshold(img, thresh, 255, cv.THRESH_BINARY_INV)
    blur_img = cv.medianBlur(alpha_img, 9)
    blur_img = cv.morphologyEx(blur_img, cv.MORPH_OPEN, (5, 5))
    # find the edges
    return cv.Canny(blur_img, 30, 200), alpha_img
import json
import logging
def get(endpoint: str,
        encoding: str = "utf-8",
        **params):
    """
    Return requested data decoded from JSON (empty list on fallback).
    Checks the request has a correct schema before fetching.

    Note: the previous return annotation ``(list, dict, None)`` was a tuple
    literal, not a type, so it has been dropped; the function returns the
    decoded JSON value (list or dict) on success and ``[]`` on any failure.

    :param endpoint: endpoint for request.
    :param encoding: encoding for received data.
    :param params: requested params
    """
    if not is_valid_schema(endpoint, **params):
        return []
    url = make_url(endpoint, **params)
    try:
        response = request.urlopen(url)
        return json.loads(response.read().decode(encoding))
    except (error.HTTPError, error.URLError) as err:
        # Log at debug level and fall back to an empty list.
        logging.debug("Can't get '%s'.\n%s", url, err)
        return []
def aggregate_dicts(dicts: t.Sequence[dict], agg: str = "mean") -> dict:
    """Collapse a list of same-keyed dictionaries into one dictionary.

    Every value found under a given key across ``dicts`` is reduced to a
    single value with the aggregator named by ``agg``; nested dictionaries
    are aggregated recursively.

    Parameters
    ----------
    dicts : sequence of dicts
        The dictionaries to aggregate (all must share the same keys).
    agg : {'mean', 'stdev', 'sum', 'median', 'min', 'max'}
        Name of the reduction applied across dictionaries.
    """
    reducers: t.Dict[str, t.Callable] = {
        "mean": np.mean,
        "stdev": np.std,
        "sum": np.sum,
        "median": np.median,
        "min": np.min,
        "max": np.max,
    }
    assert len(dicts) > 0
    out = {}
    for key in dicts[0].keys():
        column = [entry[key] for entry in dicts]
        if isinstance(column[0], dict):
            # Nested dicts: aggregate each sub-key the same way.
            out[key] = aggregate_dicts(column, agg)
        else:
            out[key] = reducers[agg](column, axis=0)
    return out
def aggregation_most_frequent(logits):
    """This aggregation mechanism takes the softmax/logit output of several
    models resulting from inference on identical inputs and computes the most
    frequent label. It is deterministic (no noise injection like noisy_max()
    above.
    :param logits: logits or probabilities for each sample
    :return: most frequent label per sample, as an int32 numpy array
    """
    # Compute labels from logits/probs and reshape array properly
    labels = labels_from_probs(logits)
    labels_shape = np.shape(labels)
    labels = labels.reshape((labels_shape[0], labels_shape[1]))
    # Initialize array to hold final labels
    result = np.zeros(int(labels_shape[1]))
    # Parse each sample
    # BUG FIX: `xrange` is Python 2 only and raises NameError on Python 3.
    for i in range(int(labels_shape[1])):
        # Count number of votes assigned to each class
        label_counts = np.bincount(labels[:, i], minlength=10)
        label_counts = np.asarray(label_counts, dtype=np.int32)
        # Result is the most frequent label
        result[i] = np.argmax(label_counts)
    return np.asarray(result, dtype=np.int32)
import os
def sanitize_path(path):
    """Validate that *path* is absolute, then resolve it.

    Side effect: changes the process working directory to *path* (which
    also verifies the directory exists). Returns the resolved cwd.
    """
    if path.startswith("/"):
        os.chdir(path)
        return os.getcwd()
    raise Exception("Path must be fully qualified.")
def handle_led(req):
    """Switch the requested GPIO pin per ``req.state`` and reply with a LedResponse."""
    # LEDs are low-active: state 1 -> HIGH, any other value -> LOW.
    # The pin is only driven on the 'minibot' host.
    level = GPIO.HIGH if req.state == 1 else GPIO.LOW
    if hostname == 'minibot':
        GPIO.output(req.pin, level)
    # debug
    rospy.loginfo("GPIO %s switched to %s. Result: %s", req.pin, req.state, req.pin)
    # The name of the 'xyzResponse' comes directly from the Xyz.srv filename!
    return LedResponse(req.pin)
from typing import Dict
def evaluate_submission_with_proto(
    submission: Submission,
    ground_truth: Submission,
) -> Dict[str, list]:
    """Calculates various motion prediction metrics given
    the submission and ground truth protobuf messages.
    Args:
        submission (Submission): Proto message with predicted trajectories.
        ground_truth (Submission): Proto message with ground truth trajectories.
    Raises:
        ValueError:
            Number of objects in submission is not equal to number of objects in ground truth.
        ValueError:
            Objects order in submission violates objects order in ground truth.
    Returns:
        Dict[str, list]: Mapping from metric name to the list of per-prediction
        values (one entry per prediction; values are NOT aggregated here —
        the previous doc/annotation claiming ``float`` was wrong).
    """
    _check_submission_and_ground_truth(submission, ground_truth)
    metrics = defaultdict(list)
    # Index ground truth by (scene, track) for O(1) lookup per prediction.
    gt_map = {
        (prediction.scene_id, prediction.track_id): prediction
        for prediction in ground_truth.predictions
    }
    for pred in submission.predictions:
        gt = gt_map[(pred.scene_id, pred.track_id)]
        if pred.scene_id != gt.scene_id:
            raise ValueError(f'Check scenes order: {pred.scene_id} != {gt.scene_id}')
        if pred.track_id != gt.track_id:
            raise ValueError(f'Check objects order: {pred.track_id} != {gt.track_id}')
        pred_trajectories, weights = get_trajectories_weights_arrays(pred.weighted_trajectories)
        # Keep the MAX_NUM_MODES highest-weight modes; compute the sort
        # order once instead of calling argsort twice.
        order = np.argsort(weights)
        pred_trajectories = pred_trajectories[order][-MAX_NUM_MODES:]
        weights = weights[order][-MAX_NUM_MODES:]
        gt_trajectory, _ = get_trajectories_weights_arrays(gt.weighted_trajectories)
        gt_trajectory = gt_trajectory[0]  # Reduce modes dim
        metrics['avg_ade'].append(avg_ade(gt_trajectory, pred_trajectories))
        metrics['avg_fde'].append(avg_fde(gt_trajectory, pred_trajectories))
        metrics['min_ade'].append(min_ade(gt_trajectory, pred_trajectories))
        metrics['min_fde'].append(min_fde(gt_trajectory, pred_trajectories))
        metrics['top1_ade'].append(top1_ade(gt_trajectory, pred_trajectories, weights))
        metrics['top1_fde'].append(top1_fde(gt_trajectory, pred_trajectories, weights))
        metrics['weighted_ade'].append(weighted_ade(gt_trajectory, pred_trajectories, weights))
        metrics['weighted_fde'].append(weighted_fde(gt_trajectory, pred_trajectories, weights))
        metrics['log_likelihood'].append(log_likelihood(gt_trajectory, pred_trajectories, weights))
        metrics['corrected_nll'].append(
            corrected_negative_log_likelihood(gt_trajectory, pred_trajectories, weights))
        metrics['is_ood'].append(gt.is_ood)
    return metrics
def build_optimizer(args, model):
    """
    Build an optimizer based on the arguments given.

    ``args`` is a dict with keys ``'optim'``, ``'learning_rate'`` and
    ``'weight_decay'``; ``'optim'`` is matched case-insensitively against
    sgd / adadelta / adamw.

    Raises ValueError for an unrecognized optimizer name.
    """
    name = args['optim'].lower()
    if name == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=args['learning_rate'], momentum=0.9, weight_decay=args['weight_decay'])
    elif name == 'adadelta':
        optimizer = optim.Adadelta(model.parameters(), lr=args['learning_rate'], weight_decay=args['weight_decay'])
    elif name == 'adamw':
        optimizer = optim.AdamW(model.parameters(), lr=args['learning_rate'], weight_decay=args['weight_decay'])
    else:
        # BUG FIX: `args` is a dict, so the old `args.optim` raised
        # AttributeError here instead of the intended ValueError.
        raise ValueError("Unknown optimizer: %s" % args['optim'])
    return optimizer
import re
def decorator_matcher(func_names, keyword, fcreate=None):
    """Search pattern @[namespace]<func_name>("<skey>")
    Parameters
    ----------
    func_names : list
        List of macro names to match.
    keyword : str
        Definition keyword (e.g. "def") used to pull the symbol name from
        the following line when the decorator carries no string key.
    fcreate : Function (skey, path, range, func_name) -> result.
    """
    # BUG FIX: the pattern tail used \( \" \s inside a plain (non-raw)
    # string literal — invalid escape sequences that warn on modern
    # Python. All pattern fragments are now raw strings.
    decorator = r"@?(?P<decorator>([a-zA-Z_]?[a-zA-Z_0-9.]*.)?("
    decorator += "|".join(re.escape(x) for x in func_names)
    decorator += r"))((\(\"(?P<skey>[^\"]+)\")|(\s*\Z))"
    nextline = keyword + r"\s+(?P<skey>[a-zA-Z_0-9]+)\("
    decorator = re.compile(decorator)
    nextline = re.compile(nextline)
    def _matcher(path, source, begin_line=0, end_line=None):
        # Accept either the full text or a pre-split list of lines.
        source = source.split("\n") if isinstance(source, str) else source
        results = []
        end_line = min(end_line, len(source)) if end_line else len(source)
        for line in range(begin_line, end_line):
            content = source[line]
            match = decorator.match(content)
            if match:
                skey = match.group("skey")
                if skey:
                    start, end = match.span("skey")
                    lineno = line
                # Decorator without an explicit key: take the symbol name
                # from the next (definition) line instead.
                if not skey and line + 1 < len(source):
                    match_name = nextline.match(source[line + 1])
                    if match_name:
                        skey = match_name.group("skey")
                        start, end = match_name.span("skey")
                        lineno = line + 1
                if skey:
                    start_pos = Position(lineno, start)
                    end_pos = Position(lineno, end)
                    item = fcreate(skey, path,
                                   Range(start_pos, end_pos),
                                   match.group("decorator"))
                    if item:
                        results.append(item)
        return results
    return _matcher
def log_binomial(n, k, tol=0.):
    """Compute the log binomial coefficient ``log C(n, k)`` elementwise.

    For ``tol >= 0.02`` a shifted Stirling approximation of the log Beta
    function is used via :func:`log_beta`; below that threshold the exact
    ``lgamma`` formulation is cheaper.

    :param torch.Tensor n: A nonnegative integer tensor.
    :param torch.Tensor k: An integer tensor ranging in ``[0, n]``.
    :rtype: torch.Tensor
    """
    assert isinstance(tol, (float, int)) and tol >= 0
    np1 = n + 1
    if tol < 0.02:
        # Exact path: log C(n, k) = lgamma(n+1) - lgamma(k+1) - lgamma(n-k+1).
        return np1.lgamma() - (k + 1).lgamma() - (np1 - k).lgamma()
    return -np1.log() - log_beta(k + 1, np1 - k, tol=tol)
def error_500(request, *args, **kwargs):
    """
    Throws a JSON response for INTERNAL errors.

    :param request: the request (unused; required by the handler signature)
    :return: JsonResponse with status code 500
    """
    # Typo fix in the user-facing message: "ocurred" -> "occurred".
    message = "An internal server error occurred"
    response = JsonResponse(data={"message": message, "status_code": 500})
    response.status_code = 500
    return response
from datetime import datetime
import warnings
def datetimes_to_durations(start_times, end_times, fill_date=datetime.today(), freq="D", dayfirst=False, na_values=None):
    """
    This is a very flexible function for transforming arrays of start_times and end_times
    to the proper format for lifelines: duration and event observation arrays.
    Parameters
    ----------
    start_times: an array, Series or DataFrame
        iterable representing start times. These can be strings, or datetime objects.
    end_times: an array, Series or DataFrame
        iterable representing end times. These can be strings, or datetimes. These values can be None, or an empty string, which corresponds to censorship.
    fill_date: datetime, optional (default=datetime.Today())
        the date to use if end_times is a None or empty string. This corresponds to last date
        of observation. Anything after this date is also censored.
    freq: string, optional (default='D')
        the units of time to use. See Pandas 'freq'. Default 'D' for days.
    dayfirst: bool, optional (default=False)
        convert assuming European-style dates, i.e. day/month/year.
    na_values : list, optional
        list of values to recognize as NA/NaN. Ex: ['', 'NaT']
    Returns
    -------
    T: numpy array
        array of floats representing the durations with time units given by freq.
    C: numpy array
        boolean array of event observations: 1 if death observed, 0 else.
    Examples
    --------
    .. code:: python
        from lifelines.utils import datetimes_to_durations
        start_dates = ['2015-01-01', '2015-04-01', '2014-04-05']
        end_dates = ['2016-02-02', None, '2014-05-06']
        T, E = datetimes_to_durations(start_dates, end_dates, freq="D")
        T # array([ 397., 1414.,   31.])
        E # array([ True, False,  True])
    """
    fill_date = pd.to_datetime(fill_date)
    freq_string = "timedelta64[%s]" % freq
    start_times = pd.Series(start_times).copy()
    end_times = pd.Series(end_times).copy()
    # Event-observed mask: True where an end time is present and is not one
    # of the user-supplied NA tokens (default: the empty string).
    C = ~(pd.isnull(end_times).values | end_times.isin(na_values or [""]))
    # Censored subjects get the fill (last-observation) date as their end time.
    end_times[~C] = fill_date
    start_times_ = pd.to_datetime(start_times, dayfirst=dayfirst)
    # errors="coerce": unparseable end dates become NaT rather than raising.
    end_times_ = pd.to_datetime(end_times, dayfirst=dayfirst, errors="coerce")
    # Any event recorded after the cutoff is treated as censored.
    deaths_after_cutoff = end_times_ > fill_date
    C[deaths_after_cutoff] = False
    # Durations in units of `freq` (float); negative values indicate bad input.
    T = (end_times_ - start_times_).values.astype(freq_string).astype(float)
    if (T < 0).sum():
        warnings.warn("Warning: some values of start_times are after end_times.\n", UserWarning)
    return T, C.values | f30f64ac90b92a8614e60119d93f14954e24e2ef | 3,638,054
from datetime import datetime
import calendar
def offset_from_date(v, offset, gran='D', exact=False):
    """
    Given a date string and some numeric offset, as well as a unit, then compute
    the offset from that value by offset gran's. Gran defaults to D. If exact
    is set to true, then the exact date is figured out, otherwise the level of
    granuality given by gran is used. Returns a date string.

    `v` is an ISO-like compact timestamp string "YYYYMMDDTHHMMSS" that may be
    truncated at any component boundary (index 8 is the 'T' separator).
    An empty `v` yields 'FUTURE_REF'/'PAST_REF' depending on the offset sign.
    """
    gran = string_conversions.units_to_gran(gran)
    # check for valid refdate
    if len(v) > 0:
        # Extract date components into a datetime object for manipulation
        y = int(v[:4])
        m = int(v[4:6])
        if len(v) >= 8:
            d = int(v[6:8])
            really_d = True
        else:
            really_d = False
            d = 1
        if len(v) >= 11:
            h = int(v[9:11])
        else:
            h = None
        dt = datetime.datetime(y, m, d)
        if len(v) >= 13:
            # renamed from `min` to avoid shadowing the builtin
            minute = int(v[11:13])
        else:
            minute = None
        if h is not None:
            dt = datetime.datetime(y, m, d, h)
            if len(v) >= 15:
                s = int(v[13:15])
                dt = datetime.datetime(y, m, d, h, minute, s)
            else:
                s = None
            if minute is not None:
                # NOTE: this overwrites the seconds-precision dt built just
                # above; harmless, as no output format below prints seconds.
                dt = datetime.datetime(y, m, d, h, minute)
    elif offset >= 1:
        return 'FUTURE_REF'
    elif offset <= -1:
        return 'PAST_REF'
    else:
        return v
    # Do manipulations
    if gran == 'TM':
        # minutes
        dt += datetime.timedelta(minutes=offset)
        return dt.strftime('%Y%m%dT%H%M')
    elif gran == 'TH':
        # hours
        dt += datetime.timedelta(hours=offset)
        if exact:
            return dt.strftime('%Y%m%dT%H%M')
        else:
            return dt.strftime('%Y%m%dT%H')
    elif gran == 'D':
        # days
        dt += datetime.timedelta(days=offset)
        if exact and minute is not None:
            return dt.strftime('%Y%m%dT%H%M')
        elif exact and h is not None:
            return dt.strftime('%Y%m%dT%H')
        else:
            return dt.strftime('%Y%m%d')
    elif gran == 'W' or gran == 'F':
        # weeks/fortnights
        if gran == 'F':
            offset *= 2
        dt += datetime.timedelta(weeks=offset)
        if exact:
            return dt.strftime('%Y%m%d')
        else:
            return dt.strftime('%YW%W')
    elif gran == 'M':
        # months - timedelta rather annoyingly doesn't support months, so we
        # need to do a bit more work here
        m += offset
        # BUG FIX: the old int(m / 12) / m %= 12 normalisation was off by a
        # year for negative months in -11..-1 (e.g. month 1 with offset -2
        # gave November of the SAME year) and crashed with month 0 when the
        # offset landed exactly on a year boundary going backwards. Floor
        # division on (m - 1) maps any integer to the correct year delta
        # plus a month in 1..12.
        y += (m - 1) // 12
        m = (m - 1) % 12 + 1
        # avoid bad days
        dt = None
        while dt is None and d > 0:
            try:
                dt = datetime.datetime(y, m, d)
            except ValueError:
                d -= 1
        if exact:
            return dt.strftime('%Y%m%d')
        else:
            return dt.strftime('%Y%m')
    elif gran == 'Y' or gran == 'E' or gran == 'C':
        # years/decades/centuries - again, need to do a bit more work
        if gran == 'C':
            offset *= 100
        if gran == 'E':
            offset *= 10
        y += offset
        # Python doesn't allow datetime objects to be created representing years
        # before 1970, so do this the old fashioned way
        if not exact:
            if gran == 'C':
                return ("{0:04d}".format(y))[:2]
            elif gran == 'E':
                return ("{0:04d}".format(y))[:3]
            else:
                return "%04d" % y
        else:
            if d == 29 and m == 2 and not calendar.isleap(y):
                # eugh, mucking about with a date that's not going to be in the
                # target year - fall back
                d = 28
            if really_d:
                return "%04d%02d%02d" % (y, m, d)
            else:
                return "%04d%02d" % (y, m)
    elif offset >= 1:
        return 'FUTURE_REF'
    elif offset <= -1:
        return 'PAST_REF'
    else:
        return v
def _strip(x):
"""remvoe tensor-hood from the input structure"""
if isinstance(x, Tensor):
x = x.item()
elif isinstance(x, dict):
x = {k: _strip(v) for k, v in x.items()}
return x | 74023594b2da6f58dea425411d72ca493fb80f61 | 3,638,056 |
def rank_array(a, descending=True):
    """Rank array counting from 1 (rank 1 = largest value when descending)."""
    order = np.argsort(a)
    if descending:
        order = order[::-1]
    # Scatter 1..n back into the original positions.
    ranks = np.empty_like(order)
    ranks[order] = np.arange(len(a)) + 1
    return ranks
import torch
def create_board_game_mcts(observation_spec,
                           action_spec,
                           dirichlet_alpha: float,
                           pb_c_init=1.25,
                           num_simulations=800,
                           debug_summaries=False):
    """Helper function for creating MCTSAlgorithm for board games."""
    def visit_softmax_temperature(num_moves):
        # Temperature 1 for the first 30 moves, then effectively greedy.
        # The paper pseudocode uses 0.0; the current code does not support
        # 0.0, so a tiny value is used instead — a difference of 1e-3 in
        # visit probability translates to roughly an exp(1e-3*1e10)
        # probability ratio, so it makes no practical difference.
        ones = torch.ones_like(num_moves, dtype=torch.float32)
        return torch.where(num_moves >= 30, ones * 1e-10, ones)
    return MCTSAlgorithm(
        observation_spec=observation_spec,
        action_spec=action_spec,
        discount=1.0,
        root_dirichlet_alpha=dirichlet_alpha,
        root_exploration_fraction=0.25,
        num_simulations=num_simulations,
        pb_c_init=pb_c_init,
        pb_c_base=19652,
        visit_softmax_temperature_fn=visit_softmax_temperature,
        known_value_bounds=(-1, 1),
        is_two_player_game=True)
def normalize(params, axis=0):
    """
    Normalize the parameters array so entries sum to 1 along ``axis``.

    :param params: array of parameters of shape [axis0, axis1, ..., axisp] p can be variable
    :param axis: axis along which the sums are computed (default 0)
    :return: array of the same shape whose slices along ``axis`` sum to 1
    """
    # keepdims preserves the summed axis so broadcasting divides correctly.
    return params / np.sum(params, axis=axis, keepdims=True)
from re import DEBUG
def product_soft_update(uuid):
    """
    Product soft update route
    :return Endpoint with RESTful pattern
    # pylint: disable=line-too-long
    See https://madeiramadeira.atlassian.net/wiki/spaces/CAR/pages/2244149708/WIP+-+Guidelines+-+RESTful+e+HATEOS
    :rtype flask.Response
    ---
    patch:
        summary: Soft Product Update
        parameters:
            - in: path
              name: uuid
              description: "Product Id"
              required: true
              schema:
                type: string
                format: uuid
                example: 4bcad46b-6978-488f-8153-1c49f8a45244
        requestBody:
            description: 'Product field to be updated'
            required: true
            content:
                application/json:
                    schema: ProductSoftUpdateRequestSchema
        responses:
            200:
                description: Success response
                content:
                    application/json:
                        schema: ProductUpdateResponseSchema
            4xx:
                description: Error response
                content:
                    application/json:
                        schema: ProductUpdateErrorResponseSchema
            5xx:
                description: Service fail response
                content:
                    application/json:
                        schema: ProductUpdateErrorResponseSchema
    """
    # Parse the incoming Flask request into the project's ApiRequest wrapper.
    request = ApiRequest().parse_request(APP)
    LOGGER.info(f'request: {request}')
    status_code = 200
    response = ApiResponse(request)
    response.set_hateos(False)
    manager = ProductManager(logger=LOGGER, product_service=ProductServiceV1(logger=LOGGER))
    # NOTE(review): DEBUG here is imported from the `re` module at the top of
    # this file (a regex flag), which looks wrong — presumably an app-level
    # debug setting was intended. TODO confirm with the config module.
    manager.debug(DEBUG)
    try:
        response.set_data(manager.soft_update(request.to_dict(), uuid))
        # response.set_total(manager.count(request))
    except CustomException as error:
        LOGGER.error(error)
        # Non-validation errors are mapped to a generic 400 update error;
        # a manager-level exception, when present, wins in the payload.
        if not isinstance(error, ValidationException):
            error = ApiException(MessagesEnum.UPDATE_ERROR)
            status_code = 400
        if manager.exception:
            error = manager.exception
        response.set_exception(error)
    return response.get_response(status_code) | 7100adb6cef03a6bad8c835afb2ede074f86e56f | 3,638,060
from typing import Sequence
from typing import Dict
from typing import List
def estimate_cv_regression(
    results: pd.DataFrame, critical_values: Sequence[float]
) -> Dict[float, List[float]]:
    """Regress simulated critical-value quantiles on powers of 1/T.

    Parameters
    ----------
    results : DataFrame
        A dataframe whose rows contain the quantiles and whose columns
        contain the number of observations (T).
    critical_values : Sequence[float]
        The critical values (in percent, e.g. 1, 5, 10) to use.

    Returns
    -------
    dict
        Maps each critical value to its four regression coefficients
        (constant plus powers of 1/T), with coefficients insignificant at
        the 5% level zeroed, each rounded to 5 decimals.
    """
    surfaces = {}
    quantiles = np.asarray(results.index)
    tau = np.array(results.columns).reshape((1, -1)).T
    # Regressor matrix: [1, 1/T, 1/T^2, 1/T^3].
    rhs = (1.0 / tau) ** np.arange(4)
    for cv in critical_values:
        # Pick the quantile row closest to the requested critical value.
        row = np.argmin(np.abs(100 * quantiles - cv))
        lhs = np.squeeze(np.asarray(results.iloc[row]))
        fit = OLS(lhs, rhs).fit()
        coeffs = fit.params.copy()
        coeffs[fit.pvalues > 0.05] = 0.0
        surfaces[cv] = [round(val, 5) for val in coeffs]
    return surfaces
def parse_field(field_info, op):
    """Attach the field information in *field_info* to the operation *op*.

    field_info is a dictionary containing at least the key 'fields'
    (and 'constraints'); op is an elasticObject. Returns the elasticObject.
    """
    fields = field_info["fields"]
    constraints = field_info["constraints"]
    _logger.debug("fields %s", fields)
    # Any constraints — or more than one field — require the multi-field form.
    if constraints or len(fields) > 1:
        op.multiple_fields_string(fields=fields, constraints=constraints)
    else:
        op.set_field(field=fields[0])
    return op
def resize(image, size):
    """Resize multiband image to an image of size (h, w).

    Images with 4 or more channels go through skimage (which takes
    (rows, cols)); otherwise cv2 is used.
    """
    n_channels = image.shape[2]
    if n_channels >= 4:
        return skimage.transform.resize(
            image, size, mode="constant", preserve_range=True
        )
    # BUG FIX: cv2.resize expects dsize as (width, height), while `size`
    # is documented as (h, w) — reverse it so both branches agree on
    # non-square targets. (Verify callers were not compensating already.)
    return cv2.resize(image, (size[1], size[0]), interpolation=cv2.INTER_AREA)
import os
import yaml
def get_configs(conf_file=None):
    """Load configuration (e.g. db_url) from a YAML file.

    Defaults to ``config.yml`` in the directory above this module.
    """
    if not conf_file:
        here = os.path.dirname(__file__)
        conf_file = os.path.join(here, os.pardir, 'config.yml')
    with open(conf_file, 'r') as handle:
        return yaml.safe_load(handle)
def version():
    """Return the version of this cli tool (the module-level ``__version__`` string)."""
    return __version__ | 790059de16a48ea7dd5dbcc4470f2be851562146 | 3,638,065
def load_file_dangerous(file_path):
    """Load a single-lined file by eval'ing its stripped content.

    WARNING: uses bare ``eval`` with no safety check — only call this on
    fully trusted files.
    """
    #TODO: Dangerous `eval`! Be aware of the content in the file
    with open(file_path, "r") as handle:
        return eval(handle.read().strip())
def ho2cu(ho):
    """
    Homochoric vector to cubochoric vector.

    Maps homochoric vectors of shape (..., 3) to cubochoric vectors of the
    same shape by inverting the equal-volume ball-to-cube mapping.
    Relies on module-level constants ``R1``, ``beta`` and ``sc`` of the
    cubochoric parameterization (defined elsewhere in this module).

    References
    ----------
    D. Roşca et al., Modelling and Simulation in Materials Science and Engineering 22:075013, 2014
    https://doi.org/10.1088/0965-0393/22/7/075013
    """
    # Vector magnitudes; work in the pyramid-ordered frame and undo the
    # coordinate permutation at the very end.
    rs = np.linalg.norm(ho,axis=-1,keepdims=True)
    xyz3 = np.take_along_axis(ho,Rotation._get_pyramid_order(ho,'forward'),-1)
    # Divide-by-zero / invalid warnings are expected for degenerate (zero)
    # vectors and masked out below, hence errstate suppression.
    with np.errstate(invalid='ignore',divide='ignore'):
        # inverse M_3
        xyz2 = xyz3[...,0:2] * np.sqrt( 2.0*rs/(rs+np.abs(xyz3[...,2:3])) )
        qxy = np.sum(xyz2**2,axis=-1,keepdims=True)
        q2 = qxy + np.max(np.abs(xyz2),axis=-1,keepdims=True)**2
        sq2 = np.sqrt(q2)
        q = (beta/np.sqrt(2.0)/R1) * np.sqrt(q2*qxy/(q2-np.max(np.abs(xyz2),axis=-1,keepdims=True)*sq2))
        # Clip guards arccos against round-off outside [-1, 1].
        tt = np.clip((np.min(np.abs(xyz2),axis=-1,keepdims=True)**2\
            +np.max(np.abs(xyz2),axis=-1,keepdims=True)*sq2)/np.sqrt(2.0)/qxy,-1.0,1.0)
        T_inv = np.where(np.abs(xyz2[...,1:2]) <= np.abs(xyz2[...,0:1]),
                          np.block([np.ones_like(tt),np.arccos(tt)/np.pi*12.0]),
                          np.block([np.arccos(tt)/np.pi*12.0,np.ones_like(tt)]))*q
        # Restore the signs of the planar components; zero out the
        # numerically-degenerate qxy == 0 cases.
        T_inv[xyz2<0.0] *= -1.0
        T_inv[np.broadcast_to(np.isclose(qxy,0.0,rtol=0.0,atol=1.0e-12),T_inv.shape)] = 0.0
        cu = np.block([T_inv, np.where(xyz3[...,2:3]<0.0,-np.ones_like(xyz3[...,2:3]),np.ones_like(xyz3[...,2:3])) \
                              * rs/np.sqrt(6.0/np.pi),
                      ])/ sc
    # Exactly-zero input vectors map to the cube origin.
    cu[np.isclose(np.sum(np.abs(ho),axis=-1),0.0,rtol=0.0,atol=1.0e-16)] = 0.0
    cu = np.take_along_axis(cu,Rotation._get_pyramid_order(ho,'backward'),-1)
    return cu | be55f70c27be789a51c9aee0ba94b36879755915 | 3,638,067
def apparent_resistivity(
    dc_survey, survey_type='dipole-dipole',
    space_type='half-space', dobs=None,
    eps=1e-10
):
    """
    Calculate apparent resistivity. Assuming that data are normalized
    voltages - Vmn/I (Potential difference [V] divided by injection
    current [A]). For fwd modelled data an injection current of 1A is
    assumed in SimPEG.
    Input:
    :param SimPEG.EM.Static.DC.SurveyDC.Survey dc_survey: DC survey object
    :param numpy.ndarray dobs: normalized voltage measurements [V/A]
    :param str survey_type: Either 'dipole-dipole' | 'pole-dipole' |
        'dipole-pole' | 'pole-pole'
    :param float eps: Regularizer in case of a null geometric factor
    Output:
    :return rhoApp: apparent resistivity
    """
    # Use dobs in survey if dobs is None
    if dobs is None:
        if dc_survey.dobs is None:
            # BUG FIX: previously raised a bare, message-less Exception,
            # which made the failure impossible to diagnose.
            raise Exception(
                'Observed data must be supplied either via the dobs '
                'argument or on the survey object (dc_survey.dobs).'
            )
        else:
            dobs = dc_survey.dobs
    # Calculate Geometric Factor
    G = geometric_factor(
        dc_survey, survey_type=survey_type, space_type=space_type
    )
    # Calculate apparent resistivity
    # absolute value is required because of the regularizer
    rhoApp = np.abs(dobs*(1./(G+eps)))
    return rhoApp
import json
def readJsonFile(filePath):
    """Deserialize the contents of a JSON file.

    Args:
        filePath (str): location of the json file

    Returns:
        The parsed JSON value (dict, list, str, number, bool or None).
    """
    with open(filePath, 'r') as handle:
        return json.load(handle)
def _sort_torch(tensor):
"""Update handling of sort to return only values not indices."""
sorted_tensor = _i("torch").sort(tensor)
return sorted_tensor.values | 0480d397726f9cd97c9fa9b7e566db1d9266a75a | 3,638,070 |
def lambda_handler(event, context):
    """
    Dispatch an incoming Alexa request to the matching handler based on
    its type (LaunchRequest, IntentRequest, etc). The JSON body of the
    request is provided in the event parameter.
    """
    session = event['session']
    request_body = event['request']
    print("event.session.application.applicationId=" +
          session['application']['applicationId'])
    # Uncomment this check and fill in your skill's application ID to
    # prevent someone else from configuring a skill that sends requests
    # to this function.
    # if (session['application']['applicationId'] !=
    #         "amzn1.echo-sdk-ams.app.[unique-value-here]"):
    #     raise ValueError("Invalid Application ID")
    if session['new']:
        on_session_started({'requestId': request_body['requestId']}, session)
    request_type = request_body['type']
    if request_type == "LaunchRequest":
        return on_launch(request_body, session)
    if request_type == "IntentRequest":
        return on_intent(request_body, session)
    if request_type == "SessionEndedRequest":
        return on_session_ended(request_body, session)
def get_authorized_client(config):
    """Get an OAuth-authorized client.

    Runs the interactive OAuth2 authorization-code flow: prints a consent
    URL, waits for the user to paste back the redirect URL, then fetches
    the access token.

    Following
    http://requests-oauthlib.readthedocs.org/en/latest/examples/google.html

    :param config: mapping with a 'client' section containing 'id',
        'secret' and 'redirect_uri'.
    :return: an authorized ``requests_oauthlib.OAuth2Session``.
    """
    client = requests_oauthlib.OAuth2Session(
        client_id=config['client']['id'],
        scope=SCOPE,
        redirect_uri=config['client']['redirect_uri'])
    # redirect user for authorization
    authorization_url, state = client.authorization_url(
        url=AUTHORIZATION_BASE_URL,
        access_type='offline',  # offline for refresh token
        approval_prompt='force')  # force to always make user click authorize
    print('Please go here and authorize,', authorization_url)
    # get the authorization verifier code from the callback url
    redirect_response = input('Paste the full redirect URL here: ')
    # fetch the access token
    client.fetch_token(
        token_url=TOKEN_URL,
        client_secret=config['client']['secret'],
        authorization_response=redirect_response)
    return client
from datetime import datetime
def query_obs_4h(session, station_name: str, start: datetime, end: datetime) -> pd.DataFrame:
    """
    Read and parse observation data from SQLite.

    Queries the ObsDataQcLinear table for one station over the
    [start, end] window and replaces the +/-999.0 missing-value
    sentinels with NaN.

    :param session: SQLAlchemy session bound to the observation database.
    :param station_name: station name used to filter records.
    :param start: inclusive window start (truncated to the hour below).
    :param end: inclusive window end (truncated to the hour below).
    :return: DataFrame of the selected water-quality columns.
    """
    # Timestamps are stored as text with minutes/seconds zeroed.
    time_format = "%Y-%m-%d %H:00:00"
    resp = session.query(
        ObsDataQcLinear.time,
        ObsDataQcLinear.watertemp,
        ObsDataQcLinear.pH,
        ObsDataQcLinear.DO,
        ObsDataQcLinear.conductivity,
        ObsDataQcLinear.turbidity,
        ObsDataQcLinear.codmn,
        ObsDataQcLinear.nh3n,
        ObsDataQcLinear.tp,
        ObsDataQcLinear.tn) \
        .filter_by(name=station_name) \
        .filter(between(ObsDataQcLinear.time, start.strftime(time_format), end.strftime(time_format))) \
        .all()
    data = pd.DataFrame(resp)
    # +/-999.0 are the database's missing-value sentinels.
    return data.replace([-999.0, 999.0], [np.nan, np.nan])
def get_valid_user_input(*, prompt='', strict=False):
    """Prompt repeatedly until the user enters a valid fraction.

    Keyword-only arguments:
        prompt: text shown before reading input.
        strict: when True, parse with parse_fraction_strict instead of
            the plain Fraction constructor.

    Returns:
        The user's input converted to a Fraction.
    """
    convert = parse_fraction_strict if strict else Fraction
    while True:
        raw = input(prompt)
        try:
            result = convert(raw)
        except (ValueError, ZeroDivisionError):
            print('Format error, please try again')
        else:
            return result
import os
def get_rsync_pid_file_path(image_id):
    """Build the filesystem path of the pid file for an rsync transfer.

    :param image_id: identifier of the image being transferred; used as
        the pid file name under ``CONF.baremetal.rsync_pid_path``.
    :return: full path string for the rsync pid file.
    """
    pid_dir = CONF.baremetal.rsync_pid_path
    return os.path.join(pid_dir, image_id)
def binary_irrev(t, kf, prod, major, minor, backend=None):
    """Analytic product transient of a irreversible 2-to-1 reaction.
    Product concentration vs time from second order irreversible kinetics.
    Parameters
    ----------
    t : float, Symbol or array_like
    kf : number or Symbol
        Forward (bimolecular) rate constant.
    prod : number or Symbol
        Initial concentration of the complex.
    major : number or Symbol
        Initial concentration of the more abundant reactant.
    minor : number or Symbol
        Initial concentration of the less abundant reactant.
    backend : module or str
        Default is 'numpy', can also be e.g. ``sympy``.
    """
    be = get_backend(backend)
    # The same exponential decay term appears in numerator and denominator.
    decay = be.exp(-kf * (major - minor) * t)
    numerator = major * (1 - decay)
    denominator = major / minor - decay
    return prod + numerator / denominator
def adapted_rand_error(seg, gt, all_stats=False):
    r"""Compute Adapted Rand error as defined by the SNEMI3D contest [1]
    Formula is given as 1 - the maximal F-score of the Rand index
    (excluding the zero component of the original labels). Adapted
    from the SNEMI3D MATLAB script, hence the strange style.
    Parameters
    ----------
    seg : np.ndarray
        the segmentation to score, where each value is the label at that point
    gt : np.ndarray, same shape as seg
        the groundtruth to score against, where each value is a label
    all_stats : boolean, optional
        whether to also return precision and recall as a 3-tuple with rand_error
    Returns
    -------
    are : float
        The adapted Rand error; equal to $1 - \frac{2pr}{p + r}$,
        where $p$ and $r$ are the precision and recall described below.
    prec : float, optional
        The adapted Rand precision. (Only returned when `all_stats` is ``True``.)
    rec : float, optional
        The adapted Rand recall. (Only returned when `all_stats` is ``True``.)
    References
    ----------
    [1]: http://brainiac2.mit.edu/SNEMI3D/evaluation
    """
    # segA is query, segB is truth
    segA = seg
    segB = gt
    n = segA.size
    # This is the contingency table obtained from segA and segB, we obtain
    # the marginal probabilities from the table.
    p_ij = contingency_table(segA, segB, norm=False)
    # Sum of the joint distribution squared
    sum_p_ij = p_ij.data @ p_ij.data
    # These are the axis-wise sums (np.sum along each axis)
    a_i = p_ij.sum(axis=0).A.ravel()
    b_i = p_ij.sum(axis=1).A.ravel()
    # Sum of the segment labeled 'A'
    sum_a = a_i @ a_i
    # Sum of the segment labeled 'B'
    sum_b = b_i @ b_i
    # This is the new code, wherein 'n' is subtracted from the numerator
    # and the denominator.
    precision = (sum_p_ij - n)/ (sum_a - n)
    recall = (sum_p_ij - n)/ (sum_b - n)
    # Adapted Rand error is one minus the harmonic mean (F-score) of
    # precision and recall.
    fscore = 2. * precision * recall / (precision + recall)
    are = 1. - fscore
    if all_stats:
        return (are, precision, recall)
    else:
        return are
from typing import List
def get_knp_span(type_: str, span: Span) -> List[Span]:
    """Get knp tag or bunsetsu list.

    Walks the sentence-level knp element list and maps each element back
    onto character spans of the original doc, storing the knp element in
    each sub-span's user data.

    :param type_: knp element kind; must not be MORPH.
    :param span: spaCy span whose sentence carries the knp annotations.
    :return: list of sub-spans, one per knp element; empty when the
        sentence has no annotation list.
    """
    assert type_ != MORPH
    knp_list = span.sent._.get(getattr(KNP_USER_KEYS, type_).list_)
    if not knp_list:
        return []
    res = []
    # Elements are contiguous: each starts where the previous one ended.
    i = span.start_char
    doc = span.doc
    for b in knp_list:
        # b.midasi is the element's surface string; its length advances
        # the character cursor.
        j = i + len(b.midasi)
        bspan = doc.char_span(i, j)
        bspan._.set(getattr(KNP_USER_KEYS, type_).element, b)
        res.append(bspan)
        i = j
    return res
def get_namespace_leaf(namespace):
    """
    Return the last dotted component (leaf) of a namespace string.
    >>> get_namespace_leaf('foo.bar')
    'bar'
    >>> get_namespace_leaf('foo')
    'foo'
    :param namespace: dotted namespace string, e.g. ``'foo.bar'``.
    :return: text after the final dot, or the whole string when there
        is no dot.
    """
    # rpartition returns ('', '', namespace) when no dot is present,
    # so index [2] covers both cases.
    return namespace.rpartition(".")[2]
from typing import Tuple
def fft_real_dB(sig: np.ndarray,
                sample_interval_s: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    One-sided FFT of a real signal, with magnitude expressed in dB.

    :param sig: array with input signal
    :param sample_interval_s: sample interval in seconds
    :return: four numpy ndarrays: fft_frequency_pos, fft_sig_pos,
        fft_spectral_power_pos_dB, fft_spectral_phase_radians
    """
    n_points = len(sig)
    # Normalize so RMS power levels come out correctly (sqrt(2) -> 1).
    spectrum = np.fft.rfft(sig) / n_points
    freqs = np.fft.rfftfreq(n_points, d=sample_interval_s)
    # EPSILON keeps log10 finite for zero-magnitude bins.
    power_db = 10. * np.log10(2. * np.abs(spectrum) ** 2. + EPSILON)
    phase = np.angle(spectrum)
    return freqs, spectrum, power_db, phase
from typing import Union
from typing import List
def get_ipv4_gateway_mac_address_over_ssh(connected_ssh_client: SSHClient,
                                          target_os: str = 'MacOS',
                                          gateway_ipv4_address: str = '192.168.0.254') -> Union[None, str]:
    """
    Get MAC address of IPv4 gateway in target host over SSH
    :param connected_ssh_client: Already connected SSH client
    :param target_os: MacOS, Linux or Windows (Installation of OpenSSH For Windows: https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_install_firstuse)
    :param gateway_ipv4_address: IPv4 address of gateway
    :return: None if error or MAC address string

    NOTE(review): error messages reference a module-level ``args.target_ip``
    — presumably CLI arguments parsed elsewhere in this script; confirm.
    """
    gateway_mac_address: Union[None, str] = None
    try:
        # Query the remote host's ARP cache; Windows has no '-n' flag and
        # needs findstr to isolate the gateway's row.
        if target_os == 'Windows':
            arp_table_command: str = 'arp -a ' + gateway_ipv4_address + ' | findstr ' + gateway_ipv4_address
        else:
            arp_table_command: str = 'arp -an ' + gateway_ipv4_address
        stdin, stdout, stderr = connected_ssh_client.exec_command(arp_table_command)
        arp_table: bytes = stdout.read()
        arp_table: str = arp_table.decode('utf-8')
        # Validate the command output before parsing; assertion messages
        # are printed (not raised) via the handler below.
        assert 'No route to host' not in arp_table, \
            'No route to host' + base.error_text(args.target_ip)
        assert arp_table != '', \
            'Not found host: ' + base.error_text(gateway_ipv4_address) + \
            ' in ARP table in host: ' + base.error_text(args.target_ip)
        if target_os == 'Windows':
            assert base.windows_mac_address_regex.search(arp_table), \
                'Not found host: ' + base.error_text(gateway_ipv4_address) + \
                ' in ARP table in host: ' + base.error_text(args.target_ip)
            # Windows prints MACs with dashes; normalize to colon-separated
            # lowercase form.
            mac_address = base.windows_mac_address_regex.search(arp_table)
            return mac_address.group(1).replace('-', ':').lower()
        else:
            # Unix 'arp -an' output: the MAC is the 4th whitespace field.
            target_arp_table: List[str] = arp_table.split(' ')
            if target_os == 'Linux':
                assert base.mac_address_validation(target_arp_table[3]), \
                    'Invalid MAC address: ' + base.error_text(target_arp_table[3])
                return target_arp_table[3]
    except AssertionError as Error:
        base.print_error(Error.args[0])
        # Fall through to return None on any validation failure.
        return gateway_mac_address
    except IndexError:
        # ARP output did not have the expected number of fields.
        return gateway_mac_address
def sort(list_):
    """
    Sort a list in place using selection sort and return it.

    :param list_: a list of mutually comparable items
    :return: the same list object, ordered ascending (numerical order
        for numbers)
    """
    for minimum in range(0, len(list_)):
        # Swap any later element that is smaller than the current slot;
        # after the inner loop, list_[minimum] holds the minimum of the
        # remaining unsorted tail.
        for c in range(minimum + 1, len(list_)):
            if list_[c] < list_[minimum]:
                # Idiomatic tuple swap instead of a temporary variable.
                list_[minimum], list_[c] = list_[c], list_[minimum]
    return list_
import requests
import tqdm
def stock_em_xgsglb(market: str = "沪市A股") -> pd.DataFrame:
    """
    New-share subscription and lot-winning lookup from Eastmoney.
    http://data.eastmoney.com/xg/xg/default_2.html
    :param market: choice of {"全部股票", "沪市A股", "科创板", "深市A股", "创业板"}
    :type market: str
    :return: new-share subscription and lot-winning data
    :rtype: pandas.DataFrame
    """
    # Server-side filter expressions keyed by market segment.
    market_map = {
        "全部股票": """(APPLY_DATE>'2010-01-01')""",
        "沪市A股": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE in ("058001001","058001008"))(TRADE_MARKET_CODE in ("069001001001","069001001003","069001001006"))""",
        "科创板": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE in ("058001001","058001008"))(TRADE_MARKET_CODE="069001001006")""",
        "深市A股": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE="058001001")(TRADE_MARKET_CODE in ("069001002001","069001002002","069001002003","069001002005"))""",
        "创业板": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE="058001001")(TRADE_MARKET_CODE="069001002002")""",
    }
    url = "http://datacenter-web.eastmoney.com/api/data/v1/get"
    params = {
        'sortColumns': 'APPLY_DATE,SECURITY_CODE',
        'sortTypes': '-1,-1',
        'pageSize': '5000',
        'pageNumber': '1',
        'reportName': 'RPTA_APP_IPOAPPLY',
        'columns': 'SECURITY_CODE,SECURITY_NAME,TRADE_MARKET_CODE,APPLY_CODE,TRADE_MARKET,MARKET_TYPE,ORG_TYPE,ISSUE_NUM,ONLINE_ISSUE_NUM,OFFLINE_PLACING_NUM,TOP_APPLY_MARKETCAP,PREDICT_ONFUND_UPPER,ONLINE_APPLY_UPPER,PREDICT_ONAPPLY_UPPER,ISSUE_PRICE,LATELY_PRICE,CLOSE_PRICE,APPLY_DATE,BALLOT_NUM_DATE,BALLOT_PAY_DATE,LISTING_DATE,AFTER_ISSUE_PE,ONLINE_ISSUE_LWR,INITIAL_MULTIPLE,INDUSTRY_PE_NEW,OFFLINE_EP_OBJECT,CONTINUOUS_1WORD_NUM,TOTAL_CHANGE,PROFIT,LIMIT_UP_PRICE,INFO_CODE,OPEN_PRICE,LD_OPEN_PREMIUM,LD_CLOSE_CHANGE,TURNOVERRATE,LD_HIGH_CHANG,LD_AVERAGE_PRICE,OPEN_DATE,OPEN_AVERAGE_PRICE,PREDICT_PE,PREDICT_ISSUE_PRICE2,PREDICT_ISSUE_PRICE,PREDICT_ISSUE_PRICE1,PREDICT_ISSUE_PE,PREDICT_PE_THREE,ONLINE_APPLY_PRICE,MAIN_BUSINESS',
        'filter': market_map[market],
        'source': 'WEB',
        'client': 'WEB',
    }
    # First request only establishes the total page count.
    r = requests.get(url, params=params)
    data_json = r.json()
    total_page = data_json['result']['pages']
    # DataFrame.append was removed in pandas 2.x; accumulate per-page
    # frames in a list and concatenate once at the end.
    page_frames = []
    # BUGFIX: `import tqdm` binds the module object, so the progress bar
    # must be called as tqdm.tqdm(...) — calling the module raised
    # TypeError before.
    for page in tqdm.tqdm(range(1, total_page + 1), leave=False):
        params.update({"pageNumber": page})
        r = requests.get(url, params=params)
        data_json = r.json()
        page_frames.append(pd.DataFrame(data_json['result']['data']))
    big_df = pd.concat(page_frames, ignore_index=True)
    # Rename raw API columns to Chinese display names; "_" marks columns
    # that are dropped by the selection below.
    big_df.columns = [
        "股票代码",
        "股票简称",
        "_",
        "申购代码",
        "_",
        "_",
        "_",
        "发行总数",
        "网上发行",
        "_",
        "顶格申购需配市值",
        "_",
        "申购上限",
        "_",
        "发行价格",
        "最新价",
        "首日收盘价",
        "申购日期",
        "中签号公布日",
        "中签缴款日期",
        "上市日期",
        "发行市盈率",
        "中签率",
        "询价累计报价倍数",
        "_",
        "配售对象报价家数",
        "连续一字板数量",
        "涨幅",
        "每中一签获利",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "行业市盈率",
        "_",
        "_",
        "_",
    ]
    big_df = big_df[
        [
            "股票代码",
            "股票简称",
            "申购代码",
            "发行总数",
            "网上发行",
            "顶格申购需配市值",
            "申购上限",
            "发行价格",
            "最新价",
            "首日收盘价",
            "申购日期",
            "中签号公布日",
            "中签缴款日期",
            "上市日期",
            "发行市盈率",
            "行业市盈率",
            "中签率",
            "询价累计报价倍数",
            "配售对象报价家数",
            "连续一字板数量",
            "涨幅",
            "每中一签获利",
        ]
    ]
    # Normalize dtypes: dates to datetime.date, the rest to numeric.
    big_df['申购日期'] = pd.to_datetime(big_df['申购日期']).dt.date
    big_df['中签号公布日'] = pd.to_datetime(big_df['中签号公布日']).dt.date
    big_df['中签缴款日期'] = pd.to_datetime(big_df['中签缴款日期']).dt.date
    big_df['发行总数'] = pd.to_numeric(big_df['发行总数'])
    big_df['网上发行'] = pd.to_numeric(big_df['网上发行'])
    big_df['顶格申购需配市值'] = pd.to_numeric(big_df['顶格申购需配市值'])
    big_df['申购上限'] = pd.to_numeric(big_df['申购上限'])
    big_df['发行价格'] = pd.to_numeric(big_df['发行价格'])
    big_df['最新价'] = pd.to_numeric(big_df['最新价'])
    big_df['首日收盘价'] = pd.to_numeric(big_df['首日收盘价'])
    big_df['发行市盈率'] = pd.to_numeric(big_df['发行市盈率'])
    big_df['行业市盈率'] = pd.to_numeric(big_df['行业市盈率'])
    big_df['中签率'] = pd.to_numeric(big_df['中签率'])
    big_df['询价累计报价倍数'] = pd.to_numeric(big_df['询价累计报价倍数'])
    big_df['配售对象报价家数'] = pd.to_numeric(big_df['配售对象报价家数'])
    big_df['涨幅'] = pd.to_numeric(big_df['涨幅'])
    big_df['每中一签获利'] = pd.to_numeric(big_df['每中一签获利'])
    return big_df
def geomfill_Mults(*args):
    """
    Thin SWIG wrapper around the GeomFill ``Mults`` free function.

    :param TypeConv: knot-vector conversion scheme to query.
    :type TypeConv: Convert_ParameterisationType
    :param TMults: output array that receives the knot multiplicities.
    :type TMults: TColStd_Array1OfInteger &
    :rtype: void
    """
    # Delegate directly to the generated C++ binding.
    return _GeomFill.geomfill_Mults(*args)
import math
def arcminutes(degrees=0, radians=0, arcseconds=0):  # pylint: disable=W0621
    """
    Convert an angle to arcminutes.

    The angle may be given in degrees, radians and/or arcseconds; the
    contributions are summed before converting.
    """
    total_degrees = degrees
    if radians:
        total_degrees += math.degrees(radians)
    if arcseconds:
        total_degrees += arcseconds / arcsec(degrees=1.)
    return total_degrees * 60.
import ctypes
def PCO_GetRecordingStruct(handle):
    """
    Get the complete set of the recording function
    settings. Please fill in all wSize parameters,
    even in embedded structures.

    :param handle: camera handle (as expected by the pixelfly SDK).
    :return: populated PCO_Recording structure.
    """
    strRecording = PCO_Recording()
    # Declare the C signature before calling into the vendor DLL so
    # ctypes marshals the handle and struct pointer correctly.
    f = pixelfly_dll.PCO_GetRecordingStruct
    f.argtypes = (ctypes.wintypes.HANDLE, ctypes.POINTER(PCO_Recording))
    f.restype = ctypes.c_int
    ret_code = f(handle, ctypes.byref(strRecording))
    # Translate a non-zero SDK return code into a Python-level error.
    PCO_manage_error(ret_code)
    return strRecording
import random
def auxiliar2(Letra, tabuleiro):
    """
    Helper for the computer's move (tic-tac-toe strategy).

    Receives the computer's symbol (`Letra`, "X" or "O") and the board
    (`tabuleiro`, indexable 1..9), and returns the board position the
    computer should play, chosen from a table of center/corner patterns.

    NOTE(review): when no pattern below matches, the function implicitly
    returns None — presumably the caller handles that case; confirm.
    """
    # Letra2 is the opponent's symbol.
    if Letra == "X":
        Letra2 = "O"
    else:
        Letra2 = "X"
    # Pattern: opponent on top edge (1), computer holds center (5).
    if tabuleiro[1] == Letra2 and tabuleiro[5] == Letra:
        if tabuleiro[3] == Letra:
            return 7
        elif tabuleiro[2] == Letra:
            return 8
        elif tabuleiro[4] == Letra:
            return 6
        elif tabuleiro[6] == Letra:
            return 4
        elif tabuleiro[7] == Letra:
            return 3
        elif tabuleiro[8] == Letra:
            return 2
        elif tabuleiro[9] == Letra:
            jogada = random.choice([3, 7])
            return jogada
    # Pattern: computer on corner 3, opponent holds center (5).
    elif tabuleiro[3] == Letra and tabuleiro[5] == Letra2:
        if tabuleiro[1] == Letra2:
            return 9
        elif tabuleiro[2] == Letra2:
            return 8
        elif tabuleiro[4] == Letra2:
            return 6
        elif tabuleiro[6] == Letra2:
            return 4
        elif tabuleiro[8] == Letra2:
            return 2
        elif tabuleiro[9] == Letra2:
            return 1
        elif tabuleiro[7] == Letra2:
            jogada = random.choice([9, 1])
            return jogada
    # Pattern: computer on corner 7, opponent holds center (5).
    elif tabuleiro[7] == Letra and tabuleiro[5] == Letra2:
        if tabuleiro[1] == Letra2:
            return 9
        elif tabuleiro[2] == Letra2:
            return 8
        elif tabuleiro[4] == Letra2:
            return 6
        elif tabuleiro[6] == Letra2:
            return 4
        elif tabuleiro[8] == Letra2:
            return 2
        elif tabuleiro[9] == Letra2:
            return 1
        elif tabuleiro[3] == Letra2:
            jogada = random.choice([9, 1])
            return jogada
    # Pattern: computer on corner 9, opponent holds center (5).
    elif tabuleiro[9] == Letra and tabuleiro[5] == Letra2:
        if tabuleiro[3] == Letra2:
            return 7
        elif tabuleiro[2] == Letra2:
            return 8
        elif tabuleiro[4] == Letra2:
            return 6
        elif tabuleiro[6] == Letra2:
            return 4
        elif tabuleiro[7] == Letra2:
            return 3
        elif tabuleiro[8] == Letra2:
            return 2
        elif tabuleiro[1] == Letra2:
            jogada = random.choice([3, 7])
            return jogada
def create_task():
    """
    Create a crawl task from the submitted crawl parameters and the name
    of a crawl-node service, then start the crawl on that node.
    :return: the node's JSON response on success, an aborted HTTP status
        on node error, or ``{"success": False, "code": -1}`` when the
        node is unreachable.
    """
    payload = request.get_json()
    # Look up the crawl node's registration info by name.
    node = db.nodes.find_one({"name": payload["node"]})
    # Unknown crawl node: respond 404.
    if node is None:
        return abort(404)
    # Persist the task record (status 0 = created) before contacting the node.
    payload["task"] = "%s@%s" % (payload["node"], strftime("%Y%m%d%H%M%S", localtime()))
    payload["status"] = 0
    payload["done"] = 0
    db.tasks.insert(payload)
    # Call the node's task endpoint to start the crawl.
    try:
        resp = post("http://%s:%d/tasks" % (node["addr"], node["port"]), json={
            "task": str(payload["_id"]),
            "type": payload["type"],
            "keyword": payload["keyword"],
            "start": payload["start"],
            "end": payload["end"]
        })
        # On success relay the node's own response (and mark the task
        # running); otherwise propagate the node's HTTP status.
        if resp.status_code == codes.ok:
            payload["status"] = 1
            db.tasks.save(payload)
            return jsonify(resp.json())
        else:
            abort(resp.status_code)
    except ConnectionError:
        # Node unreachable: roll back the task record created above.
        db.tasks.delete_one({"_id": payload["_id"]})
        return jsonify({"success": False, "code": -1})
def eco_hist_calcs(mass, bins, dlogM):
    """
    Histogram the densest and least-dense fractions of a mass sample.

    For each fraction 1/2, 1/4 and 1/10, histograms the first (lower
    density) and last (higher density) portion of `mass`, normalized to
    a number density per dex, along with Poisson errors.

    Parameters
    ----------
    mass: array-like
        A 1D array with log stellar mass values, assumed to be in an
        order which corresponds to ascending densities (necessary, as
        the index cuts are based on this)
    bins: array-like
        A 1D array with the values which will be used as the bin edges
    dlogM: float-like
        The log difference between bin edges

    Returns
    -------
    hist_dict_low: dict
        Keys 2, 4, 10 map to normalized counts (non-empty bins only) of
        the lower-density cut; keys 'err_2', 'err_4', 'err_10' map to
        the matching Poisson errors.
    hist_dict_high: dict
        Same layout as hist_dict_low, for the higher-density cut.
    bin_cens_low: dict
        Keys 2, 4, 10 map to the non-empty bin centers of the low cut.
    bin_cens_high: dict
        Keys 2, 4, 10 map to the non-empty bin centers of the high cut.
    """
    hist_dict_low = {}
    hist_dict_high = {}
    bin_cens_low = {}
    bin_cens_high = {}
    # BUGFIX: the original pre-initialized low_err/high_err with
    # `xrange`, a NameError on Python 3; those lists were dead code
    # (immediately overwritten inside the loop) and are removed.
    frac_vals = (2, 4, 10)
    edges = bins
    bin_centers = 0.5 * (edges[:-1] + edges[1:])
    for ii in frac_vals:
        err_key = 'err_{0}'.format(ii)
        frac_data = int(len(mass) / ii)
        # Lower-density cut: first 1/ii of the (density-ordered) sample.
        frac_mass = mass[0:frac_data]
        counts, edges = np.histogram(frac_mass, bins)
        low_counts = counts / float(len(frac_mass)) / dlogM
        non_zero = (low_counts != 0)
        hist_dict_low[ii] = low_counts[non_zero]
        bin_cens_low[ii] = bin_centers[non_zero]
        # Poisson errors, normalized the same way as the counts.
        low_err = np.sqrt(counts) / len(frac_mass) / dlogM
        hist_dict_low[err_key] = low_err[non_zero]
        # Higher-density cut: last 1/ii of the sample.
        frac_mass_2 = mass[-frac_data:]
        counts_2, edges_2 = np.histogram(frac_mass_2, bins)
        high_counts = counts_2 / float(len(frac_mass_2)) / dlogM
        non_zero = (high_counts != 0)
        hist_dict_high[ii] = high_counts[non_zero]
        bin_cens_high[ii] = bin_centers[non_zero]
        high_err = np.sqrt(counts_2) / len(frac_mass_2) / dlogM
        hist_dict_high[err_key] = high_err[non_zero]
    return hist_dict_low, hist_dict_high, bin_cens_low, bin_cens_high
def avgSentenceLength(text):
    """Return the average number of tokens per sentence in *text*."""
    token_count = len(langtools.tokenize(text))
    return token_count / sentenceCount(text)
def retrieve_context_topology_link_available_capacity_total_size_total_size(uuid, link_uuid):  # noqa: E501
    """Retrieve total-size
    Retrieve operation of resource: total-size # noqa: E501

    Auto-generated server stub: the actual lookup is not implemented yet
    and a placeholder string is returned.

    :param uuid: ID of uuid
    :type uuid: str
    :param link_uuid: ID of link_uuid
    :type link_uuid: str
    :rtype: CapacityValue
    """
    # TODO: replace this generated placeholder with the real retrieval.
    return 'do some magic!'
import re
def harmonize_geonames_id(uri):
    """Normalize a geonames URL to its canonical RDF (sws) form.

    Any URI containing 'geonames' has all of its digits extracted and is
    rewritten as ``http://sws.geonames.org/<id>/``; other URIs pass
    through unchanged.
    """
    if 'geonames' not in uri:
        return uri
    # Stripping non-digits is equivalent to joining re.findall(r'\d', ...).
    geo_id = re.sub(r'\D', '', uri)
    return "http://sws.geonames.org/{}/".format(geo_id)
import argparse
def get_args():
    """Parse and return the command-line arguments for Gashlycrumb."""
    parser = argparse.ArgumentParser(
        description='Gashlycrumb',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # One or more letters to look up.
    parser.add_argument('letter',
                        help='Letter(s)',
                        metavar='letter',
                        nargs='+')
    # Input file is opened by argparse itself (text mode, read-only).
    parser.add_argument('-f',
                        '--file',
                        metavar='FILE',
                        help='Input file',
                        default='gashlycrumb.txt',
                        type=argparse.FileType('rt'))
    return parser.parse_args()
def get_apikey(api):
    """Return the API key (or, for twitter, a dict of keys) for *api*.

    :param api: service name ('greynoise', 'hybrid-analysis', 'malshare',
        'pulsedive' or 'twitter').
    :return: the configured key, a twitter credentials dict, or None for
        an unknown service.
    """
    # Lambdas keep the lookups lazy: config attributes are only touched
    # for the requested service, matching the original if/elif chain.
    lookups = {
        "greynoise": lambda: config.greynoise_key,
        "hybrid-analysis": lambda: config.hybrid_analysis_apikey,
        "malshare": lambda: config.malshare_apikey,
        "pulsedive": lambda: config.pulsedive_apikey,
        "twitter": lambda: {
            "access_token": config.twitter_access_token,
            "access_token_secret": config.twitter_access_token_secret,
            "consumer_key": config.twitter_consumer_key,
            "consumer_secret": config.twitter_consumer_secret,
        },
    }
    getter = lookups.get(api)
    return getter() if getter is not None else None
def create_admin_account():
    """
    Creates a new admin account.

    Inserts an 'admin' row with freshly generated API and secret keys
    and returns the plaintext credentials; only a hash of the api_key is
    stored, so the plaintext is not recoverable later.

    :return: dict with 'api_key' (plaintext) and 'secret_key'.
    :raises RqlRuntimeError, RqlDriverError: on database failures.
    """
    try:
        original_api_key = generate_key()
        secret_key = generate_key()
        # Persist only the hash; the caller receives the plaintext once.
        hashed_api_key = generate_password_hash(original_api_key)
        Interactions.insert(DEFAULT_ACCOUNTS_TABLE,
                            **{'username': 'admin',
                               'endpoint': '',
                               'is_admin': True,
                               'api_key': hashed_api_key,
                               'secret_key': secret_key})
        return {'api_key': original_api_key, 'secret_key': secret_key}
    except (RqlRuntimeError, RqlDriverError) as err:
        raise err
def _arg_wrap(func):
""" Decorator to decorate decorators to support optional arguments. """
@wraps(func)
def new_decorator(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return func(args[0])
else:
return lambda realf: func(realf, *args, **kwargs)
return new_decorator | a74f7cf363aab33d770c5f7a080b8796d27e91ec | 3,638,096 |
def _point_as_tuple(input_string: str) -> _Tuple[float]:
"""
Attempts to parse a string as a tuple of floats.
Checks that the number of elements corresponds to the specified dimensions.
The purpose of this function more than anything else is to validate correct
syntax of a CLI argument that is supposed to be a point in space.
"""
out = tuple(float(coordinate) for coordinate in input_string.split(','))
if len(out) == DIMENSIONS:
return out
raise TypeError | f828c5ed9cbcf9b1820abaff07c0b646027e6ab9 | 3,638,097 |
import random
def generate_id():
    """Generate a random 32-character hexadecimal id string."""
    # format(..., "032x") zero-pads to 32 hex digits, same as "%032x".
    return format(random.randrange(16 ** 32), "032x")
def __charge_to_sdf(charge):
    """Translate RDKit charge to the SDF language.

    Args:
        charge (int): Numerical atom charge.

    Returns:
        str: Str representation of a charge in the sdf language.

    NOTE(review): negative charges follow the V2000 atom-block charge
    codes (-1 -> "5", -2 -> "6", -3 -> "7") while positive charges are
    emitted as signed strings; `charge == 3` yielding "+4" looks like a
    typo for "+3" — confirm against the consumer of this output before
    changing.
    """
    if charge == -3:
        return "7"
    elif charge == -2:
        return "6"
    elif charge == -1:
        return "5"
    elif charge == 0:
        return "0"
    elif charge == 1:
        return "+1"
    elif charge == 2:
        return "+2"
    elif charge == 3:
        return "+4"
    else:
        # Any other charge falls back to neutral.
        return "0"
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.