| content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
import logging
import sys
def log_stdout() -> logging.Logger:
"""
Returns stdout logging object
"""
log_level = logging.INFO
log = logging.getLogger("stdout_logger")
if not log.handlers:
log.setLevel(log_level)
sh = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")  # assumed default format; `formatter` was not defined in the original snippet
sh.setFormatter(formatter)
log.addHandler(sh)
log.handler_set = True
log.propagate = False
return log
|
f36f8c1eb641d6c9d7d44c99503105e8c7b0876d
| 15,600
|
import logging
logger = logging.getLogger(__name__)
def training_set_multiplication(training_set, mult_queue):
"""
Multiply the training set by all methods listed in mult_queue.
Parameters
----------
training_set :
set of all recordings that will be used for training
mult_queue :
list of all algorithms that will take one recording and generate more
than one.
Returns
-------
multiple recordings
"""
logger.info("Multiply data...")
for algorithm in mult_queue:
new_training_set = []
for recording in training_set:
samples = algorithm(recording["handwriting"])
for sample in samples:
new_training_set.append(
{
"id": recording["id"],
"is_in_testset": 0,
"formula_id": recording["formula_id"],
"handwriting": sample,
"formula_in_latex": recording["formula_in_latex"],
}
)
training_set = new_training_set
return training_set
|
db7105d64ba760ba88088363547795fff833cce6
| 15,601
|
def addDictionaryFromWeb(url, params=None, **kwargs):
"""
Fetches the dictionary metadata (JSON-LD) embedded in the page at the given URL,
downloads the gazetteer dictionary (CSV file) from the URL listed in that metadata,
and registers it in the database.
If dictionary data with the same identifier is already registered in the database,
it is deleted before the new dictionary data is registered.
To make the registered dictionary available, it must be activated with
``setActivateDictionaries()`` or ``activateDictionaries()``.
Parameters
----------
url : str
URL of the web page containing the dictionary metadata.
params : dict, optional
``params`` argument passed to requests.get.
**kwargs : dict, optional
Keyword arguments passed to requests.get.
Returns
-------
bool
Always True. If registration fails, an exception is raised.
Examples
--------
>>> import pygeonlp.api as api
>>> api.init()
>>> api.addDictionaryFromWeb('https://geonlp.ex.nii.ac.jp/dictionary/geoshape-city/')
True
>>> api.updateIndex()
>>> api.activateDictionaries(pattern=r'geoshape-city')
['geonlp:geoshape-city']
>>> geowords = api.searchWord('千代田区')
>>> len(geowords)
1
>>> next(iter(geowords.values()))['dictionary_identifier']
'geonlp:geoshape-city'
"""
_check_initialized()
return _default_service.addDictionaryFromWeb(url, params, **kwargs)
|
52ea0c105dd5b5725859cd440f10a1fe163d36be
| 15,602
|
from re import T
def test_nested_blocks(pprint):
"""
Expected result:
procedure test(x, y: Integer);
begin
x:=1;
y:=200;
for z:= 1 to 100 do
begin
x := x + z;
end;
y:=x;
end;
"""
def brk(offset=0):
"force a new line and indent by given offset"
return T.BREAK(blankSpace=9999, offset=offset)
text = [
T.BEGIN(breakType=BreakType.consistent, offset=0),
T.STRING('procedure test(x, y: Integer);'), brk(),
T.STRING("begin"),
brk(2), T.STRING("x:=1;"),
brk(2), T.STRING("y:=200;"),
# indented for loop
brk(2), T.BEGIN(breakType=BreakType.consistent, offset=0),
T.STRING("for z:= 1 to 100 do"), brk(),
T.STRING("begin"),
brk(2), T.STRING("x := x + z;"), brk(),
T.STRING("end;"),
T.END(),
brk(2), T.STRING("y:=x;"), brk(),
T.STRING("end;"),
T.END(),
T.EOF()]
result = pprint(text)
assert result == (
'procedure test(x, y: Integer);\n'
'begin\n'
' x:=1;\n'
' y:=200;\n'
' for z:= 1 to 100 do\n'
' begin\n'
' x := x + z;\n'
' end;\n'
' y:=x;\n'
'end;'
)
|
7ef533d66f57483fac98bc249fd121e7c47f3d9a
| 15,603
|
from typing import Sequence
import asyncio
async def read(
sensors: Sequence[Sensor], msg: str = "", retry_single: bool = False
) -> bool:
"""Read from the Modbus interface."""
global READ_ERRORS # pylint:disable=global-statement
try:
try:
await SUNSYNK.read(sensors)
READ_ERRORS = 0
return True
except asyncio.TimeoutError:
_LOGGER.error("Read error%s: Timeout", msg)
except ModbusIOException:
# TCP: try to reconnect since it got a fairly serious error
await asyncio.sleep(1)
await SUNSYNK.connect()
except Exception as err: # pylint:disable=broad-except
_LOGGER.error("Read Error%s: %s", msg, err)
READ_ERRORS += 1
if READ_ERRORS > 3:
raise Exception(f"Multiple Modbus read errors: {err}") from err
if retry_single:
_LOGGER.info("Retrying individual sensors: %s", [s.name for s in SENSORS])
for sen in sensors:
await asyncio.sleep(0.02)
await read([sen], msg=sen.name, retry_single=False)
return False
|
7e871984d3b86207a2e9c9909b5f83d3ef9c3c4a
| 15,604
|
def boxlist_iou_guide_nms(boxlist, nms_thresh, max_proposals=-1, score_field="scores"):
"""
Performs non-maximum suppression on a boxlist, with scores specified
in a boxlist field via score_field.
Arguments:
boxlist(BoxList)
nms_thresh (float)
max_proposals (int): if > 0, then only the top max_proposals are kept
after non-maximum suppression
score_field (str)
"""
if nms_thresh <= 0:
return boxlist
mode = boxlist.mode
boxlist = boxlist.convert("xyxy")
boxes = boxlist.bbox
scores = boxlist.get_field(score_field)
ious = boxlist.get_field('ious')
keep, scores_new = iou_guide_nms(boxes, scores, ious, nms_thresh)
if max_proposals > 0:
keep = keep[: max_proposals]
scores_new = scores_new[: max_proposals]
boxlist = boxlist[keep]
boxlist.add_field("scores", scores_new)
return boxlist.convert(mode)
|
1824575d4768b2145730caa0f9f9809dd784260d
| 15,605
|
import sys
def parse(tokens:list):
"""Transforme la liste des tokens en un arbre d'instructions ou de valeurs"""
ouverts=Pile(newnode(tokens[0]))
for token in tokens:
if token[0]=="balise":
if token[1][0]=="/":
if ouverts.top.REPR.lower()[:len(token[1])-1]!=token[1][1:]:
print(f"A tag has been opened({ouverts.top.REPR}) but not well closed(found {token[1][1:]})")
sys.exit()
else:
ouverts.pop()
else:
if token[1][-1]=="/": # Balise autofermante
token=token[0],token[1][:-1]
new=newnode(token)
ouverts.top.childs.append(new)
else:
new=newnode(token)
if new.REPR=="Indicium":
new.childs.append(ouverts.top.childs.pop())
ouverts.top.childs.append(new)
ouverts.add(new)
else:
ouverts.top.childs.append(newnode(token))
return ouverts.top
|
43fc47d060cb75ceea4e5ca49099a3646a486d20
| 15,606
|
def get_payload_bin(payload, seconds):
"""
Since we can't run the ysoserial.exe file in ubuntu (at least not
easily with mono) we build the different payloads in windows and
save them to the PAYLOADS map above.
:param payload: The payload name
:param seconds: The seconds to wait
:return: The payload
"""
return SAVED_PAYLOADS[payload][seconds]
|
eec033969157ec2a67b2f9edbba5b355d25f55bc
| 15,607
|
import numpy as np
def srange(start, step, length, dtype=None):
"""
Like np.arange() but you give the start, the step, and the number
of steps. Saves having to compute the end point yourself.
"""
stop = start + (step * length)
return np.arange(start, stop, step, dtype)
|
ba71777c720063cbf5429085d8d249d83634b4f7
| 15,608
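A quick usage sketch for the srange() entry above (hypothetical values; assumes numpy is imported as np):
import numpy as np
srange(0.0, 0.5, 5)   # equivalent to np.arange(0.0, 2.5, 0.5) -> array([0. , 0.5, 1. , 1.5, 2. ])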
|
def get_tool_by_id(tool_id):
"""
returns the tool given the id
"""
tool = ToolType.objects.get(pk=tool_id)
return tool
|
7cee1d4a484028db94049227f9a968541974cd1e
| 15,609
|
import http
def domainr(text):
"""<domain> - uses domain.nr's API to search for a domain, and similar domains
:type text: str
"""
try:
data = http.get_json('http://domai.nr/api/json/search?q=' + text)
except (http.URLError, http.HTTPError):
return "Unable to get data for some reason. Try again later."
if data['query'] == "":
return "An error occurred: {status} - {message}".format(**data['error'])
domains = [format_domain(domain) for domain in data["results"]]
return "Domains: {}".format(", ".join(domains))
|
d44e49c25256a10f242cb8515b1e970cf509676d
| 15,610
|
def manage_blog():
""" 博文管理页面路由 """
if 'adminname' in session:
if request.method == 'POST':
del_result = manage_del_blog(db, Post, Comment, request.form.get('edit_id'))
return del_result
else:
blog_list = Post.query.order_by(Post.post_time.desc()).all()
return render_template('admin_blog.html',
page_in='blog',
blog_list=blog_list)
else:
return redirect(url_for('login'))
|
be73948d29e96413bff987447c5b9baa87177ccf
| 15,611
|
def split(nodes, index, axis=0):
"""
Split an array of nodes into two separate, non-overlapping arrays.
Parameters
----------
nodes : numpy.ndarray
An N x M array of individual node coordinates (i.e., the
x-coords or the y-coords only)
index : int
The leading edge of where the split should occur.
axis : int, optional
The axis along which ``nodes`` will be split. Use `axis = 0`
to split along rows and `axis = 1` for columns.
Raises
------
ValueError
Trying to split ``nodes`` at the edge (i.e., resulting in the
original array and an empty array) will raise an error.
Returns
-------
n1, n2 : numpy.ndarrays
The two non-overlapping sides of the original array.
"""
if index + 1 >= nodes.shape[axis] or index == 0:
raise ValueError("cannot split grid at or beyond its edges")
if axis == 0:
n1, n2 = nodes[:index, :], nodes[index:, :]
elif axis == 1:
n1, n2 = nodes[:, :index], nodes[:, index:]
return n1, n2
|
4ba4a078e35c7a4164675eab2fe36c943264bb28
| 15,612
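A minimal usage sketch for the split() entry above, with hypothetical node coordinates and numpy imported as np:
import numpy as np
nodes = np.arange(12).reshape(4, 3)      # 4 rows x 3 columns of coordinates
top, bottom = split(nodes, 2, axis=0)    # top -> rows 0-1, bottom -> rows 2-3
left, right = split(nodes, 1, axis=1)    # left -> column 0, right -> columns 1-2
split(nodes, 0)                          # raises ValueError: cannot split at the edge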
|
def smart_oracle(oracle, text, code, block_len, max_rand):
"""Call oracle normally, or repeatedly call oracle in case of random prefix.
Returns "clean" oracle ouptut regardless of whether the oracle adds a
random prefix.
"""
if not max_rand:
return oracle(text, code) if code else oracle(text)
# append arbitrary bytes unlikely to occur in attacker-controlled plaintext
text_mod = bytearray([7] * block_len * 2) + text
success = False
while not success:
encrypted = oracle(text_mod, code) if code else oracle(text_mod)
text_start = blocks_aligned(encrypted, block_len, max_rand)
if text_start is not None:
success = True
return encrypted[text_start:]
|
fb20586236509838333b2723b24ead9fba9f2887
| 15,613
|
import json
def start_active_span_from_edu(
edu_content,
operation_name,
references=[],
tags=None,
start_time=None,
ignore_active_span=False,
finish_on_close=True,
):
"""
Extracts a span context from an edu and uses it to start a new active span
Args:
edu_content (dict): an edu_content with a `context` field whose value is
canonical json for a dict which contains opentracing information.
For the other args see opentracing.tracer
"""
if opentracing is None:
return _noop_context_manager()
carrier = json.loads(edu_content.get("context", "{}")).get("opentracing", {})
context = opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier)
_references = [
opentracing.child_of(span_context_from_string(x))
for x in carrier.get("references", [])
]
# For some reason jaeger decided not to support the visualization of multiple parent
spans or explicitly show references. I include the span context as a tag here as
# an aid to people debugging but it's really not an ideal solution.
references = references + _references  # avoid mutating the mutable default argument
scope = opentracing.tracer.start_active_span(
operation_name,
child_of=context,
references=references,
tags=tags,
start_time=start_time,
ignore_active_span=ignore_active_span,
finish_on_close=finish_on_close,
)
scope.span.set_tag("references", carrier.get("references", []))
return scope
|
43a5e2eb5695b0623f550492e38c9635fc315dfe
| 15,614
|
def inference_video_feed(request, project_id):
"""inference_video_feed
"""
return Response({
"status": "ok",
"url": "http://" + inference_module_url() + "/video_feed?inference=1",
})
|
7751737093b0f1cd72301e3dcd07c9ab929c9931
| 15,615
|
from datetime import datetime, timezone
def extract_start_timestamp() -> datetime:
"""Define extraction start timestamp.
Returns:
Extraction start timestamp used for testing.
"""
timestamp = datetime(2019, 8, 6, tzinfo=timezone.utc)
return timestamp
|
f03668c5b19a05c623040b8be1ff6fca23765437
| 15,616
|
import numpy as np
from scipy.stats import norm, multivariate_normal
def phi_pdf(X, corr=None):
"""
Standard normal PDF/Multivariate pdf.
**Input:**
* **X** (`float`)
Argument.
* **corr** (`ndarray`)
Correlation matrix.
**Output**
Standard normal PDF of X.
"""
norm_pdf = None
if isinstance(X, (int, float)):
norm_pdf = norm.pdf(X, loc=0, scale=1)
else:
if np.trace(corr) != len(X):
shape_error(' X or corr ')
else:
norm_pdf = multivariate_normal.pdf(X, cov=corr)
return norm_pdf
|
31566a5e0c50eaae7be367f7ccfd5dc0c1bfcd94
| 15,617
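A usage sketch for the phi_pdf() entry above (hypothetical inputs; assumes scipy.stats and numpy are available): a scalar argument uses the univariate standard normal, an array uses the multivariate normal with the given correlation matrix.
import numpy as np
phi_pdf(0.0)                           # 1/sqrt(2*pi), roughly 0.3989
phi_pdf(np.zeros(2), corr=np.eye(2))   # 1/(2*pi), roughly 0.1592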
|
def computeStatistic( benchmarks, field, func ):
"""
Return the result of func applied to the values of field in benchmarks.
Arguments:
benchmarks: The list of benchmarks to gather data from.
field: The field to gather from the benchmarks.
func: The function to apply to the data, must accept a list and return a single value.
"""
results = []
for benchmark in benchmarks:
results.append( benchmark[ field ] )
return func( results )
|
7eced912d319a3261170f8274c4562db5e28c34c
| 15,618
|
import re
def bus_update_request(payload):
"""Parser for `bus_update_request` tracepoint"""
try:
match = re.match(bus_update_request_pattern, payload)
if match:
match_group_dict = match.groupdict()
return BusUpdateRequest(**match_group_dict)
except Exception as e:
raise ParserError(str(e))
|
ea5d95eaef4964b900b5201a8878886529f8c132
| 15,619
|
def update_employee(request,id):
"""
Updating the employee profile.
"""
try:
obj = User.objects.get(id=id)
total_cl = obj.no_of_cl
total_sl = obj.no_of_sl
total_wh = obj.no_of_wh
attendance_cl = Attendance.objects.filter(id=id,leave_type='cl',approved_or_not=True).count()
attendance_sl = Attendance.objects.filter(id=id,leave_type='sl',approved_or_not=True).count()
attendance_wh = Attendance.objects.filter(id=id,leave_type='wl',approved_or_not=True).count()
taken_cl = (total_cl-attendance_cl)
taken_sl = (total_sl-attendance_sl)
taken_wh = (total_wh-attendance_wh)
if request.method == "GET":
form = EmployeeCreationForm(instance=obj,initial={'email':obj.email})
context = {
'form':form,
'obj':obj,
'attendance_cl':attendance_cl,
'attendance_sl':attendance_sl,
'attendance_wh':attendance_wh,
'taken_cl':taken_cl,
'taken_sl':taken_sl,
'taken_wh':taken_wh
}
return render (request,'Employees/edit_employee.html', context)
elif request.method == "POST":
form = EmployeeCreationForm(request.POST,request.FILES,instance=obj)
if form.is_valid():
form_save = form.save(commit=False)
form_save.email = form.cleaned_data['email']
form_save.img = form.cleaned_data['img']
form_save.save()
return render(request,'Employees/edit_employee.html',{'form':form})
else:
return render(request,'Employees/edit_employee.html',{'form':form})
else:
return HttpResponseRedirect('/forbidden/')
except Exception as e:
return HttpResponseRedirect('/error/')
|
bcbfd2183ad6ea835b8e6fc4d6818b0bd31ea051
| 15,620
|
import numpy as np
def func(TI, S0, alpha, T1):
""" exponential function for T1-fitting.
Args
----
x (numpy.ndarray): Inversion times (TI) in the T1-mapping sequence as input for the signal model fit.
Returns
-------
a, b, T1 (numpy.ndarray): signal model fitted parameters.
"""
mz = 1 - alpha * np.exp(-TI*(alpha-1)/T1)
return np.abs(S0 * mz)
|
605393be1aaf9f70f7f65c56dc5a31d4a38390e7
| 15,621
|
import functools
def validate(spec):
"""Decorator to validate a REST endpoint input.
Uses the schema defined in the openapi.yml file
to validate.
"""
def validate_decorator(func):
@functools.wraps(func)
def wrapper_validate(*args, **kwargs):
try:
data = request.get_json()
except BadRequest:
result = "The request body is not a well-formed JSON."
log.debug("create_circuit result %s %s", result, 400)
raise BadRequest(result) from BadRequest
if data is None:
result = "The request body mimetype is not application/json."
log.debug("update result %s %s", result, 415)
raise UnsupportedMediaType(result)
validator = RequestValidator(spec)
openapi_request = FlaskOpenAPIRequest(request)
result = validator.validate(openapi_request)
if result.errors:
errors = result.errors[0]
if hasattr(errors, "schema_errors"):
schema_errors = errors.schema_errors[0]
error_log = {
"error_message": schema_errors.message,
"error_validator": schema_errors.validator,
"error_validator_value": schema_errors.validator_value,
"error_path": list(schema_errors.path),
"error_schema": schema_errors.schema,
"error_schema_path": list(schema_errors.schema_path),
}
log.debug("error response: %s", error_log)
error_response = f"{schema_errors.message} for field"
error_response += f" {'/'.join(schema_errors.path)}."
else:
error_response = (
"The request body mimetype is not application/json."
)
raise BadRequest(error_response) from BadRequest
return func(*args, data=data, **kwargs)
return wrapper_validate
return validate_decorator
|
4cba2a7a240192f17b16fce09161afb39d2cfc75
| 15,622
|
def make_random_coordinate():
""" Make a random coordinate dictionary"""
return make_coordinate(randint(0, 100), randint(0, 100))
|
7719714d19e94a5be2ae3f93fc5290514bfe4b5e
| 15,623
|
def inv_qft_core(qubits):
"""
Generates a Quil program that performs
the inverse quantum Fourier transform on the given qubits
without swapping qubits at the end.
:param qubits: A list of qubit indexes.
:return: A Quil program to compute the inverse QFT of the given qubits without swapping.
"""
qft_quil = Program.inst(qft_core(qubits, coef=-1))
inv_qft_quil = Program()
while(len(qft_quil) > 0):
inst = qft_quil.pop()
inv_qft_quil.inst(inst)
return inv_qft_quil
|
3c7982cdb44398e1730a3aaaeee2c323694fae96
| 15,624
|
def analysis_precheck(_id, feature_table, rep_seqs, taxonomy, metadata):
"""
Run prechecks to decrease the chance of the job failing.
Input:
- feature_table: QIIME2 artifact of type FeatureTable[Frequency]
- rep_seqs: QIIME2 artifact of type FeatureData[Sequence]
"""
feature_table_path = save_uploaded_file(_id, feature_table)
rep_seqs_path = save_uploaded_file(_id, rep_seqs)
taxonomy_path = save_uploaded_file(_id, taxonomy)
metadata_path = save_uploaded_file(_id, metadata)
def validate_analysis_input(feature_table, rep_seqs, taxonomy):
"""
Precheck input files prior to running denoise step
Input:
- feature_table: Path to QIIME2 artifact of type FeatureTable[Frequency]
- rep_seqs: Path to QIIME2 artifact of type FeatureData[Sequence]
"""
# Check Artifact type
try:
feature_table_artifact = Artifact.load(feature_table)
rep_seqs_artifact = Artifact.load(rep_seqs)
if(str(feature_table_artifact.type) != "FeatureTable[Frequency]"):
msg = "Input Feature Table is not of type 'FeatureTable[Frequency]'!"
raise ValueError(msg)
if(str(rep_seqs_artifact.type) != "FeatureData[Sequence]"):
msg = "Input Representative Sequences is not of type 'FeatureData[Sequence]'!"
raise ValueError(msg)
except ValueError as err:
message = str(err)
return 400, message
return 200, "Imported data good!"
responseIfError(validate_analysis_input, feature_table=feature_table_path, rep_seqs=rep_seqs_path, taxonomy=taxonomy_path)
return feature_table_path, rep_seqs_path, taxonomy_path, metadata_path
|
65dde12b312926d185722a09779c8d11705d71dc
| 15,625
|
import socket
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Deluge from a config entry."""
host = entry.data[CONF_HOST]
port = entry.data[CONF_PORT]
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
api = await hass.async_add_executor_job(
DelugeRPCClient, host, port, username, password
)
api.web_port = entry.data[CONF_WEB_PORT]
try:
await hass.async_add_executor_job(api.connect)
except (
ConnectionRefusedError,
socket.timeout,
SSLError,
) as ex:
raise ConfigEntryNotReady("Connection to Deluge Daemon failed") from ex
except Exception as ex: # pylint:disable=broad-except
if type(ex).__name__ == "BadLoginError":
raise ConfigEntryAuthFailed(
"Credentials for Deluge client are not valid"
) from ex
_LOGGER.error("Unknown error connecting to Deluge: %s", ex)
coordinator = DelugeDataUpdateCoordinator(hass, api, entry)
await coordinator.async_config_entry_first_refresh()
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = coordinator
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
|
70ab1c52569274eb7125e56f38028db3eb792259
| 15,626
|
import cmath
def powerFactor(n):
"""Function to compute power factor given a complex power value
Will this work if we're exporting power? I think so...
"""
# Real divided by apparent
pf = abs(n.real) / abs(n)
# Determine lagging vs leading (negative).
# NOTE: cmath.phase returns counter-clockwise angle on interval [-pi, pi],
# so checking sign should be reliable for determining lead vs. lag
p = cmath.phase(n)
if p < 0:
return (pf, 'lead')
else:
return (pf, 'lag')
|
1a507818f9c9906d27a1374cc9b757766b3038c1
| 15,627
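A worked example for the powerFactor() entry above (hypothetical complex power value): for n = 3 + 4j the apparent power is 5, so pf = 3/5 = 0.6, and the positive phase angle marks it as lagging.
powerFactor(3 + 4j)   # -> (0.6, 'lag'),  phase > 0
powerFactor(3 - 4j)   # -> (0.6, 'lead'), phase < 0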
|
import requests
from lxml import html
def send_request(url, raise_errors):
"""
Sends a request to a URL and parses the response with lxml.
"""
try:
response = requests.get(url, headers={'Accept-Language': '*'}, verify=_PEM_PATH)
response.raise_for_status()
doc = html.fromstring(response.text)
return doc
except requests.exceptions.RequestException:
if raise_errors:
raise
return None
|
ddf8dc7c899b97cebdc45727bd43ca1acf015ecb
| 15,628
|
def _static_idx(idx, size):
"""Helper function to compute the static slice start/limit/stride values."""
assert isinstance(idx, slice)
start, stop, step = idx.indices(size)
if (step < 0 and stop >= start) or (step > 0 and start >= stop):
return 0, 0, 1, False # sliced to size zero
if step > 0:
return start, stop, step, False
else:
k = (start - stop - 1) % (-step)
return stop + k + 1, start + 1, -step, True
|
8c586375f018be36c0e7688549a551d17d4e2bc8
| 15,629
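A worked example for the _static_idx() entry above (hypothetical slices), tracing the negative-step branch: slice(None, None, -1) on size 5 normalizes to start=4, stop=-1, step=-1, then k = (4 - (-1) - 1) % 1 = 0, so the result is (0, 5, 1, True), i.e. take elements 0..4 with stride 1 and reverse them afterwards.
_static_idx(slice(None, None, -1), 5)   # -> (0, 5, 1, True)
_static_idx(slice(1, 4), 5)             # -> (1, 4, 1, False)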
|
import numpy as np
def crop_array(input_array, ylength, xlength=None, orgn=(0,0)):
"""Crops an image in numpy array format. Pads crops outside
of input image with zeros if necessary. If no y dimension
is specified, outputs a square image.
"""
if xlength is None:
xlength = ylength
ylength = int(ylength)
xlength = int(xlength)
orgn = (int(orgn[0]), int(orgn[1]))
target = np.zeros((ylength, xlength))
#slice ranges
ymin = max(orgn[0], 0)
xmin = max(orgn[1], 0)
ymax = min(orgn[0] + ylength, input_array.shape[0])
xmax = min(orgn[1] + xlength, input_array.shape[1])
yslice = slice(ymin, ymax)
xslice = slice(xmin, xmax)
#top, left, bottom, right pads
tp = max(-orgn[0], 0)
lp = max(-orgn[1], 0)
bp = max((ylength + orgn[0] - tp - input_array.shape[0]), 0)
rp = max((xlength + orgn[1] - lp - input_array.shape[1]), 0)
#insert slice into the right spot.
target[tp:(ylength-bp),lp:(xlength-rp)] = input_array[yslice, xslice]
return target
|
4daeb126a8424fc038a5a42448d17eada9d12ee3
| 15,630
|
def get_service_defaults(servicename, version, **_):
"""
Load the default configuration for a given service version
Variables:
servicename => Name of the service to get the info
version => Version of the service to get
Data Block:
None
Result example:
{'accepts': '(archive|executable|java|android)/.*',
'category': 'Extraction',
'classpath': 'al_services.alsvc_extract.Extract',
'config': {'DEFAULT_PW_LIST': ['password', 'infected']},
'cpu_cores': 0.1,
'description': "Extracts some stuff"
'enabled': True,
'name': 'Extract',
'ram_mb': 256,
'rejects': 'empty|metadata/.*',
'stage': 'EXTRACT',
'submission_params': [{'default': u'',
'name': 'password',
'type': 'str',
'value': u''},
{'default': False,
'name': 'extract_pe_sections',
'type': 'bool',
'value': False},
{'default': False,
'name': 'continue_after_extract',
'type': 'bool',
'value': False}],
'timeout': 60}
"""
service = STORAGE.service.get(f"{servicename}_{version}", as_obj=False)
if service:
return make_api_response(service)
else:
return make_api_response("", err=f"{servicename} service does not exist", status_code=404)
|
2ba13382e2d5a668f1653f90fed5efe01200d6e2
| 15,631
|
def read_nnet3_model(model_path: str) -> nnet3.Nnet:
"""Read in a nnet3 model in raw format.
Actually if this model is not a raw format it will still work, but this is
not an official feature; it was due to some kaldi internal code.
Args:
model_path: Path to a raw nnet3 model, e.g., "data/final.raw"
Returns:
nnet: A neural network AM.
"""
nnet = nnet3.Nnet()
with xopen(model_path) as istream:
nnet.read(istream.stream(), istream.binary)
return nnet
|
b5dbadeb0f2072dfeccd1f4fdc77990680d12068
| 15,632
|
def flat_list_of_lists(l):
"""flatten a list of lists [[1,2], [3,4]] to [1,2,3,4]"""
return [item for sublist in l for item in sublist]
|
c121dff7d7d9a4da55dfb8aa1337ceeea191fc30
| 15,633
|
def cancel(api, order_ids=None):
"""
DELETE all orders by api["symbol"] (or) by symbol and order_id:
"""
if DETAIL:
print(cancel.__doc__, "symbol", api['symbol'], "order_ids", order_ids)
if order_ids is None:
order_ids = [] # must be a list
# format remote procedure call to exchange api standards
symbol = symbol_syntax(api["exchange"], api['symbol'])
if not order_ids:
print("Cancel All")
else:
print("Cancel Order Ids:", order_ids)
# Coinbase and Poloniex offer both Cancel All and Cancel One
if api["exchange"] in ["coinbase", "poloniex"]:
if order_ids:
# Cancel a list of orders
ret = []
for order_id in order_ids:
print("Cancel Order", order_id)
if api["exchange"] == "coinbase":
api["endpoint"] = "/orders/" + str(order_id)
api["params"] = {}
api["method"] = "DELETE"
elif api["exchange"] == "poloniex":
api["endpoint"] = "/tradingApi"
api["params"] = {"command": "cancelOrder", "orderNumber": int(order_id)}
api["method"] = "POST"
response = process_request(api)
ret.append({"order_id": order_id, "response": response})
else:
# Cancel All
if api["exchange"] == "coinbase":
api["endpoint"] = "/orders"
api["params"] = {"product_id": symbol}
api["method"] = "DELETE"
elif api["exchange"] == "poloniex":
api["endpoint"] = "/tradingApi"
api["params"] = {"command": "cancelAllOrders", "currencyPair": symbol}
api["method"] = "POST"
ret = process_request(api)
# Handle cases where "Cancel All" in one market is not supported
elif api["exchange"] in ["kraken", "binance", "bittrex", "Bitfinex"]:
if (api["exchange"] == "bitfinex") and not api["symbol"]:
print("WARN: Cancel All in ALL MARKETS")
api["endpoint"] = "/v2/auth/w/order/cancel/multi"
api["params"] = {}
api["method"] = "POST"
ret = process_request(api)
else:
# If we have an order_ids list we'll use it, else make one
if not order_ids:
print("Open Orders call to suppport Cancel All")
orders = get_orders(api)
order_ids = []
for order in orders["asks"]:
order_ids.append(order["order_id"])
for order in orders["bids"]:
order_ids.append(order["order_id"])
ret = []
for order_id in order_ids:
print("Cancel Order", order_id)
if api['exchange'] == "bitfinex":
api["endpoint"] = "/v2/auth/w/order/cancel"
api["params"] = {"id": order_id}
api["method"] = ""
elif api["exchange"] == "binance":
api["endpoint"] = "/api/v3/order"
api["params"] = {"symbol": symbol, "orderId": order_id}
api["method"] = "DELETE"
elif api["exchange"] == "bittrex":
api["endpoint"] = "/api/v1.1/market/cancel"
api["params"] = {"uuid": order_id}
api["method"] = "GET"
elif api["exchange"] == "kraken":
api["endpoint"] = "/0/private/CancelOrder"
api["params"] = {"txid": order_id}
api["method"] = "POST"
response = process_request(api)
ret.append(response)
return ret
|
24688256b20d3fdcc40eebb393ab98a963037d96
| 15,634
|
def prev_next_group(project, group):
"""Return adjacent group objects or None for the given project and group.
The previous and next group objects are relative to sort order of the
project's groups with respect to the passed in group.
"""
# TODO: Profile and optimize this query if necessary
groups = sorted(x for x in project.groups if x.submissions)
try:
index = groups.index(group)
except ValueError:
return None, None
prev_group = groups[index - 1] if index > 0 else None
next_group = groups[index + 1] if index + 1 < len(groups) else None
return prev_group, next_group
|
d1ed4370b0b2f76d6700080d4553e91827dc575c
| 15,635
|
from Crypto.Cipher import AES
def decrypt(mess, key):
"""Decrypt the cypher text using AES decrypt"""
if len(key) % 16 != 0:
a = 16 - len(key) % 16
key = key.ljust(len(key) + a)
cipher = AES.new(key, AES.MODE_ECB)  # explicit mode assumed; the original passed no mode (old PyCrypto defaulted to ECB)
plain_txt = cipher.decrypt(mess)
return plain_txt
|
eb587960d63539e1e57158f84174af5b5dae3fb5
| 15,636
|
def multiVecMat( vector, matrix ):
"""
Multiplies the matrix by the vector from the right.
Parameters:
----------
vector: list
The vector.
matrix: list
The matrix to be multiplied. Its dimensions must match the
dimension of the vector.
Returns:
list
An array the size of the vector.
"""
# Create an array the size of the vector
result = [0] * len( matrix[0] )
# Walk the matrix row by row
for r, row in enumerate( matrix ):
# If the dimensions don't match, bail out
if len(row) != len(vector):
return None
# Walk every element in the row
for i, elem in enumerate( row ):
# Add to the result, at the index of the current row, the product
# of the current row element and the corresponding element of
# the vector.
result[r] += elem * vector[i]
return result
|
8a10241173ab981d6007d8ff939199f9e86806e5
| 15,637
|
def verify_state(
state_prec_gdf,
state_abbreviation,
source,
year,
county_level_results_df,
office,
d_col=None,
r_col=None,
path=None,
):
"""
returns a complete (StateReport) object and a ((CountyReport) list) for the state.
:state_prec_gdf: (GeoDataFrame) containing precinct geometries and election results
:state_abbreviation: (str) e.g. 'MA' for Massachusetts
:source: (str) person or organization that made the 'state_prec_gdf' e.g 'VEST'
:year: (str) 'YYYY' indicating the year the election took place e.g. '2016'
:county_level_results_df: (DataFrame) containing official county-level election results
:office: (str) office to be evaluated in vote validation e.g. 'U.S. Senate'
:d_col: (str) denotes the column for democratic vote counts in each precinct
:r_col: (str) denotes the column for republican vote counts in each precinct
:path: (str) filepath to which the report should be saved (if None it won't be saved)
d_col, r_col are optional - if they are not provided, `get_party_cols` will be used
to guess based on comparing each column in state_prec_gdf to the expected results.
"""
print("Starting verification process for: ", state_abbreviation, source, year)
state_prec_gdf = state_prec_gdf.reset_index()
county_level_results_df = county_level_results_df.reset_index()
# enforce expected schema
assert "geometry" in state_prec_gdf.columns
assert {"county", "GEOID", "party", "votes"}.issubset(
set(county_level_results_df.columns)
)
# assign d_col and r_col
if not d_col or not r_col:
print("Candidate vote count columns are being assigned automatically")
d_col, r_col = get_party_cols(state_prec_gdf, state_abbreviation)
else:
print("Candidate vote count columns are being assigned manually")
print("Choose d_col as: ", d_col)
print("Choose r_col as: ", r_col)
state_prec_gdf = state_prec_gdf.rename(columns={d_col: "d_col", r_col: "r_col"})
# remove unnecessary columns
cols_to_keep = ["d_col", "r_col", "geometry"]
if "GEOID" in state_prec_gdf.columns:
cols_to_keep.append("GEOID")
state_prec_gdf = state_prec_gdf[cols_to_keep]
print("Verification will now begin with this GeoDataFrame: \n")
print(state_prec_gdf.head())
# initialize state report
print("Starting Vote Verification")
state_report = StateReport(
county_level_results_df,
state_prec_gdf,
state_abbreviation,
year,
source,
office,
)
# populate the report
print("Starting Topology Verification")
state_report = verify_topology(state_prec_gdf, state_report)
print("Starting County Verification")
# assign GEOID
if "GEOID" not in state_prec_gdf.columns:
try:
print("Missing GEOID Column - attempting automatic assignment")
state_prec_gdf = assign_GEOID(state_prec_gdf, state_report.fips)
print("GEOID assignment successful")
except:
pass
else:
print("Using the GEOID Column in the original shapefile.")
assert "GEOID" in state_prec_gdf.columns
state_report, county_reports = verify_counties(
state_prec_gdf, county_level_results_df, state_report
)
if path:
make_report(path, state_report, county_reports)
print("All done!\n")
return state_report, county_reports
|
245baf26d1fad646abcee493bfa31935f2d1db59
| 15,638
|
import requests
from requests.auth import HTTPBasicAuth
def remove_profile(serial, profile_id):
"""hubcli doesn't remove profiles so we have to do this server-side."""
r = requests.post(
url=f"https://{ AIRWATCH_DOMAIN }/API/mdm/profiles/{ profile_id }/remove",
json={"SerialNumber": serial},
headers={
"aw-tenant-code": AIRWATCH_KEY,
"Content-Type": "application/json",
"Accept": "application/json",
},
auth=HTTPBasicAuth(AIRWATCH_USER, AIRWATCH_PASSWORD),
)
r.raise_for_status()
return r
|
8a47cd5c6588c3140d9e44aac94b59916517345a
| 15,639
|
import os
import cv2
def skin_detect_percentage(image_dir=None):
"""Skin detection from image."""
result = skin_detect(image_dir)
filename = os.path.join(PROCESSED_DIR, image_dir.split('/')[-1])
if not os.path.exists(PROCESSED_DIR):
os.makedirs(PROCESSED_DIR)
cv2.imwrite(filename, result)
# take pixel values from inside contours,
# that way we get random samples as well.
grey_img = cv2.cvtColor(result, cv2.COLOR_RGB2GRAY)
greyscale_skin_nonzero_count = cv2.countNonZero(grey_img)
return float(greyscale_skin_nonzero_count)/float(grey_img.size)
|
d1d9db5abe11ea68485cb3d4d89e678a6603259c
| 15,640
|
import pandas as pd
def KELCH(df, n):
"""
Keltner Channel
"""
temp = (df['High'] + df['Low'] + df['Close']) / 3
KelChM = pd.Series(temp.rolling(n).mean(), name='KelChM_' + str(n))
temp = (4 * df['High'] - 2 * df['Low'] + df['Close']) / 3
KelChU = pd.Series(temp.rolling(n).mean(), name='KelChU_' + str(n))
temp = (-2 * df['High'] + 4 * df['Low'] + df['Close']) / 3
KelChD = pd.Series(temp.rolling(n).mean(), name='KelChD_' + str(n))
result = pd.DataFrame([KelChM, KelChU, KelChD]).transpose()
return out(SETTINGS, df, result)
|
3335fd45ce073eec33d65b7a6d23c07b6a71a662
| 15,641
|
import numpy as np
import pandas as pd
def find_isotopes(ms, peptides_in_bin, tolerance=0.01):
"""
Find the isotopes between mass shifts using the mass difference of C13 and C12, and amino acid statistics as well.
Parameters
-----------
ms : Series
Series with mass in str format as index and values float mass shift.
peptides_in_bin : Series
Series with # of peptides in each mass shift.
tolerance : float
Tolerance for isotope matching.
Returns
-------
DataFrame with 'isotope' (boolean) and 'monoisotop_index' columns.
"""
out = pd.DataFrame({'isotope': False, 'monoisotop_index': None}, index=ms.index)
np_ms = ms.to_numpy()
difference_matrix = np.abs(np_ms.reshape(-1, 1) - np_ms.reshape(1, -1) - DIFF_C13)
isotop, monoisotop = np.where(difference_matrix < tolerance)
logger.debug('Found %d potential isotopes.', len(isotop))
out.iloc[isotop, 0] = True
out.iloc[isotop, 1] = out.iloc[monoisotop, :].index
for i, row in out.iterrows():
if row['isotope']:
if peptides_in_bin[i] > peptides_in_bin[row['monoisotop_index']]:
out.at[i, 'isotope'], out.at[i, 'monoisotop_index'] = False, None
return out
|
01ceae733261b73a92c24e4033a34a77caaf7850
| 15,642
|
def wait_until_complete(jobs):
"""wait jobs finish"""
return [j.get() for j in jobs]
|
530c3af30ca40025891980191c1f121d8f026a53
| 15,643
|
import io
import pycurl
def torquery(url):
"""
Uses pycurl to fetch a site using the proxy on the SOCKS_PORT.
"""
output = io.BytesIO()
query = pycurl.Curl()
query.setopt(pycurl.URL, url)
query.setopt(pycurl.PROXY, 'localhost')
query.setopt(pycurl.PROXYPORT, SOCKS_PORT)
query.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5_HOSTNAME)
query.setopt(pycurl.WRITEFUNCTION, output.write)
try:
query.perform()
return output.getvalue()
except pycurl.error as exc:
return "Unable to reach %s (%s)" % (url, exc)
|
7ae660a9a5c3af7be0fbf8789cc96d481863a2c4
| 15,644
|
def check_for_negative_residual(vel, data, errors, best_fit_list, dct,
signal_ranges=None, signal_mask=None,
force_accept=False, get_count=False,
get_idx=False, noise_spike_mask=None):
"""Check for negative residual features and try to refit them.
We define negative residual features as negative peaks in the residual that were introduced by the fit. These negative peaks have to have a minimum negative signal-to-noise ratio of dct['snr_negative'].
In case of a negative residual feature, we try to replace the Gaussian fit component that is causing the feature with two narrower components. We only accept this solution if it yields a better fit as determined by the AICc value.
Parameters
----------
vel : numpy.ndarray
Velocity channels (unitless).
data : numpy.ndarray
Original data of spectrum.
errors : numpy.ndarray
Root-mean-square noise values.
best_fit_list : list
List containing parameters of the current best fit for the spectrum. It is of the form [{0} params_fit, {1} params_errs, {2} ncomps_fit, {3} best_fit, {4} residual, {5} rchi2, {6} aicc, {7} new_fit, {8} params_min, {9} params_max, {10} pvalue]
dct : dict
Dictionary containing parameter settings for the improved fitting.
signal_ranges : list
Nested list containing info about ranges of the spectrum that were estimated to contain signal. The goodness-of-fit calculations are only performed for the spectral channels within these ranges.
signal_mask : numpy.ndarray
Boolean array containing the information of signal_ranges.
force_accept : bool
Experimental feature. Default is 'False'. If set to 'True', the new fit will be forced to become the best fit.
get_count : bool
Default is 'False'. If set to 'True', only the number of occurring negative residual features will be returned.
get_idx : bool
Default is 'False'. If set to 'True', the index of the Gaussian fit component causing the negative residual feature is returned. In case of multiple negative residual features, only the index of one of them is returned.
Returns
-------
best_fit_list : list
List containing parameters of the chosen best fit for the spectrum. It is of the form [{0} params_fit, {1} params_errs, {2} ncomps_fit, {3} best_fit, {4} residual, {5} rchi2, {6} aicc, {7} new_fit, {8} params_min, {9} params_max, {10} pvalue]
"""
params_fit = best_fit_list[0]
ncomps_fit = best_fit_list[2]
# in case a single rms value is given instead of an array
if not isinstance(errors, np.ndarray):
errors = np.ones(len(data)) * errors
if ncomps_fit == 0:
if get_count:
return 0
return best_fit_list
residual = best_fit_list[4]
amps_fit, fwhms_fit, offsets_fit = split_params(params_fit, ncomps_fit)
amp_guesses, fwhm_guesses, offset_guesses = get_initial_guesses(
residual, errors[0], dct['snr_negative'], dct['significance'],
peak='negative')
# check if negative residual feature was already present in the data
remove_indices = []
for i, offset in enumerate(offset_guesses):
if residual[offset] > (data[offset] - dct['snr']*errors[0]):
remove_indices.append(i)
if len(remove_indices) > 0:
amp_guesses, fwhm_guesses, offset_guesses = remove_components_from_sublists(
[amp_guesses, fwhm_guesses, offset_guesses], remove_indices)
if get_count:
return (len(amp_guesses))
if len(amp_guesses) == 0:
return best_fit_list
# in case of multiple negative residual features, sort them in order of increasing amplitude values
sort = np.argsort(amp_guesses)
amp_guesses = np.array(amp_guesses)[sort]
fwhm_guesses = np.array(fwhm_guesses)[sort]
offset_guesses = np.array(offset_guesses)[sort]
for amp, fwhm, offset in zip(amp_guesses, fwhm_guesses, offset_guesses):
idx_low = max(0, int(offset - fwhm))
idx_upp = int(offset + fwhm) + 2
exclude_idx = check_which_gaussian_contains_feature(
idx_low, idx_upp, fwhms_fit, offsets_fit)
if get_idx:
return exclude_idx
if exclude_idx is None:
continue
params_fit = replace_gaussian_with_two_new_ones(
data, vel, errors[0], dct['snr'], dct['significance'],
params_fit, exclude_idx, offset)
best_fit_list = get_best_fit(
vel, data, errors, params_fit, dct, first=False,
best_fit_list=best_fit_list, signal_ranges=signal_ranges,
signal_mask=signal_mask, force_accept=force_accept,
noise_spike_mask=noise_spike_mask)
params_fit = best_fit_list[0]
ncomps_fit = best_fit_list[2]
amps_fit, fwhms_fit, offsets_fit = split_params(params_fit, ncomps_fit)
return best_fit_list
|
3901af224a080e99aa0cfdef22a27b8951ee4c3c
| 15,645
|
def make_dirs(path):
"""
Creates any folders that are missing and assigns them the permissions of their
parents
"""
logger.log(u"Checking if the path " + path + " already exists", logger.DEBUG)
if not ek.ek(os.path.isdir, path):
# Windows, create all missing folders
if os.name == 'nt' or os.name == 'ce':
try:
logger.log(u"Folder " + path + " didn't exist, creating it", logger.DEBUG)
ek.ek(os.makedirs, path)
except (OSError, IOError) as e:
logger.log(u"Failed creating " + path + " : " + ex(e), logger.ERROR)
return False
# not Windows, create all missing folders and set permissions
else:
sofar = ''
folder_list = path.split(os.path.sep)
# look through each subfolder and make sure they all exist
for cur_folder in folder_list:
sofar += cur_folder + os.path.sep
# if it exists then just keep walking down the line
if ek.ek(os.path.isdir, sofar):
continue
try:
logger.log(u"Folder " + sofar + " didn't exist, creating it", logger.DEBUG)
ek.ek(os.mkdir, sofar)
# use normpath to remove end separator, otherwise checks permissions against itself
chmodAsParent(ek.ek(os.path.normpath, sofar))
# do the library update for synoindex
notifiers.synoindex_notifier.addFolder(sofar)
except (OSError, IOError) as e:
logger.log(u"Failed creating " + sofar + " : " + ex(e), logger.ERROR)
return False
return True
|
f03ada0a9116a82a6e29fb5edc02b919793ad0ca
| 15,646
|
import requests
import shutil
def download(url, filename, proxies=None):
"""
Download the URL into the destination file.
:param url: URL to download
:param filename: destination file
"""
error = ''
try:
req = requests.get(url, proxies=proxies, stream=True)
with open(filename, "wb") as f:
shutil.copyfileobj(req.raw, f)
except FileNotFoundError as fnf:
error = f"Error while downloading {url} - I/O Problem with {filename} : FileNotFound -> check path"
except Exception as ex:
error = f"Error while downloading {url}. {str(ex)}"
return len(error) == 0, error, filename
|
da097b46aef574623ac975aa8d5e9506ff191d53
| 15,647
|
import json
async def validate_devinfo(hass, data):
"""检验配置是否缺项。无问题返回[[],[]],有缺项返回缺项。"""
# print(result)
devtype = data['devtype']
ret = [[],[]]
requirements = VALIDATE.get(devtype)
if not requirements:
return ret
else:
for item in requirements[0]:
if item not in json.loads(data[CONF_MAPPING]):
ret[0].append(item)
for item in requirements[1]:
if item not in json.loads(data[CONF_CONTROL_PARAMS]):
ret[1].append(item)
return ret
|
7e0822ae36617f209447221dc49f0485e88759e9
| 15,648
|
from pathlib import Path
import os
import math
def plot_artis_spectrum(
axes, modelpath, args, scale_to_peak=None, from_packets=False, filterfunc=None,
linelabel=None, plotpacketcount=False, **plotkwargs):
"""Plot an ARTIS output spectrum."""
if not Path(modelpath, 'input.txt').exists():
print(f"Skipping '{modelpath}' (no input.txt found. Not an ARTIS folder?)")
return
if plotpacketcount:
from_packets = True
for index, axis in enumerate(axes):
if args.multispecplot:
(timestepmin, timestepmax, args.timemin, args.timemax) = at.get_time_range(
modelpath, timedays_range_str=args.timedayslist[index])
else:
(timestepmin, timestepmax, args.timemin, args.timemax) = at.get_time_range(
modelpath, args.timestep, args.timemin, args.timemax, args.timedays)
modelname = at.get_model_name(modelpath)
if timestepmin == timestepmax == -1:
return
timeavg = (args.timemin + args.timemax) / 2.
timedelta = (args.timemax - args.timemin) / 2
if linelabel is None:
if len(modelname) < 70:
linelabel = f'{modelname}'
else:
linelabel = f'...{modelname[-67:]}'
if not args.hidemodeltime and not args.multispecplot:
# todo: fix this for multispecplot - use args.showtime for now
linelabel += f' +{timeavg:.0f}d'
if not args.hidemodeltimerange and not args.multispecplot:
linelabel += r' ($\pm$ ' + f'{timedelta:.0f}d)'
# Luke: disabled below because line label has already been formatted with e.g. timeavg values
# formatting for a second time makes it impossible to use curly braces in line labels (needed for LaTeX math)
# else:
# linelabel = linelabel.format(**locals())
if from_packets:
spectrum = get_spectrum_from_packets(
modelpath, args.timemin, args.timemax, lambda_min=args.xmin, lambda_max=args.xmax,
use_comovingframe=args.use_comovingframe, maxpacketfiles=args.maxpacketfiles,
delta_lambda=args.deltalambda, useinternalpackets=args.internalpackets, getpacketcount=plotpacketcount)
if args.outputfile is None:
statpath = Path()
else:
statpath = Path(args.outputfile).resolve().parent
else:
spectrum = get_spectrum(modelpath, timestepmin, timestepmax, fnufilterfunc=filterfunc)
if args.plotviewingangle: # read specpol res.
angles = args.plotviewingangle
viewinganglespectra = {}
for angle in angles:
viewinganglespectra[angle] = get_res_spectrum(modelpath, timestepmin, timestepmax, angle=angle,
fnufilterfunc=filterfunc, args=args)
elif args.plotvspecpol is not None and os.path.isfile(modelpath/'vpkt.txt'):
# read virtual packet files (after running plotartisspectrum --makevspecpol)
vpkt_config = at.get_vpkt_config(modelpath)
if (vpkt_config['time_limits_enabled'] and (
args.timemin < vpkt_config['initial_time'] or args.timemax > vpkt_config['final_time'])):
print(f"Timestep out of range of virtual packets: start time {vpkt_config['initial_time']} days "
f"end time {vpkt_config['final_time']} days")
quit()
angles = args.plotvspecpol
viewinganglespectra = {}
for angle in angles:
viewinganglespectra[angle] = get_vspecpol_spectrum(
modelpath, timeavg, angle, args, fnufilterfunc=filterfunc)
spectrum.query('@args.xmin <= lambda_angstroms and lambda_angstroms <= @args.xmax', inplace=True)
print(f"Plotting '{linelabel}' timesteps {timestepmin} to {timestepmax} "
f'({args.timemin:.3f} to {args.timemax:.3f}d)')
print(f" modelpath {modelname}")
print_integrated_flux(spectrum['f_lambda'], spectrum['lambda_angstroms'])
if scale_to_peak:
spectrum['f_lambda_scaled'] = spectrum['f_lambda'] / spectrum['f_lambda'].max() * scale_to_peak
if args.plotvspecpol is not None:
for angle in args.plotvspecpol:
viewinganglespectra[angle]['f_lambda_scaled'] = (
viewinganglespectra[angle]['f_lambda'] / viewinganglespectra[angle]['f_lambda'].max() *
scale_to_peak)
ycolumnname = 'f_lambda_scaled'
else:
ycolumnname = 'f_lambda'
if plotpacketcount:
ycolumnname = 'packetcount'
supxmin, supxmax = axis.get_xlim()
if (args.plotvspecpol is not None and os.path.isfile(modelpath/'vpkt.txt')) or args.plotviewingangle:
for angle in angles:
if args.binflux:
new_lambda_angstroms = []
binned_flux = []
wavelengths = viewinganglespectra[angle]['lambda_angstroms']
fluxes = viewinganglespectra[angle][ycolumnname]
nbins = 5
for i in np.arange(0, len(wavelengths) - nbins, nbins):
new_lambda_angstroms.append(wavelengths[i + int(nbins/2)])
sum_flux = 0
for j in range(i, i + nbins):
sum_flux += fluxes[j]
binned_flux.append(sum_flux / nbins)
plt.plot(new_lambda_angstroms, binned_flux)
else:
if args.plotvspecpol:
if args.viewinganglelabelunits == 'deg':
viewing_angle = round(math.degrees(math.acos(vpkt_config['cos_theta'][angle])))
linelabel = fr"$\theta$ = {viewing_angle}$^\circ$" if index == 0 else None
elif args.viewinganglelabelunits == 'rad':
linelabel = fr"cos($\theta$) = {vpkt_config['cos_theta'][angle]}" if index == 0 else None
else:
linelabel = f'bin number {angle}'
viewinganglespectra[angle].query(
'@supxmin <= lambda_angstroms and lambda_angstroms <= @supxmax').plot(
x='lambda_angstroms', y=ycolumnname, ax=axis, legend=None,
label=linelabel) # {timeavg:.2f} days {at.get_model_name(modelpath)}
else:
spectrum.query('@supxmin <= lambda_angstroms and lambda_angstroms <= @supxmax').plot(
x='lambda_angstroms', y=ycolumnname, ax=axis, legend=None,
label=linelabel if index == 0 else None, **plotkwargs)
return spectrum[['lambda_angstroms', 'f_lambda']]
|
d207fa1b037c2a7fac66baf3efa7d0438da9a088
| 15,649
|
import datetime
def worldbank_date_to_datetime(date):
"""Convert given world bank date string to datetime.date object."""
if "Q" in date:
year, quarter = date.split("Q")
return datetime.date(int(year), (int(quarter) * 3) - 2, 1)
if "M" in date:
year, month = date.split("M")
return datetime.date(int(year), int(month), 1)
return datetime.date(int(date), 1, 1)
|
f36fb2c763da59ae58a08f446e4cbd566d6e87e0
| 15,650
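Worked examples for the date parsing above (hypothetical inputs): a quarter string maps to the first month of that quarter via (quarter * 3) - 2, so '2019Q3' becomes July 2019.
worldbank_date_to_datetime("2019Q3")    # -> datetime.date(2019, 7, 1)
worldbank_date_to_datetime("2020M11")   # -> datetime.date(2020, 11, 1)
worldbank_date_to_datetime("2018")      # -> datetime.date(2018, 1, 1)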
|
from typing import Sequence
def select(
key: bytes, seq: Sequence[BucketType], *, seed: bytes = DEFAULT_SEED
) -> BucketType:
"""
Select one of the elements in seq based on the hash of ``key``.
Example partitioning of input on ``stdin`` into buckets::
bucketed_lines = {}  # type: Dict[int, str]
for line in sys.stdin:
    bucketed_lines[select(line.encode(), [0, 1, 2, 3, 4, 5])] = line
:param key: The bytes to hash.
:param seq: The sequence from which to select an element. Must be non-empty.
:param seed: Seed to hash prior to hashing b.
:raise ValueError: If ``seq`` is empty.
:return: One of the elements in ``seq``.
"""
if not seq:
raise ValueError("non-empty sequence required")
return seq[range(key, len(seq), seed=seed)]
|
d9f3a2f3aa759f252965478e0f065a555b23a24d
| 15,651
|
def unescape(s):
"""
unescape html
"""
html_codes = (
("'", '''),
('"', '"'),
('>', '>'),
('<', '<'),
('&', '&')
)
for code in html_codes:
s = s.replace(code[1], code[0])
return s
|
c1434498694d90b219c962c0ce75b6c8978533bb
| 15,652
|
def ua_mnem(*args):
"""ua_mnem(ea_t ea, char buf) -> char"""
return _idaapi.ua_mnem(*args)
|
23322dd3780c9184ebed9f6006a0fad37000f29a
| 15,653
|
import time
def tic(*names):
"""
Start timer, use `toc` to get elapsed time in seconds.
Parameters
----------
names : str, str, ...
Names of timers
Returns
-------
out : float
Current timestamp
Examples
--------
.. code-block:: python
:linenos:
:emphasize-lines: 10,11,12
import plazy
def foo():
total = 0
for _ in range(100000):
total += 1
return total
if __name__ == "__main__":
plazy.tic() # T1
plazy.tic("B") # T2
plazy.tic("C", "D", "E") # T3
foo()
dt1 = plazy.toc() # elapsed time since T1
dt2 = plazy.toc("B") # elapsed time since T2
dt3 = plazy.toc("C", "D") # elapsed time since T3
foo()
dt4 = plazy.toc("E") # elapsed time since T3
dt5 = plazy.toc("B") # elapsed time since T2
print(dt1) # 0.009924173355102539
print(dt2) # 0.009925603866577148
print(dt3) # [0.00992727279663086, 0.00992727279663086]
print(dt4) # 0.020497798919677734
print(dt5) # 0.020506620407104492
See also
--------
toc
"""
now_ts = time.time()
name_arr = list(names) + (
[
g_time_store.default_name,
]
if len(names) == 0
else []
)
for n in name_arr:
g_time_store.set_time(name=n, value=now_ts)
return now_ts
|
af9d93a092bed7849150cb3256a8fac6cc20f7d9
| 15,654
|
import os
import sys
import time
from math import sqrt
from PIL import Image
def tile(fnames, resize=(64,64), textonly=0, rows=None, cols=None):
"""Tiles the given images (by filename) and returns a tiled image"""
maxsize = [0, 0]
assert fnames
todel = set()
for fname in fnames:
try:
im = Image.open(fname)
maxsize = [max(m, s) for m, s in zip(maxsize, im.size)]
except Exception:
todel.add(fname)
continue
fnames = [os.path.realpath(f) for f in fnames if f not in todel] # convert symlinks to real paths
print >>sys.stderr, "There were %d images (removed %d bad) with maxsize %d x %d" % (len(fnames), len(todel), maxsize[0], maxsize[1])
# now figure out the right size of the output image
if not cols and not rows: # if neither dim is given, use the sqrt
cols = int(sqrt(len(fnames)))
rows = len(fnames)//cols + (0 if len(fnames)%cols == 0 else 1)
elif cols and not rows: # only cols is given
rows = len(fnames)//cols + (0 if len(fnames)%cols == 0 else 1)
elif not cols and rows: # only rows is given
cols = len(fnames)//rows + (0 if len(fnames)%rows == 0 else 1)
else: # both are given
pass
if textonly:
cur = 0
rows = list(nkgrouper(cols, fnames))
return rows
if resize:
boxsize = resize
else:
boxsize = maxsize
outsize = tuple([s*n for s, n in zip(boxsize, [cols, rows])])
print >>sys.stderr, "Output will be tiling %d x %d images, with image size %d x %d" % (cols, rows, outsize[0], outsize[1])
out = Image.new(im.mode, outsize)
cur = 0
start = time.time()
for r in range(rows):
for c in range(cols):
print(' At col %d, row %d, cur %d, %0.2f secs elapsed...\r ' % (c, r, cur, time.time()-start), end='', file=sys.stderr)
im = Image.open(fnames[cur]).resize(boxsize, Image.ANTIALIAS)
box = (c*boxsize[0], r*boxsize[1])
out.paste(im, box)
cur += 1
if cur >= len(fnames): break
print(file=sys.stderr)
return out
|
cda20573702a5987079436944a3e9673b4cc1f52
| 15,655
|
import logging
def get_ctrls(controls, timeout=10):
"""Get various servod controls."""
get_dict = {}
cmd = 'dut-control %s' % controls
(retval, _, out) = do_cmd(cmd, timeout, flist=['Errno', '- ERROR -'])
if retval:
for ctrl_line in out.split('\n'):
ctrl_line = ctrl_line.strip()
if len(ctrl_line):
logging.debug('ctrl_line=%s', ctrl_line)
try:
(name, value) = ctrl_line.strip().split(':')
get_dict[name] = value
except ValueError:
logging.debug("Unable to parse ctrl %s", ctrl_line)
return (True, get_dict)
return (False, get_dict)
|
be86729678b2a1da6f668c2c6092b2d8130fb264
| 15,656
|
def retrieve_database():
"""Return the contents of MongoDB as a dataframe."""
return pd.DataFrame(list(restaurant_collection.find({})))
|
20d53dff1cc3164cedef303f8c4e38f7774e9f5e
| 15,657
|
from typing import List
def readAbstractMethodsFromFile(file: str) -> List[AbstractMethod]:
"""
Returns a list of `AbstractMethods` read from the given `file`. The file should have one `AbstractMethod`
per line with tokens separated by spaces.
"""
abstractMethods = []
with open(file, "r") as f:
for line in f:
abstractMethods.append(AbstractMethod(line.strip()))
return abstractMethods
|
44a1fb346bfbb0eb71882b623887744dbbfb5143
| 15,658
|
import os
def _find_possible_tox(path, toxenv):
"""Given a path and a tox target, see if flake8 is already installed."""
# First try to discover existing flake8
while(path and path != '/'):
path = os.path.dirname(path)
# the locations of possible flake8
venv = path + "/.tox/%s" % toxenv
flake8 = venv + "/bin/flake8"
if os.path.isdir(venv) and os.path.exists(flake8):
# we found a flake8 in a venv so set that as the running venv
ENV["VIRTUAL_ENV"] = venv
# parse the ignores to pass them on the command line
ENV["CONFIG"] = ignores(path)
ENV["IGNORES"] = ENV["CONFIG"].get("ignore", "")
# set the working directory so that 'hacking' can pick up
# it's config
ENV['PWD'] = path
LOG.debug("Found flake8 %s, ENV=%s" % (flake8, ENV))
return flake8
|
fc9afddf5564c1eb0d1c6a52bd91b1ff7c1bc857
| 15,659
|
from typing import Callable
import numpy as np
def recursive_descent(data: np.ndarray, function: Callable):
"""
**Recursively process an `np.ndarray` down to its last dimension.**
This function applies a callable to the very last dimension of a numpy multidimensional array. It is intended
for time-series processing, especially in combination with the function `ts_gaf_transform`.
+ param **data**: multidimensional data, type `np.ndarray`.
+ param **function**: callable, type `Callable`.
+ return **function(data)**: all kind of processed data.
"""
if len(data.shape) == 1:
return function(data)
return [recursive_descent(data[i], function) for i in range(data.shape[0])]
|
c5955f5c3968aae53ae222cc4c0288320c3fb1c6
| 15,660
|
def watt_spectrum(a, b):
""" Samples an energy from the Watt energy-dependent fission spectrum.
Parameters
----------
a : float
Spectrum parameter a
b : float
Spectrum parameter b
Returns
-------
float
Sampled outgoing energy
"""
return _dll.watt_spectrum(a, b)
|
0682a8791cb1b1cce93b4449bebfa2ac098fde20
| 15,661
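For reference, the Watt fission spectrum sampled by the entry above is usually written as chi(E) proportional to exp(-E/a) * sinh(sqrt(b*E)), with a and b the spectrum parameters documented in the docstring; the sampling itself is delegated to the underlying library call (_dll.watt_spectrum).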
|
def get_definition_from_stellarbeat_quorum_set(quorum_set: QuorumSet) -> Definition:
"""Turn a stellarbeat quorum set into a quorum slice definition"""
return {
'threshold': quorum_set['threshold'],
'nodes': set(quorum_set['validators']) if 'validators' in quorum_set else set(),
'children_definitions': [
get_definition_from_stellarbeat_quorum_set(inner_quorum_set)
for inner_quorum_set in quorum_set['innerQuorumSets']
] if 'innerQuorumSets' in quorum_set else []
}
|
6259c820c4b920672f8b2333826f9996de3cd405
| 15,662
|
from functools import wraps
def values(names):
"""
Method decorator that injects return values into method parameters.
It tries to find the desired value by descending into nested dicts. For convenience, a list containing a single value is injected as that value itself.
:param names: dict of "value-name": "method-parameter-name"
"""
def wrapper(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
if len(args)>1:
instance=args[1]
else:
instance = kwargs['instance']
def findReturnValues(rvalues):
for k, v in rvalues.items():
if isinstance(v, dict):
findReturnValues(v) #go deep, to find desired name
if k in names.keys():
if isinstance(v,list) and len(v)==1:
kwargs.update({names[k]: v[0]})
else:
kwargs.update({names[k]: v})
findReturnValues(instance.returnValues)
#ensure all names was set
missing_params = [k for k, v in names.items() if v not in kwargs]
if missing_params:
raise AttributeError("Parameters {0} for '{1}' were not found".format(missing_params, func.__name__), missing_params)
func(*args, **kwargs)
return wrapped_func
return wrapper
|
72e958692a14254b26e7ff1241103aa0a1063a33
| 15,663
|
from operator import or_
from sqlalchemy import func
from sqlalchemy.orm.exc import NoResultFound
def select_user(with_dlslots=True):
"""
Select one random user. If with_dlslots is true, the user must have
download slots available.
:returns User
"""
with session_scope() as db:
try:
query = db.query(User).filter(User.enabled.is_(True))
if with_dlslots:
query = query.filter(or_(
User.downloads_limit > User.downloads_today,
User.downloads_limit.is_(None)
))
user = query.order_by(func.random()).limit(1).one()
except NoResultFound:
raise OperationInterruptedException('No suitable users found')
else:
db.expunge(user)
return user
|
2ba3e6c8b0f5488aa770fa982d3206c2dde5d0e1
| 15,664
|
def silero_number_detector(onnx=False):
"""Silero Number Detector
Returns a model with a set of utils
Please see https://github.com/snakers4/silero-vad for usage examples
"""
if onnx:
url = 'https://models.silero.ai/vad_models/number_detector.onnx'
else:
url = 'https://models.silero.ai/vad_models/number_detector.jit'
model = Validator(url)
utils = (get_number_ts,
save_audio,
read_audio,
collect_chunks,
drop_chunks)
return model, utils
|
46fa5a33b9e33cfebdc081e062f98711e3e8be61
| 15,665
|
def etaCalc(T, Tr = 296.15, S = 110.4, nr = 1.83245*10**-5):
"""
Calculates dynamic gas viscosity in kg*m-1*s-1
Parameters
----------
T : float
Temperature (K)
Tr : float
Reference Temperature (K)
S : float
Sutherland constant (K)
nr : float
Reference dynamic viscosity
Returns
-------
eta : float
Dynamic gas viscosity in kg*m-1*s-1
"""
eta = nr * ( (Tr + S) / (T+S) )*(T/Tr)**(3/2)
return eta
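
# Quick sanity check: dynamic viscosity of an air-like gas at 300 K using the
# default reference values above; the expected magnitude is roughly 1.85e-5.
print(etaCalc(300.0))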
|
3f8182ea29fd558e86280477f2e435247d09798e
| 15,666
|
def refine_markers_harris(patch, offset):
""" Heuristically uses the max Harris response for control point center. """
harris = cv2.cornerHarris(patch, 2, 5, 0.07)
edges = np.where(harris < 0, np.abs(harris), 0)
point = np.array(np.where(harris == harris.max())).flatten()
point += offset
return np.float64(point)
|
afa080a8292c210f04483cddf8d81e297b5f2aec
| 15,667
|
def get_realtime_price(symbol):
    """
    Get the real-time stock price.
    :param symbol: stock code
    :return: latest trade price, or -1 if it cannot be retrieved
    """
    try:
        df = get_real_price_dataframe()
        df_s = df[df['code'] == symbol]
        if len(df_s['trade'].get_values()):
            return df_s['trade'].get_values()[0]
        else:
            return -1
    except Exception:
        return -1
|
af81787ab00283309829a6af2ef04155f73f360c
| 15,668
|
def create_employee(db_session: Session, employee: schemas.EmployeeRequest):
""" Create new employee """
new_employee = Employee(
idir=employee.idir,
status=employee.status,
location=employee.location,
phone=employee.phone)
db_session.add(new_employee)
db_session.commit()
db_session.refresh(new_employee)
return db_session.query(Employee).filter(Employee.idir == employee.idir).first()
|
1f858412e0e2c94ca40414054c28d67639b620aa
| 15,669
|
def sim_categorical(var_dist_params, size):
"""
Function to simulate data for
a categorical/Discrete variable.
"""
values = var_dist_params[0]
freq = var_dist_params[1]
data_sim = np.random.choice(a=values, p=freq, size=size)
return data_sim
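
# Hedged usage sketch (assumes numpy is imported as np in this module): draw
# five labels with the given frequencies.
_var_dist_params = (['red', 'green', 'blue'], [0.5, 0.3, 0.2])
print(sim_categorical(_var_dist_params, size=5))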
|
1b81eac17f041a9200b8c96b9c86310d6d3b003f
| 15,670
|
def validSolution(board: list) -> bool:
"""
A function validSolution/ValidateSolution/valid_solution()
that accepts a 2D array representing a Sudoku board,
and returns true if it is a valid solution, or false otherwise
:param board:
:return:
"""
return all([test_horizontally(board),
test_vertically(board),
test_sub_grids(board)])
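
# Hedged usage sketch: assumes the helper predicates test_horizontally,
# test_vertically and test_sub_grids are defined elsewhere in this module.
# The board below is a well-known completed Sudoku grid, so the call should
# return True.
_solved_board = [
    [5, 3, 4, 6, 7, 8, 9, 1, 2],
    [6, 7, 2, 1, 9, 5, 3, 4, 8],
    [1, 9, 8, 3, 4, 2, 5, 6, 7],
    [8, 5, 9, 7, 6, 1, 4, 2, 3],
    [4, 2, 6, 8, 5, 3, 7, 9, 1],
    [7, 1, 3, 9, 2, 4, 8, 5, 6],
    [9, 6, 1, 5, 3, 7, 2, 8, 4],
    [2, 8, 7, 4, 1, 9, 6, 3, 5],
    [3, 4, 5, 2, 8, 6, 1, 7, 9],
]
# print(validSolution(_solved_board))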
|
023741cac4106372e8b4c9b8b7c7e60fb9837a7b
| 15,671
|
async def test_create_saves_data(manager):
"""Test creating a config entry."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 5
async def async_step_init(self, user_input=None):
return self.async_create_entry(title="Test Title", data="Test Data")
await manager.async_init("test")
assert len(manager.async_progress()) == 0
assert len(manager.mock_created_entries) == 1
entry = manager.mock_created_entries[0]
assert entry["version"] == 5
assert entry["handler"] == "test"
assert entry["title"] == "Test Title"
assert entry["data"] == "Test Data"
assert entry["source"] is None
|
66905b55dcbf3d04b9a1d7d6cbdf843c8694eafb
| 15,672
|
def drawingpad(where=None, x=0, y=0, image=None, color=0xffffff, fillingColor=0x000000, thickness=3):
"""Create a drawing pad.
Args:
where (np.ndarray) : image/frame where the component should be rendered.
x (int) : Position X where the component should be placed.
y (int) : Position Y where the component should be placed.
image (np.ndarray) : Image to be rendered in the specified destination.
color (uint) : Color of the line in the format ``0xRRGGBB``, e.g. ``0xff0000`` for red.
fillingColor (uint) : Color of filling in the format `0xAARRGGBB`, e.g. `0x00ff0000` for red, `0xff000000` for transparent filling.
thickness (int) : Thickness of the lines used to draw a line.
Returns:
np.ndarray : The current ``image`` .
Examples:
>>> import cv2
>>> import numpy as np
>>> from pycharmers.opencv import cvui
...
>>> WINDOW_NAME = 'Drawing Pad'
>>> frame = np.zeros(shape=(400, 650, 3), dtype=np.uint8)
>>> image = np.full(shape=(250,250,3), fill_value=255, dtype=np.uint8)
>>> bgr = [128, 128, 128]
>>> fillingColors = ["White", "Black"]
>>> fillingStates = [True, False]
>>> thickness = [3]
>>> cvui.init(WINDOW_NAME)
>>> cv2.moveWindow(winname=WINDOW_NAME, x=0, y=0)
...
>>> while (True):
... # Fill the frame with a nice color
... frame[:] = (49, 52, 49)
... cvui.text(where=frame, x=320, y=10, text="Thickness")
... cvui.text(where=frame, x=320, y=100, text="Filling Color")
... thick = cvui.trackbar(where=frame, x=320, y=30, width=300, value=thickness, min=1, max=10, options=cvui.TRACKBAR_DISCRETE, discreteStep=1)
... idx = cvui.radiobox(where=frame, x=350, y=120, labels=fillingColors, states=fillingStates)
... bgr = cvui.colorpalette(where=frame, x=320, y=180, bgr=bgr, width=300, height=50)
... image = cvui.drawingpad(where=frame, x=30, y=50, image=image, color=bgr, fillingColor=[0xffffff, 0x000000][idx], thickness=thick)
... cvui.update()
... # Show everything on the screen
... cv2.imshow(WINDOW_NAME, frame)
... # Check if ESC key was pressed
... if cv2.waitKey(20) == cvui.ESCAPE:
... break
>>> cv2.destroyWindow(WINDOW_NAME)
>>> # You can draw a picture as follows by executing the following program while running the above program.
>>> def drawing(path, dsize=(250,250), thresh=127, sleep=3, drawing_val=0, offset=(30,125)):
... \"\"\"
... Args:
... path (str) : Path to binary image.
... dsize (tuple) : The size of drawing pad. ( ``width`` , ``height`` )
... thresh (int) : If you prepare the binary (bgr) image, you can use ``cv2.threshold`` to convert it to binary image. (See :meth:`cvPencilSketch <pycharmers.cli.cvPencilSketch.cvPencilSketch>` for more details.)
... sleep (int) : Delay execution for a given number of seconds. (You have to click the OpenCV window before before entering the for-loop.)
... drawing_val (int) : At what value to draw.
... offset (tuple) : Offset from top left ( ``cv2.moveWindow(winname=WINDOW_NAME, x=0, y=0)`` ) to drawing pad.
... \"\"\"
... import cv2
... import time
... import pyautogui as pgui # Use for controling the mouse. (https://pyautogui.readthedocs.io/en/latest/mouse.html)
... img = cv2.resize(src=cv2.imread(path, 0), dsize=dsize)
... img = cv2.threshold(src=img, thresh=thresh, maxval=255, type=cv2.THRESH_BINARY)[1]
... WINDOW_NAME = "Apotheosis"
    ...     cv2.imshow(winname=WINDOW_NAME, mat=img)
... width,height = dsize
... x_offset, y_offset = offset
... time.sleep(sleep)
... for i in range(height):
... pgui.moveTo(x_offset, y_offset+i)
... prev_val, prev_pos = (0, 0)
... for j in range(width+1):
... if j<width:
... val = img[i,j]
... else:
... val = -1 # Must be different from ``prev_val``
... if prev_val != val:
... # Drawing.
... if prev_val == drawing_val:
... pgui.mouseDown()
... pgui.dragRel(xOffset=j-prev_pos, yOffset=0, button="left", duration=0.0, mouseDownUp=True)
... pgui.mouseUp()
... else:
... pgui.moveRel(xOffset=j-prev_pos, yOffset=0, duration=0.0)
... prev_pos = j
... prev_val = val
... key = cv2.waitKey(1)
... if key == 27: break
... if key == 27: break
... cv2.destroyWindow(WINDOW_NAME)
+--------------------------------------------------------+-------------------------------------------------------+
| Example |
+========================================================+=======================================================+
| .. image:: _images/opencv.cvui.drawingpad-konotaro.gif | .. image:: _images/opencv.cvui.drawingpad-tanziro.gif |
+--------------------------------------------------------+-------------------------------------------------------+
"""
handleTypeError(types=[np.ndarray, NoneType], where=where)
if isinstance(where, np.ndarray):
__internal.screen.where = where
block = __internal.screen
else:
block = __internal.topBlock()
x += block.anchor.x
y += block.anchor.y
return __internal.drawingpad(block, x, y, image, color, fillingColor, thickness)
|
25d16ee18b25965dd9d8ecbabb46353950c53297
| 15,673
|
def json_request(url, **kwargs):
"""
Request JSON data by HTTP
:param url: requested URL
:return: the dictionary
"""
if 'auth_creds' in kwargs and 'authentication_enabled' in kwargs['auth_creds']:
if 'sessionToken' in kwargs:
url += "&sessionToken=%s" % kwargs['auth_creds']['sessionToken']
else:
url += "&ignite.login=%s&ignite.password=%s" % (kwargs['auth_creds']['auth_login'],
kwargs['auth_creds']['auth_password'])
req = Request(url)
decoded = {}
try:
r = urlopen(req)
reply = r.read().decode('UTF-8')
decoded = loads(reply)
except HTTPError:
print('')
print("HTTPError %s" % url)
except URLError:
print('')
print("URLError %s" % url)
return decoded
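
# Hedged usage sketch: the URL below is hypothetical; pass auth_creds only when
# authentication is enabled on the Ignite REST endpoint.
# topology = json_request('http://localhost:8080/ignite?cmd=top')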
|
d0a496b36905a0a505d3eab721c8e23ab0eb0e21
| 15,674
|
import io
import json
def list():
    """
    List all added paths.
    """
    try:
        with io.open(FILE_NAME, 'r', encoding='utf-8') as f:
            data = json.load(f)
    except (OSError, ValueError):
        data = {}
    return data
|
98d426ece920648d1789c4b2e09ee62eb1cb990d
| 15,675
|
import pickle
def load(filename):
"""
Load an EigenM object
"""
with open(filename, 'rb') as f:
return pickle.load(f)
|
2653cbbe6a1323725c8ba0f771778e1c738daf12
| 15,676
|
def resize_image_bboxes_with_crop_or_pad(image, bboxes, xs, ys,
target_height, target_width, mask_image=None):
"""Crops and/or pads an image to a target width and height.
Resizes an image to a target width and height by either centrally
cropping the image or padding it evenly with zeros.
If `width` or `height` is greater than the specified `target_width` or
`target_height` respectively, this op centrally crops along that dimension.
If `width` or `height` is smaller than the specified `target_width` or
`target_height` respectively, this op centrally pads with 0 along that
dimension.
Args:
image: 3-D tensor of shape `[height, width, channels]`
target_height: Target height.
target_width: Target width.
Raises:
ValueError: if `target_height` or `target_width` are zero or negative.
Returns:
Cropped and/or padded image of shape
`[target_height, target_width, channels]`
"""
with tf.name_scope('resize_with_crop_or_pad'):
image = ops.convert_to_tensor(image, name='image')
if mask_image is not None:
print('Image: ', image)
print('MaskImage: ', mask_image)
mask_image = ops.convert_to_tensor(mask_image, name='image')
assert_ops = []
assert_ops += _Check3DImage(image, require_static=False)
assert_ops += _assert(target_width > 0, ValueError,
'target_width must be > 0.')
assert_ops += _assert(target_height > 0, ValueError,
'target_height must be > 0.')
image = control_flow_ops.with_dependencies(assert_ops, image)
# `crop_to_bounding_box` and `pad_to_bounding_box` have their own checks.
# Make sure our checks come first, so that error messages are clearer.
if _is_tensor(target_height):
target_height = control_flow_ops.with_dependencies(
assert_ops, target_height)
if _is_tensor(target_width):
target_width = control_flow_ops.with_dependencies(assert_ops, target_width)
def max_(x, y):
if _is_tensor(x) or _is_tensor(y):
return math_ops.maximum(x, y)
else:
return max(x, y)
def min_(x, y):
if _is_tensor(x) or _is_tensor(y):
return math_ops.minimum(x, y)
else:
return min(x, y)
def equal_(x, y):
if _is_tensor(x) or _is_tensor(y):
return math_ops.equal(x, y)
else:
return x == y
height, width, _ = _ImageDimensions(image)
width_diff = target_width - width
offset_crop_width = max_(-width_diff // 2, 0)
offset_pad_width = max_(width_diff // 2, 0)
height_diff = target_height - height
offset_crop_height = max_(-height_diff // 2, 0)
offset_pad_height = max_(height_diff // 2, 0)
# Maybe crop if needed.
height_crop = min_(target_height, height)
width_crop = min_(target_width, width)
cropped = tf.image.crop_to_bounding_box(image, offset_crop_height, offset_crop_width,
height_crop, width_crop)
if mask_image is not None:
cropped_mask_image = tf.image.crop_to_bounding_box(mask_image, offset_crop_height, offset_crop_width,
height_crop, width_crop)
bboxes, xs, ys = bboxes_crop_or_pad(bboxes, xs, ys,
height, width,
-offset_crop_height, -offset_crop_width,
height_crop, width_crop)
# Maybe pad if needed.
resized = tf.image.pad_to_bounding_box(cropped, offset_pad_height, offset_pad_width,
target_height, target_width)
if mask_image is not None:
resized_mask_image = tf.image.pad_to_bounding_box(cropped_mask_image, offset_pad_height, offset_pad_width,
target_height, target_width)
bboxes, xs, ys = bboxes_crop_or_pad(bboxes, xs, ys,
height_crop, width_crop,
offset_pad_height, offset_pad_width,
target_height, target_width)
# In theory all the checks below are redundant.
if resized.get_shape().ndims is None:
raise ValueError('resized contains no shape.')
resized_height, resized_width, _ = _ImageDimensions(resized)
assert_ops = []
assert_ops += _assert(equal_(resized_height, target_height), ValueError,
'resized height is not correct.')
assert_ops += _assert(equal_(resized_width, target_width), ValueError,
'resized width is not correct.')
resized = control_flow_ops.with_dependencies(assert_ops, resized)
if mask_image is None:
return resized, None, bboxes, xs, ys
else:
return resized, resized_mask_image, bboxes, xs, ys
|
80c215d0cb750ce55d4d38ef2748f9b89789519c
| 15,677
|
def sharpe_ratio(R_p, sigma_p, R_f=0.04):
"""
    :param R_p: annualized return of the strategy
    :param R_f: risk-free rate (default 0.04)
    :param sigma_p: volatility of the strategy's returns
:return: sharpe_ratio
"""
sharpe_ratio = 1.0 * (R_p - R_f) / sigma_p
return sharpe_ratio
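
# Worked example: 15% annualized return and 20% volatility with the default 4%
# risk-free rate gives (0.15 - 0.04) / 0.20 = 0.55.
print(sharpe_ratio(R_p=0.15, sigma_p=0.20))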
|
d197df7aa3b92f3a32cc8f11eb675012ffe8af57
| 15,678
|
def xr_vol_int_regional(xa, AREA, DZ, MASK):
""" volumen integral with regional MASK
input:
xa, AREA, DZ .. same as in 'xr_vol_int'
MASK .. 2D xr DataArray of booleans with the same dimensions as xa
output:
integral, int_levels .. same as in 'xr_vol_int'
"""
assert type(xa)==xr.core.dataarray.DataArray
assert type(AREA)==xr.core.dataarray.DataArray
assert type(DZ)==xr.core.dataarray.DataArray
assert np.shape(AREA)==np.shape(xa)[-2:]
assert np.shape(DZ)==np.shape(xa)[-3:]
assert np.dtype(MASK)==np.dtype('bool')
# determine min/max i/j of masked region
(imin, imax, jmin, jmax) = find_regional_coord_extent(MASK)
xa_reg = xa.where(MASK)[:,jmin:jmax+1,imin:imax+1]
AREA_reg = AREA.where(MASK)[jmin:jmax+1,imin:imax+1]
DZ_reg = DZ.where(MASK)[:,jmin:jmax+1,imin:imax+1]
integral, int_levels = xr_vol_int(xa_reg, AREA_reg, DZ_reg)
return integral, int_levels
|
4840d66ec6164d16df56f9efeebf9d242bc60613
| 15,679
|
from typing import List
def test(
coverage: bool = typer.Option( # noqa: B008
default=False, help='Generate coverage information.'
),
html: bool = typer.Option( # noqa: B008
default=False, help='Generate an html coverage report.'
),
) -> List[Result]:
"""Run tests."""
coverage_flag = [f'--cov={PACKAGE_NAME}'] if coverage else []
return [
execute(['pytest', *coverage_flag, 'tests'], raise_error=False),
*(coverage_html() if coverage and html else ()),
]
|
0b9fe2d265fd604ae32df29bcde71041a1f5dfcf
| 15,680
|
import sys
from typing import Any
PY3 = sys.version_info[0] == 3  # Python-version flag used by the conditional import below
def _get_win_folder_from_registry(csidl_name: Any) -> Any:
    """This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names."""
    if PY3:
        import winreg as _winreg
    else:
        import _winreg
    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]
    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
    )
    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
    return dir
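
# Hedged usage sketch (Windows only):
# roaming_appdata = _get_win_folder_from_registry("CSIDL_APPDATA")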
|
76dda1c71ea7184a327c0d32a35ecbf45579dd7c
| 15,681
|
def transitions2kernelreward(transitions, num_states, num_actions):
"""Transform a dictionary of transitions to kernel, reward matrices."""
kernel = np.zeros((num_states, num_actions, num_states))
reward = np.zeros((num_states, num_actions))
for (state, action), transition in transitions.items():
for data in transition:
kernel[state, action, data["next_state"]] += data["probability"]
reward[state, action] += data["reward"] * data["probability"]
return kernel, reward
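
# Hedged usage sketch (assumes numpy is imported as np): a 2-state, 1-action
# chain written in the expected {(state, action): [transition dicts]} layout.
_transitions = {
    (0, 0): [{"next_state": 1, "probability": 1.0, "reward": 1.0}],
    (1, 0): [{"next_state": 0, "probability": 0.7, "reward": 0.0},
             {"next_state": 1, "probability": 0.3, "reward": 2.0}],
}
_kernel, _reward = transitions2kernelreward(_transitions, num_states=2, num_actions=1)
print(_kernel[1, 0])  # [0.7 0.3]
print(_reward[1, 0])  # 0.0*0.7 + 2.0*0.3 = 0.6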
|
72165577890342cf1e3f01dbe853cd0024a0324e
| 15,682
|
def _inline_svg(svg: str) -> str:
"""Encode SVG to be used inline as part of a data URI.
Replacements are not complete, but sufficient for this case.
See https://codepen.io/tigt/post/optimizing-svgs-in-data-uris
for details.
"""
replaced = (
svg
.replace('\n', '%0A')
.replace('#', '%23')
.replace('<', '%3C')
.replace('>', '%3E')
.replace('"', '\'')
)
return 'data:image/svg+xml,' + replaced
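
# Quick usage sketch: inline a trivial SVG as a data URI.
_svg = '<svg xmlns="http://www.w3.org/2000/svg"></svg>'
print(_inline_svg(_svg))
# -> data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg'%3E%3C/svg%3E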
|
4e3c25f5d91dd7691f42f9b9ace4d64a297eb32f
| 15,683
|
def contact_infectivity_asymptomatic_00x40():
"""
Real Name: b'contact infectivity asymptomatic 00x40'
Original Eqn: b'contacts per person normal 00x40*infectivity per contact'
Units: b'1/Day'
Limits: (None, None)
Type: component
b''
"""
return contacts_per_person_normal_00x40() * infectivity_per_contact()
|
2c71d8955078186636de0d0b369b37a71dcec3fc
| 15,684
|
import click
def implemented_verified_documented(function):
""" Common story options """
options = [
click.option(
'--implemented', is_flag=True,
help='Implemented stories only.'),
click.option(
'--unimplemented', is_flag=True,
help='Unimplemented stories only.'),
click.option(
'--verified', is_flag=True,
help='Stories verified by tests.'),
click.option(
'--unverified', is_flag=True,
help='Stories not verified by tests.'),
click.option(
'--documented', is_flag=True,
help='Documented stories only.'),
click.option(
'--undocumented', is_flag=True,
help='Undocumented stories only.'),
click.option(
'--covered', is_flag=True,
help='Covered stories only.'),
click.option(
'--uncovered', is_flag=True,
help='Uncovered stories only.'),
]
for option in reversed(options):
function = option(function)
return function
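
# Hedged usage sketch: attach the shared story options to a hypothetical click
# command; each flag then arrives as a keyword argument.
@click.command()
@implemented_verified_documented
def ls(implemented, unimplemented, verified, unverified,
       documented, undocumented, covered, uncovered):
    """List stories (illustrative command, not part of the original module)."""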
|
8c1dd5aaa0b962d96e9e90336183a29e2cf360db
| 15,685
|
import requests
def create_collection(self, name, url, sourceType, **options):
"""Creates a new collection from a web or S3 url. Automatically kick off default indexes"""
(endpoint, method) = self.endpoints['create_collection']
try:
headers = {'Authorization': self.token.authorization_header()}
data = {
'name': name,
'url': url,
'sourceType': sourceType,
'indexWithDefault': 'true' if options.get('indexWithDefault') else 'false'
}
return requests.request(method, endpoint, **{'headers': headers, 'data': data})
except Exception as e:
raise error.APIConnectionError(message=e)
|
37f7128526b5b7b1a22a9f946774f934827aa555
| 15,686
|
from typing import Union
def rmse(estimated: np.ndarray, true: np.ndarray) -> Union[np.ndarray, None]:
"""
Calculate the root-mean-squared error between two arrays.
:param estimated: estimated solution
:param true: 'true' solution
:return: root-mean-squared error
"""
return np.sqrt(((estimated - true) ** 2).mean(axis=1))
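
# Quick sanity check (assumes numpy is imported as np): every row of the
# estimate is off by exactly 1.0, so the row-wise RMSE is 1.0.
print(rmse(np.ones((2, 4)), np.zeros((2, 4))))  # [1. 1.]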
|
10eb4974f5d95ca20b5336e2e9637eb6426802ae
| 15,687
|
def energy_com(data):
""" Calculate the energy center of mass for each day, and use this quantity
as an estimate for solar noon.
Function infers time stamps from the length of the first axis of the 2-D
data array.
:param data: PV power matrix as generated by `make_2d` from `solardatatools.data_transforms`
:return: A 1-D array, containing the solar noon estimate for each day in the data set
"""
data = np.copy(data)
data[np.isnan(data)] = 0
num_meas_per_hour = data.shape[0] / 24
x = np.arange(0, 24, 1. / num_meas_per_hour)
div1 = np.dot(x, data)
div2 = np.sum(data, axis=0)
com = np.empty_like(div1)
com[:] = np.nan
msk = div2 != 0
com[msk] = np.divide(div1[msk], div2[msk])
return com
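
# Hedged sanity check (assumes numpy is imported as np): with 24 samples per
# day and all power in the sample at index 12, the energy center of mass
# (solar noon estimate) is 12.0 hours for every day.
_power = np.zeros((24, 3))
_power[12, :] = 1.0
print(energy_com(_power))  # [12. 12. 12.]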
|
1b276b003f8527672fb95ad03b69536043a7ba17
| 15,688
|
import random
from collections import defaultdict
import numpy as np
def cifar_noniid(dataset, no_participants, alpha=0.9):
    """
    Input: Number of participants and alpha (param for distribution)
    Output: A list of indices denoting data in CIFAR training set.
    Requires: cifar_classes, a preprocessed class-index dictionary.
    Sample Method: take a uniformly sampled 10-dimension vector as parameters for
    dirichlet distribution to sample number of images in each class.
    """
np.random.seed(666)
random.seed(666)
cifar_classes = {}
for ind, x in enumerate(dataset):
_, label = x
if label in cifar_classes:
cifar_classes[label].append(ind)
else:
cifar_classes[label] = [ind]
per_participant_list = defaultdict(list)
no_classes = len(cifar_classes.keys())
class_size = len(cifar_classes[0])
datasize = {}
for n in range(no_classes):
random.shuffle(cifar_classes[n])
sampled_probabilities = class_size * np.random.dirichlet(
np.array(no_participants * [alpha]))
for user in range(no_participants):
no_imgs = int(round(sampled_probabilities[user]))
datasize[user, n] = no_imgs
sampled_list = cifar_classes[n][:min(len(cifar_classes[n]), no_imgs)]
per_participant_list[user].extend(sampled_list)
cifar_classes[n] = cifar_classes[n][min(len(cifar_classes[n]), no_imgs):]
train_img_size = np.zeros(no_participants)
for i in range(no_participants):
train_img_size[i] = sum([datasize[i,j] for j in range(10)])
clas_weight = np.zeros((no_participants,10))
for i in range(no_participants):
for j in range(10):
clas_weight[i,j] = float(datasize[i,j])/float((train_img_size[i]))
return per_participant_list, clas_weight
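
# Hedged usage sketch: the torchvision dataset below is only illustrative of
# the expected (image, label) iterable.
# import torchvision
# train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True)
# per_user_indices, class_weights = cifar_noniid(train_set, no_participants=10, alpha=0.9)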
|
8ecbb6df113d04b5ff737bb065bc4e578d06c69b
| 15,689
|
from typing import Dict
def example_metadata(
request,
l1_ls5_tarball_md_expected: Dict,
l1_ls7_tarball_md_expected: Dict,
l1_ls8_folder_md_expected: Dict,
):
"""
Test against arbitrary valid eo3 documents.
"""
which = request.param
if which == "ls5":
return l1_ls5_tarball_md_expected
elif which == "ls7":
return l1_ls7_tarball_md_expected
elif which == "ls8":
return l1_ls8_folder_md_expected
raise AssertionError
|
fd67c395aa7d773bc5757ca5649fed60b023e14f
| 15,690
|
def register_middleware(app: FastAPI):
"""
    Request/response interception hook.
https://fastapi.tiangolo.com/tutorial/middleware/
:param app:
:return:
"""
@app.middleware("http")
async def logger_request(request: Request, call_next):
# https://stackoverflow.com/questions/60098005/fastapi-starlette-get-client-real-ip
logger.info(f"request:{request.method} url:{request.url}\nheaders:{request.headers.get('user-agent')}"
f"\nIP:{request.client.host}")
response = await call_next(request)
return response
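
# Hedged usage sketch: wire the middleware into an application at startup.
# app = FastAPI()
# register_middleware(app)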
|
3455c7d406c405ae0df681d90bfcf57facddaa03
| 15,691
|
def asfarray(a, dtype=_nx.float_):
"""
Return an array converted to float type.
Parameters
----------
a : array_like
Input array.
dtype : string or dtype object, optional
Float type code to coerce input array `a`. If one of the 'int' dtype,
it is replaced with float64.
Returns
-------
out : ndarray, float
Input `a` as a float ndarray.
Examples
--------
>>> np.asfarray([2, 3])
array([ 2., 3.])
>>> np.asfarray([2, 3], dtype='float')
array([ 2., 3.])
>>> np.asfarray([2, 3], dtype='int8')
array([ 2., 3.])
"""
dtype = _nx.obj2sctype(dtype)
if not issubclass(dtype, _nx.inexact):
dtype = _nx.float_
return asarray(a,dtype=dtype)
|
8d26529602853e36dd8dc619d6210c475dcb7cd0
| 15,692
|
def read_geotransform_s2(path, fname='MTD_TL.xml', resolution=10):
"""
Parameters
----------
path : string
location where the meta data is situated
fname : string
file name of the meta-data file
resolution : {float,integer}, unit=meters, default=10
resolution of the grid
Returns
-------
geoTransform : tuple, size=(1,6)
affine transformation coefficients
Notes
-----
The metadata is scattered over the file structure of Sentinel-2, L1C
.. code-block:: text
* S2X_MSIL1C_20XX...
├ AUX_DATA
├ DATASTRIP
│ └ DS_XXX_XXXX...
│ └ QI_DATA
│ └ MTD_DS.xml <- metadata about the data-strip
├ GRANULE
│ └ L1C_TXXXX_XXXX...
│ ├ AUX_DATA
│ ├ IMG_DATA
│ ├ QI_DATA
│ └ MTD_TL.xml <- metadata about the tile
├ HTML
├ rep_info
├ manifest.safe
├ INSPIRE.xml
└ MTD_MSIL1C.xml <- metadata about the product
The following acronyms are used:
- DS : datastrip
- TL : tile
- QI : quality information
- AUX : auxiliary
- MTD : metadata
- MSI : multi spectral instrument
- L1C : product specification,i.e.: level 1, processing step C
"""
root = get_root_of_table(path, fname)
# image dimensions
for meta in root.iter('Geoposition'):
res = float(meta.get('resolution'))
if res == resolution:
ul_X,ul_Y= float(meta[0].text), float(meta[1].text)
d_X, d_Y = float(meta[2].text), float(meta[3].text)
geoTransform = (ul_X, d_X, 0., ul_Y, 0., d_Y)
return geoTransform
|
322a5bb149f0cc28dc813adebb3dad861e3a3218
| 15,693
|
def embed_into_hbox_layout(w, margin=5):
"""Embed a widget into a layout to give it a frame"""
result = QWidget()
layout = QHBoxLayout(result)
layout.setContentsMargins(margin, margin, margin, margin)
layout.addWidget(w)
return result
|
a7a5182ac6e555f3adcbe7a9b11a6826517d08f4
| 15,694
|
def make_word_ds(grids, trfiles, bad_words=DEFAULT_BAD_WORDS):
"""Creates DataSequence objects containing the words from each grid, with any words appearing
in the [bad_words] set removed.
"""
ds = dict()
stories = grids.keys()
for st in stories:
grtranscript = grids[st].tiers[1].make_simple_transcript()
## Filter out bad words
goodtranscript = [x for x in grtranscript
if x[2].lower().strip("{}").strip() not in bad_words]
d = DataSequence.from_grid(goodtranscript, trfiles[st][0])
ds[st] = d
return ds
|
43f405605d461dd6972f131c1474dad6b8acf35c
| 15,695
|
def fslimage_to_qpdata(img, name=None, vol=None, region=None, roi=False):
""" Convert fsl.data.Image to QpData """
if not name: name = img.name
if vol is not None:
data = img.data[..., vol]
else:
data = img.data
if region is not None:
        data = (data == region).astype(int)
return NumpyData(data, grid=DataGrid(img.shape[:3], img.voxToWorldMat), name=name, roi=roi)
|
1f383f17196a11f64ab6d95f5cf001dc41346372
| 15,696
|
import gc
def xgb_cv(
data_, test_, y_, max_depth,gamma, reg_lambda , reg_alpha,\
subsample, scale_pos_weight, min_child_weight, colsample_bytree,
test_phase=False, stratify=False,
):
"""XGBoost cross validation.
This function will instantiate a XGBoost classifier with parameters
such as max_depth, subsample etc. Combined with data and
targets this will in turn be used to perform cross validation. The result
of cross validation is returned.
Our goal is to find combinations of parameters that maximizes AUC.
Returns:
if test_phase (and new data for validators, just change the test_ param
to the new data and make sure that the features are processed):
sub_preds : models prediction to get the hold-out score
else:
validation AUC score
Model Notes:
XGBoost overfits in this dataset, params should be set accordingly.
Parameter Notes
gamma : Minimum loss reduction required to make a further partition on a leaf \
node of the tree. The larger gamma is, the more conservative the algorithm will be.
min_child_weight : The larger min_child_weight is, the more conservative the algorithm will be.
colsample_bytree : The subsample ratio of columns when constructing each tree.
scale_pos_weight : A typical value to consider: sum(negative instances) / sum(positive instances)
"""
oof_preds = np.zeros(data_.shape[0])
sub_preds = np.zeros(test_.shape[0])
if test_phase:
max_depth = int(np.round(max_depth))
feats = [f for f in data_.columns if f not in ['bookingid', 'label']]
    if stratify:
        folds_ = StratifiedKFold(n_splits=4, shuffle=True, random_state=610)
        splitted = folds_.split(data_, y_)
    else:
        # Plain KFold so that folds_ is defined (and folds_.n_splits usable)
        # in the unstratified case as well.
        folds_ = KFold(n_splits=4, shuffle=True, random_state=610)
        splitted = folds_.split(data_)
for n_fold, (trn_idx, val_idx) in enumerate(splitted):
trn_x, trn_y = data_[feats].iloc[trn_idx], y_.iloc[trn_idx]
val_x, val_y = data_[feats].iloc[val_idx], y_.iloc[val_idx]
xg_train = xgb.DMatrix(
trn_x.values, label=trn_y.values
)
xg_valid = xgb.DMatrix(
val_x.values, label=val_y.values
)
watchlist = [(xg_train, 'train'),(xg_valid, 'eval')]
num_round=10000
param = {
'gamma' : gamma,
'max_depth':max_depth,
'colsample_bytree':colsample_bytree,
'subsample':subsample,
'min_child_weight':min_child_weight,
'objective':'binary:logistic',
'random_state':1029,
'n_jobs':8,
'eval_metric':'auc',
'metric': 'auc',
'scale_pos_weight':scale_pos_weight,
'eta':0.05,
'silent':True
}
clf = xgb.train(param, xg_train, num_round, watchlist, verbose_eval=100, early_stopping_rounds = 100)
oof_preds[val_idx] = clf.predict(xgb.DMatrix(data_[feats].iloc[val_idx].values), ntree_limit=clf.best_ntree_limit)
if test_phase:
sub_preds += clf.predict(xgb.DMatrix(test_[feats].values), ntree_limit=clf.best_ntree_limit) / folds_.n_splits
print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(val_y, oof_preds[val_idx])))
del clf, trn_x, trn_y, val_x, val_y
gc.collect()
print('Full AUC score %.6f' % roc_auc_score(y_, oof_preds))
if test_phase:
return sub_preds
else:
return roc_auc_score(y_, oof_preds)
|
2ad542c0a6f10835b352ea941a40dfb20b0f02f2
| 15,697
|
def _infer_elem_type(list_var):
"""
Returns types.tensor. None if failed to infer element type.
Example:
Given:
main(%update: (2,fp32)) {
block0() {
%list: List[unknown] = tf_make_list(...) # unknown elem type
%while_loop_0:0: (i32), %while_loop_0:1: List[(2,fp32)] = while_loop(loop_vars=(...))
while_loop_0_body(...) {
%list_write_0: List[(2,fp32)] = list_write(index=..., ls=%list, value=%update)
} -> (%add_0, %list_write_0)
Result:
main(%update: (2,fp32)) {
block0() {
%list: List[(2,fp32)] = tf_make_list(...) # Get the elem type from list_write
%while_loop_0:0: (i32), %while_loop_0:1: List[(2,fp32)] = while_loop(loop_vars=(...))
while_loop_0_body(...) {
%list_write_0: List[(2,fp32)] = list_write(index=..., ls=%list, value=%update)
} -> (%add_0, %list_write_0)
"""
# Search for child op that have informative element types
for o in list_var.child_ops:
if o.op_type in ["list_write", "list_scatter"]:
return o.outputs[0].elem_type
if o.op_type == "while_loop":
idx = list(o.loop_vars).index(list_var)
block = o.blocks[0]
# the corresponding Var in body block
block_var = block.inputs[idx]
elem_type = _infer_elem_type(block_var)
if elem_type is not None:
def _set_types_for_block_inputs(block):
block_var = block.inputs[idx]
new_block_var = ListVar(name=block_var.name, elem_type=elem_type,
init_length=block_var.sym_type.T[1],
dynamic_length=block_var.sym_type.T[2])
block._replace_var(block_var, new_block_var)
_set_types_for_block_inputs(o.blocks[0]) # condition block
_set_types_for_block_inputs(o.blocks[1]) # body block
return elem_type
# otherwise continue to other block_var (a list_var can be
# passed into while_loop twice).
return None
|
207d9ca4bd4f666d867d17756a7cd84110c47e76
| 15,698
|
def plot_hairy_mean_binstat_base(
list_of_pred_true_weight_label_color, key, spec,
is_rel = False, err = 'rms'
):
"""Plot binstats of means of relative energy resolution vs true energy."""
spec = spec.copy()
if spec.title is None:
spec.title = 'MEAN + E[ %s ]' % (err.upper())
else:
spec.title = '(MEAN + E[ %s ]) ( %s )' % (err.upper(), spec.title)
f, ax = plt.subplots()
for pred,true,weights,label,color in list_of_pred_true_weight_label_color:
x = true[key]
y = (pred[key] - true[key])
if is_rel:
y = y / x
plot_hairy_mean_binstat_single(
ax, x, y, weights, spec.bins_x, color, label, err
)
ax.axhline(0, 0, 1, color = 'C2', linestyle = 'dashed')
spec.decorate(ax)
ax.legend()
return f, ax
|
aebe7d7b4131618c4ca1ef4b78a79258b5f405b7
| 15,699
|