| content (stringlengths 22–815k) | id (int64 0–4.91M) |
|---|---|
def _removeSimpleAnnotation(ro_config, ro_dir, rofile, attrname, attrvalue):
"""
    Remove a simple annotation or multiple matching annotations from a research object.
ro_config is the research object manager configuration, supplied as a dictionary
ro_dir is the research object root directory
rofile names the annotated file or resource, possibly relative to the RO.
attrname names the attribute in a form recognized by getAnnotationByName
    attrvalue is the attribute value to be deleted, or None to delete all values
"""
log.debug("removeSimpleAnnotation: ro_dir %s, rofile %s, attrname %s, attrvalue %s"%
(ro_dir, rofile, attrname, attrvalue))
# Enumerate annotations
# For each:
# if annotation is only one in graph then:
# remove aggregated annotation
# else:
    # create new annotation graph with annotation removed
# update aggregated annotation
ro_graph = ro_manifest.readManifestGraph(ro_dir)
subject = ro_manifest.getComponentUri(ro_dir, rofile)
(predicate,valtype) = getAnnotationByName(ro_config, attrname)
val = attrvalue and makeAnnotationValue(ro_config, attrvalue, valtype)
#@@TODO refactor common code with getRoAnnotations, etc.
add_annotations = []
remove_annotations = []
for ann_node in ro_graph.subjects(predicate=RO.annotatesAggregatedResource, object=subject):
ann_uri = ro_graph.value(subject=ann_node, predicate=AO.body)
ann_graph = readAnnotationBody(ro_dir, ro_manifest.getComponentUriRel(ro_dir, ann_uri))
if (subject, predicate, val) in ann_graph:
ann_graph.remove((subject, predicate, val))
if (subject, None, None) in ann_graph:
# Triples remain in annotation body: write new body and update RO graph
ann_name = createAnnotationGraphBody(ro_config, ro_dir, rofile, ann_graph)
remove_annotations.append(ann_node)
add_annotations.append(ann_name)
else:
# Remove annotation from RO graph
remove_annotations.append(ann_node)
# Update RO graph if needed
if add_annotations or remove_annotations:
for a in remove_annotations:
_removeAnnotationBodyFromRoGraph(ro_graph, a)
for a in add_annotations:
_addAnnotationBodyToRoGraph(ro_graph, ro_dir, rofile, a)
ro_manifest.writeManifestGraph(ro_dir, ro_graph)
return
| 23,000
|
def modify_column_cell_content(content, value_to_colors):
"""
Function to include colors in the cells containing values.
Also removes the index that was used for bookkeeping.
"""
idx, value = content
    if isinstance(value, (int, float)):
        color = value_to_colors[content]
        return ' '.join([r'\cellcolor{{{}}}'.format(color), str(value)])
else:
return value
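# A minimal usage sketch with a hypothetical color map: note that the lookup is
# keyed by the full (index, value) tuple, so value_to_colors must map those pairs.
value_to_colors = {(0, 3.5): 'red!25'}
modify_column_cell_content((0, 3.5), value_to_colors)     # -> '\cellcolor{red!25} 3.5'
modify_column_cell_content((1, 'name'), value_to_colors)  # non-numeric cells pass through unchanged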
| 23,001
|
def ReversePolishSolver(expression):
"""
Solves a given problem in reverse polish notation
    :param expression: tuple of strings, each a number or one of '+', '-', '*', '/'
"""
# Create empty stack
rp_calculator = Stack()
    operators = {'+', '-', '*', '/'}
    for c in expression:
        # Check if next part of expression is an operator or a number
        if c in operators:
if rp_calculator.count < 2:
print('Error: Not enough operands')
else:
# Pop two values
right_operand = rp_calculator.pop()
left_operand = rp_calculator.pop()
# Evaluate and push result back to stack
if c == '+':
rp_calculator.push(left_operand + right_operand)
elif c == '-':
rp_calculator.push(left_operand - right_operand)
elif c == '*':
rp_calculator.push(left_operand * right_operand)
elif c == '/':
rp_calculator.push(left_operand / right_operand)
elif c.isnumeric():
# Operand: add to stack
rp_calculator.push(int(c))
else:
print('Error: invalid character')
if rp_calculator.count > 1:
print('Error: too many operands')
return rp_calculator.pop()
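# A minimal usage sketch, assuming a Stack class exposing push(), pop() and a
# count attribute as used above: "3 4 + 2 *" evaluates to (3 + 4) * 2 = 14.
result = ReversePolishSolver(('3', '4', '+', '2', '*'))
print(result)  # 14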
| 23,002
|
def _make_selection(stdscr, classes, message='(select one)'):
"""
This function was originally branched from https://stackoverflow.com/a/45577262/5009004
:return: option, classes index
:rtype: (str, int)
"""
attributes = {}
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
attributes['normal'] = curses.color_pair(1)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_WHITE)
attributes['highlighted'] = curses.color_pair(2)
c = 0 # last character read
option = 0 # the current option that is marked
while c != 10: # Enter in ascii
stdscr.erase()
stdscr.addstr(f"{message}\n", curses.A_UNDERLINE)
for i in range(len(classes)):
if i == option:
attr = attributes['highlighted']
else:
attr = attributes['normal']
try:
stdscr.addstr(f"{i + 1}. ")
stdscr.addstr(classes[i] + '\n', attr)
except curses.error as e:
print(f"Curses error {classes[i]} {attr}")
print(e)
return None, None
c = stdscr.getch()
if c == curses.KEY_UP and option > 0:
option -= 1
elif c == curses.KEY_DOWN and option < len(classes) - 1:
option += 1
# stdscr.addstr("You chose {0}".format(classes[option]))
# stdscr.getch()
return classes[option], option
| 23,003
|
def add_newword(cleaned_word, cleaned_meaning, file_name = 'words.txt'):
"""
Takes the preprocessed word, its meaning and default txt file argument as input.
Updates the txt file with the temporary dict containing word and meaning as key:value pair.
Also, handles the condition where the file doesn't exist or contains no serialised data
"""
#Storing it into a temporary dict to append to the file above
tmp = {cleaned_word : cleaned_meaning}
#Checking whether the above file_name exists or not -- To append the dict to it
if os.path.isfile(file_name):
#Above file already exists -- append the tmp dict in the file
#Opening the file in 'r+' mode for reading and writing simultaneously
with open(file_name, 'r+') as file:
#Loading the data from the file using json.load
data = json.load(file)
            #Checking whether the cleaned_word is already present in the dictionary
            #Only add the word if it is not already present
if cleaned_word in data.keys():
#Handling the condition where word is already present in dictionary and this is a redundant task
print("\033[1m" + '\nThe word "{}" is already present in the dictionary.'.format(cleaned_word) + "\033[0m")
print("\033[1m" + 'Returning back to the main menu...' + "\033[0m")
#Printing a boundary to act as separator
print('\n' + '-x-' * 30)
else:
#Appending the tmp dict into data
data.update(tmp)
#Reverting the cursor of the file to 0 (Original position)
file.seek(0)
#Writing the data file object back into the file using concept of serialisation
json.dump(data, file)
#Printing a message when the data is successfully stored in words.txt
print("\033[1m" + '\nThe word "{}" and its meaning "{}" have been successfully stored !!\n'.format(cleaned_word,
cleaned_meaning) + "\033[0m")
#Printing a boundary to act as separator
print('\n' + '-x-' * 30)
#Logic when the file does not exist or doesn't contain any data in it
else:
#Opening the file in appending mode to handle both scenarios above
file = open(file_name, 'a')
#Writing the temporary dict as a json object into the file
json.dump(tmp, file)
#Closing the file
file.close()
#Printing the message when file and data has been successfully added
print("\033[1m" + '\nThe word "{}" and its meaning "{}" was added to the file.'.format(cleaned_word,
cleaned_meaning) + "\033[0m" )
print("\033[1m" + 'Continue adding words to build dictionary !!' + "\033[0m")
#Printing a boundary to act as separator
print('\n' + '-x-' * 30)
| 23,004
|
def cmorization(in_dir, out_dir, cfg, _):
"""Cmorization func call."""
cmor_table = cfg['cmor_table']
glob_attrs = cfg['attributes']
logger.info("Starting cmorization for Tier%s OBS files: %s",
glob_attrs['tier'], glob_attrs['dataset_id'])
logger.info("Input data from: %s", in_dir)
logger.info("Output will be written to: %s", out_dir)
# run the cmorization
for var, vals in cfg['variables'].items():
inpfile = os.path.join(in_dir, vals['file'])
logger.info("CMORizing var %s from file %s", var, inpfile)
var_info = cmor_table.get_variable(vals['mip'], var)
raw_info = {'name': vals['raw'], 'file': inpfile, 'iTr': vals['iTr']}
glob_attrs['mip'] = vals['mip']
with catch_warnings():
filterwarnings(
action='ignore',
message=('WARNING: missing_value not used since it\n'
'cannot be safely cast to variable data type'),
category=UserWarning,
module='iris',
)
extract_variable(var_info, raw_info, out_dir, glob_attrs)
| 23,005
|
def evaluate_model(epoch, controller, shared_cnn, data_loaders, n_samples=10):
"""Print the validation and test accuracy for a controller and shared_cnn.
Args:
epoch: Current epoch.
controller: Controller module that generates architectures to be trained.
shared_cnn: CNN that contains all possible architectures, with shared weights.
data_loaders: Dict containing data loaders.
n_samples: Number of architectures to test when looking for the best one.
Returns: Nothing.
"""
controller.eval()
shared_cnn.eval()
print('Here are ' + str(n_samples) + ' architectures:')
best_arc, _ = get_best_arc(controller, shared_cnn, data_loaders, n_samples, verbose=True)
valid_loader = data_loaders['valid_subset']
test_loader = data_loaders['test_dataset']
valid_acc = get_eval_accuracy(valid_loader, shared_cnn, best_arc)
test_acc = get_eval_accuracy(test_loader, shared_cnn, best_arc)
print('Epoch ' + str(epoch) + ': Eval')
print('valid_accuracy: %.4f' % (valid_acc))
print('test_accuracy: %.4f' % (test_acc))
controller.train()
shared_cnn.train()
| 23,006
|
def _check_keys(keys, spec):
"""Check a list of ``keys`` equals ``spec``.
Sorts both keys and spec before checking equality.
Arguments:
keys (``list``): The list of keys to compare to ``spec``
spec (``list``): The list of keys to compare to ``keys``
Returns:
``bool``
Raises:
``exceptions.InvalidListError``: Raised if ``keys`` is not
equal to ``spec``.
"""
    if sorted(keys) != sorted(spec):
raise exceptions.InvalidListError('{} does not equal {}'.format(
keys, spec
))
return True
| 23,007
|
def get_snmp_table(hostname, table_oid, community, index=False):
"""
hostname : <str>
table_oid : <str>
community : <str>
index : <bool> append index to every row, snmptable Option -Ci
call snmptable command and get output
    output will be transferred to list of dictionaries,
    key names are taken from output at line 3
    to function properly, the MIB of the mentioned table must be present and installed,
    under ubuntu use a user-specific directory under ~/.snmp/mibs to store vendor specific files
    every dataset - aka row of data - is prepended with key "hostname" and "ts" : timestamp of call
"""
# field should be extra separated, not the default space
cmd = ""
if index is False:
cmd = "snmptable -v2c -c %s -Cf \; %s %s" % (community, hostname, table_oid)
else:
cmd = "snmptable -v2c -c %s -Ci -Cf \; %s %s" % (community, hostname, table_oid)
logging.info(cmd)
output = subprocess.check_output((cmd, ), shell=True)
lines_to_ignore = 1 # ignore first two line
header_line = True # next is header line
headers = [] # headers are stored in list
data = [] # result
keys = {
"hostname" : hostname,
"ts" : time.time()
}
for line in output.split("\n"):
if line == "":
continue # ignore blank lines
if lines_to_ignore > 0:
lines_to_ignore -= 1
continue
else:
if header_line is True:
headers = line.strip().split(";")
header_line = False
else:
subindex = 0
values = keys.copy()
for col in line.strip().split(";"):
values[headers[subindex]] = col.replace("\"", "")
subindex += 1
data.append(values)
return data
| 23,008
|
def hangToJamo(hangul: str):
"""한글을 자소 단위(초, 중, 종성)로 분리하는 모듈입니다.
@status `Accepted` \\
@params `"안녕하세요"` \\
@returns `"ㅇㅏㄴㄴㅕㅇㅎㅏ_ㅅㅔ_ㅇㅛ_"` """
result = []
for char in hangul:
char_code = ord(char)
if not 0xAC00 <= char_code <= 0xD7A3:
result.append(char)
continue
initial_idx = int((((char_code - 0xAC00) / 28) / 21) % 19)
        medial_idx = int(((char_code - 0xAC00) / 28) % 21)
        final_idx = int((char_code - 0xAC00) % 28)
        initial = chosung[initial_idx]
        medial = jungsung[medial_idx]
        final = jongsung[final_idx]
        result.append(initial)
        result.append(medial)
result.append(final)
return ''.join(result)
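# A minimal sketch of the jamo tables the function above relies on (assumed here;
# the original module defines chosung/jungsung/jongsung elsewhere). They follow the
# standard Unicode decomposition order, with '_' standing in for an empty final
# consonant to match the docstring example.
chosung = ['ㄱ', 'ㄲ', 'ㄴ', 'ㄷ', 'ㄸ', 'ㄹ', 'ㅁ', 'ㅂ', 'ㅃ', 'ㅅ',
           'ㅆ', 'ㅇ', 'ㅈ', 'ㅉ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
jungsung = ['ㅏ', 'ㅐ', 'ㅑ', 'ㅒ', 'ㅓ', 'ㅔ', 'ㅕ', 'ㅖ', 'ㅗ', 'ㅘ', 'ㅙ', 'ㅚ',
            'ㅛ', 'ㅜ', 'ㅝ', 'ㅞ', 'ㅟ', 'ㅠ', 'ㅡ', 'ㅢ', 'ㅣ']
jongsung = ['_', 'ㄱ', 'ㄲ', 'ㄳ', 'ㄴ', 'ㄵ', 'ㄶ', 'ㄷ', 'ㄹ', 'ㄺ', 'ㄻ', 'ㄼ', 'ㄽ',
            'ㄾ', 'ㄿ', 'ㅀ', 'ㅁ', 'ㅂ', 'ㅄ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅊ', 'ㅋ',
            'ㅌ', 'ㅍ', 'ㅎ']

print(hangToJamo('안녕하세요'))  # ㅇㅏㄴㄴㅕㅇㅎㅏ_ㅅㅔ_ㅇㅛ_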
| 23,009
|
def build_metadata_pairs(samples=100):
"""
Build sample data in format:
isbn, title_a, authors_a, title_b, authors_b
Where:
isbn is in both datasets
title_a + authors_a != title_b + authors_b
"""
from itertools import cycle
import gc
gc.enable()
combos = [c for c in combinations(ALL_READERS, 2)]
samples_per_combo = int(samples / len(combos))
print("Creating ~%s samples per data source pair" % samples_per_combo)
fp = os.path.join(iscc_bench.DATA_DIR, "metapairs_%s.sample" % samples)
total_samples = 0
seen_isbns = set()
with open(fp, "wb") as outf:
for combo in combos:
gc.collect()
combo_name = "%s-%s" % (combo[0].__name__, combo[1].__name__)
a_isbns = set(load_isbns(combo[0]))
b_isbns = set(load_isbns(combo[1]))
relevant_isbns = a_isbns.intersection(b_isbns)
data = {}
counter = 0
reader_combo = cycle((combo[0](), combo[1]()))
print("Collecting %s combo" % combo_name)
for reader in reader_combo:
try:
entry = next(reader)
except StopIteration:
print("!!!! StopIteration")
break
isbn = int(entry.isbn)
if isbn in relevant_isbns and isbn not in seen_isbns:
title = entry.title
author = entry.author
if isbn not in data:
data[isbn] = {"title": title, "author": author}
continue
if title != data[isbn]["title"] or author != data[isbn]["author"]:
row = data[isbn]
out_data = "{}|{}|{}|{}|{}\n".format(
isbn,
row["title"].replace("|", ""),
row["author"].replace("|", ""),
title.replace("|", ""),
author.replace("|", ""),
)
print(out_data)
outf.write(out_data.encode("utf-8"))
seen_isbns.add(isbn)
total_samples += 1
relevant_isbns.remove(isbn)
del data[isbn]
if counter == samples_per_combo:
print("Finished samples for %s" % combo_name)
break
if not relevant_isbns:
print(
"Out of relevant ISBNs at %s samples for %s",
(counter, combo_name),
)
break
counter += 1
print("Collected %s total samples" % total_samples)
| 23,010
|
def get_config(config_path):
""" Open a Tiler config and return it as a dictonary """
with open(config_path) as config_json:
config_dict = json.load(config_json)
return config_dict
| 23,011
|
def groupby_index(iter: Iterable[T],n:int) -> Iterable[Iterable[T]]:
"""group list by index
Args:
iter (Iterable[T]): iterator to group by index
n (int): The size of groups
Returns:
Iterable[Iterable[T]]: iterable object to group by index
>>> [*map(lambda x:[*x],groupby_index([1,2,3,4],2))]
[[1, 2], [3, 4]]
"""
def keyfunc(x: Tuple[int,T]) -> int:
k, _ = x
return (k // n)
def mapper(x: Tuple[int, Tuple[int, T]]):
_, v = x
return map(lambda y: y[1],v)
g = itertools.groupby(enumerate(iter), keyfunc)
return map(mapper,g)
| 23,012
|
def create(ctx, name, company, email, position):
"""Create a new Client"""
client = Client(name, company, email, position)
client_service = ClientService(ctx.obj['clients_table'])
client_service.create_client(client)
| 23,013
|
def _build_message_classes(message_name):
"""
Create a new subclass instance of DIMSEMessage for the given DIMSE
`message_name`.
Parameters
----------
message_name : str
The name/type of message class to construct, one of the following:
* C-ECHO-RQ
* C-ECHO-RSP
* C-STORE-RQ
* C-STORE-RSP
* C-FIND-RQ
* C-FIND-RSP
* C-GET-RQ
* C-GET-RSP
* C-MOVE-RQ
* C-MOVE-RSP
* C-CANCEL-RQ
* N-EVENT-REPORT-RQ
* N-EVENT-REPORT-RSP
* N-GET-RQ
* N-GET-RSP
* N-SET-RQ
* N-SET-RSP
* N-ACTION-RQ
* N-ACTION-RSP
* N-CREATE-RQ
* N-CREATE-RSP
* N-DELETE-RQ
* N-DELETE-RSP
"""
def __init__(self):
DIMSEMessage.__init__(self)
# Create new subclass of DIMSE Message using the supplied name
# but replace hyphens with underscores
cls = type(message_name.replace('-', '_'),
(DIMSEMessage,),
{"__init__": __init__})
# Create a new Dataset object for the command_set attributes
d = Dataset()
for elem_tag in command_set_elem[message_name]:
tag = Tag(elem_tag)
vr = dcm_dict[elem_tag][0]
# If the required command set elements are expanded this will need
# to be checked to ensure it functions OK
try:
d.add_new(tag, vr, None)
except:
d.add_new(tag, vr, '')
cls.command_set = d
globals()[cls.__name__] = cls
return cls
| 23,014
|
def valid_string(s, min_len=None, max_len=None,
allow_blank=False, auto_trim=True, pattern=None):
"""
    @param s str/unicode: the string to validate
    @param min_len None/int
    @param max_len None/int
    @param allow_blank boolean
    @param auto_trim boolean
    @param pattern re.pattern
    @return boolean is_ok
    @return string value: the (trimmed) string if ok, otherwise an error message
"""
if s is None:
        return False, u'must not be None'
    if not isinstance(s, basestring):
        return False, u"parameter must be a string"
    if auto_trim:
        s = s.strip()
    str_len = len(s)
    if not allow_blank and str_len < 1:
        return False, u"parameter must not be blank"
    if max_len is not None and str_len > max_len:
        return False, u"parameter length must not exceed %d" % max_len
    if min_len is not None and str_len < min_len:
        return False, u"parameter length must be at least %d" % min_len
    if pattern is not None and s and not _match_pattern(pattern, s):
        return False, u'parameter does not match pattern: %s' % pattern
return True, s
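# A minimal usage sketch (assumes Python 2, where basestring and the _match_pattern
# helper used above are available):
is_ok, value = valid_string(u"  hello  ", min_len=3, max_len=10)
# is_ok is True and value is the trimmed string u"hello"; on failure, value holds
# the error message instead.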
| 23,015
|
def update_visitor(visitor_key, session_key=None):
""" update the visitor using the visitor key """
visitor = get_visitor(visitor_key)
if visitor:
visitor.mark_visit()
if session_key:
visitor.last_session_key = session_key
visitor.save()
return visitor
| 23,016
|
def test_receive_payment_with_travel_rule_metadata_and_invalid_reference_id(
sender_account: AccountResource,
receiver_account: AccountResource,
currency: str,
hrp: str,
stub_config: AppConfig,
diem_client: jsonrpc.Client,
pending_income_account: AccountResource,
invalid_ref_id: Optional[str],
) -> None:
"""
There is no way to create travel rule metadata with invalid reference id when the payment
amount meets travel rule threshold, because the metadata signature is verified by transaction
script.
Also, if metadata signature is provided, transaction script will also verify it regardless
whether the amount meets travel rule threshold, thus no need to test invalid metadata
signature case.
This test bypasses the transaction script validation by sending payment amount under the
travel rule threshold without metadata signature, and receiver should handle it properly and refund.
Test Plan:
1. Generate a valid payment URI from receiver account.
2. Submit payment under travel rule threshold transaction from sender to receiver on-chain account.
3. Wait for the transaction executed successfully.
    4. Assert the payment is refunded eventually.
Note: the refund payment will be received by pending income account of the MiniWallet Stub, because
no account owns the original invalid payment transaction which is sent by test.
"""
receiver_uri = receiver_account.generate_payment_uri()
receiver_account_address: diem_types.AccountAddress = receiver_uri.intent(hrp).account_address
sender_uri = sender_account.generate_payment_uri()
sender_address = sender_uri.intent(hrp).account_address
metadata, _ = txnmetadata.travel_rule(invalid_ref_id, sender_address, amount) # pyre-ignore
original_payment_txn: jsonrpc.Transaction = stub_config.account.submit_and_wait_for_txn(
diem_client,
stdlib.encode_peer_to_peer_with_metadata_script(
currency=utils.currency_code(currency),
amount=amount,
payee=receiver_account_address,
metadata=metadata,
metadata_signature=b"",
),
)
pending_income_account.wait_for_event(
"created_transaction",
status=Transaction.Status.completed,
refund_diem_txn_version=original_payment_txn.version,
)
assert receiver_account.balance(currency) == 0
| 23,017
|
def PNewUVTable (inUV, access, tabType, tabVer, err):
""" Obsolete use PGetTable
"""
if ('myClass' in inUV.__dict__) and (inUV.myClass=='AIPSUVData'):
raise TypeError("Function unavailable for "+inUV.myClass)
return PGetTable (inUV, access, tabType, tabVer, err)
| 23,018
|
def fmt_hex(bytes):
"""Format the bytes as a hex string, return upper-case version.
"""
# This is a separate function so as to not make the mistake of
    # using the '%X' format string with an int, which will not
# guarantee an even-length string.
#
# binascii works on all versions of Python, the hex encoding does not.
hex = binascii.hexlify(bytes)
hex = hex.decode() # Returns bytes, which makes no sense to me
return hex.upper()
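# A quick usage sketch:
fmt_hex(b'\x0f\xa0')  # -> '0FA0'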
| 23,019
|
def pressure_filename():
"""
Return the filename used to represent the state of the emulated sense HAT's
pressure sensor. On UNIX we try ``/dev/shm`` then fall back to ``/tmp``; on
Windows we use whatever ``%TEMP%`` contains
"""
fname = 'rpi-sense-emu-pressure'
if sys.platform.startswith('win'):
# just use a temporary file on Windows
return os.path.join(os.environ['TEMP'], fname)
else:
if os.path.exists('/dev/shm'):
return os.path.join('/dev/shm', fname)
else:
return os.path.join('/tmp', fname)
| 23,020
|
def getTracksAudioFeatures(access_token, id_string):
"""
    getTracksAudioFeatures() retrieves audio features for the given list of tracks; these include danceability, energy, loudness, etc.
"""
# URL to pass a list of tracks to get their audio features
audio_features_url = f"/audio-features"
# Header parameter to allow the application to make requests to the Spotify Web API
header = {
"Authorization" : "Bearer " + access_token,
}
# Query parameters:
# ids: string of song ids separated by a comma
param = {
"ids" : id_string,
}
    # GET request to receive track audio information
response = requests.get(BASE_URL + audio_features_url, headers=header, params=param)
return response
| 23,021
|
def list_model_evaluations(
project_id, compute_region, model_display_name, filter_=None
):
"""List model evaluations."""
result = []
# [START automl_tables_list_model_evaluations]
# TODO(developer): Uncomment and set the following variables
# project_id = 'PROJECT_ID_HERE'
# compute_region = 'COMPUTE_REGION_HERE'
# model_display_name = 'MODEL_DISPLAY_NAME_HERE'
# filter_ = 'filter expression here'
from google.cloud import automl_v1beta1 as automl
client = automl.TablesClient(project=project_id, region=compute_region)
# List all the model evaluations in the model by applying filter.
response = client.list_model_evaluations(
model_display_name=model_display_name, filter_=filter_
)
print("List of model evaluations:")
for evaluation in response:
print("Model evaluation name: {}".format(evaluation.name))
print("Model evaluation id: {}".format(evaluation.name.split("/")[-1]))
print(
"Model evaluation example count: {}".format(
evaluation.evaluated_example_count
)
)
print("Model evaluation time:")
print("\tseconds: {}".format(evaluation.create_time.seconds))
print("\tnanos: {}".format(evaluation.create_time.nanos))
print("\n")
# [END automl_tables_list_model_evaluations]
result.append(evaluation)
return result
| 23,022
|
def tan(x):
"""Element-wise `tangent`."""
return sin(x) / cos(x)
| 23,023
|
def scalingImage(img, minVal, maxVal):
"""
Scale image given a range.
Parameters: img, image to be scaled;
minVal, lower value for range;
maxVal, upper value for range.
Returns: imgScaled, image scaled.
"""
imax = np.max(img)
imin = np.min(img)
std = (img - imin) / (imax - imin)
imgScaled = std * (maxVal - minVal) + minVal
return imgScaled
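# A quick usage sketch: rescale a small array into the [0, 1] range
# (assumes numpy is imported as np, as in the function above).
print(scalingImage(np.array([0.0, 5.0, 10.0]), 0.0, 1.0))  # [0.  0.5 1. ]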
| 23,024
|
def test_python_module_ctia_positive_incident_search(get_entity):
"""Perform testing for incident/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - 8fc6ba46-a610-4432-a72b-af92836fa560
Steps:
1. Send POST request to create new incident entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the amount of entities after creating and deleting entities
Expected results: Incident entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
    the same whether you access it directly or through our tool
Importance: Critical
"""
incident = get_entity('incident')
# Create new entity using provided payload
post_tool_response = incident.post(payload=INCIDENT_PAYLOAD,
params={'wait_for': 'true'})
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request return same data for direct access and access
# through custom python module
get_incident_search = incident.search.get(
params={'id': entity_id})
assert get_incident_search[0]['type'] == 'incident'
assert get_incident_search[0]['schema_version'] == '1.1.3'
# Count entities after entity created
count_incident_before_deleted = incident.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(incident.search.delete(
params={'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert incident.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_incident_after_deleted = incident.search.count()
# Compare results of count_incident_before_deleted
# and count_incident_after_deleted
assert count_incident_before_deleted != count_incident_after_deleted
| 23,025
|
def forcestr(name):
""" returns `name` as string, even if it wasn't before """
return name if isinstance(name, bytes) else name.encode(RAW_ENCODING, ENCODING_ERROR_HANDLING)
| 23,026
|
def IsGitSVNDirty(directory):
"""
Checks whether our git-svn tree contains clean trunk or some branch.
Errors are swallowed.
"""
# For git branches the last commit message is either
# some local commit or a merge.
return LookupGitSVNRevision(directory, 1) is None
| 23,027
|
def read_attr(
attr_desc: str,
package_dir: Optional[Mapping[str, str]] = None,
root_dir: Optional[_Path] = None
):
"""Reads the value of an attribute from a module.
    This function will try to read the attribute statically first
(via :func:`ast.literal_eval`), and only evaluate the module if it fails.
Examples:
read_attr("package.attr")
read_attr("package.module.attr")
:param str attr_desc: Dot-separated string describing how to reach the
attribute (see examples above)
:param dict[str, str] package_dir: Mapping of package names to their
location in disk (represented by paths relative to ``root_dir``).
:param str root_dir: Path to directory containing all the packages in
``package_dir`` (current directory by default).
:rtype: str
"""
root_dir = root_dir or os.getcwd()
attrs_path = attr_desc.strip().split('.')
attr_name = attrs_path.pop()
module_name = '.'.join(attrs_path)
module_name = module_name or '__init__'
_parent_path, path, module_name = _find_module(module_name, package_dir, root_dir)
spec = _find_spec(module_name, path)
try:
return getattr(StaticModule(module_name, spec), attr_name)
except Exception:
# fallback to evaluate module
module = _load_spec(spec, module_name)
return getattr(module, attr_name)
| 23,028
|
def create_pyfunc_dataset(batch_size=32, repeat_size=1, num_parallel_workers=1, num_samples=None):
"""
    Create Cifar10 dataset pipeline with Map ops containing only Python functions and Python Multiprocessing enabled
"""
# Define dataset
cifar10_ds = ds.Cifar10Dataset(DATA_DIR, num_samples=num_samples)
cifar10_ds = cifar10_ds.map(operations=[py_vision.ToType(np.int32)], input_columns="label",
num_parallel_workers=num_parallel_workers, python_multiprocessing=True)
# Setup transforms list which include Python ops / Pyfuncs
transforms_list = [
py_vision.ToPIL(),
py_vision.RandomGrayscale(prob=0.2),
np.array] # need to convert PIL image to a NumPy array to pass it to C++ operation
compose_op = py_transforms.Compose(transforms_list)
cifar10_ds = cifar10_ds.map(operations=compose_op, input_columns="image",
num_parallel_workers=num_parallel_workers,
python_multiprocessing=True)
# Apply Dataset Ops
buffer_size = 10000
cifar10_ds = cifar10_ds.shuffle(buffer_size=buffer_size)
cifar10_ds = cifar10_ds.batch(batch_size, drop_remainder=True)
cifar10_ds = cifar10_ds.repeat(repeat_size)
return cifar10_ds
| 23,029
|
def test_collect_runtime_dependencies_driver(instr_workbench):
"""Test the collection of drivers as runtime dependencies.
"""
instr_workbench.register(InstrContributor1())
d_p = instr_workbench.get_plugin('exopy.app.dependencies')
d_c = d_p.run_deps_collectors.contributions['exopy.instruments.drivers']
dep = dict.fromkeys(('instruments.test.FalseDriver', 'dummy'))
err = {}
un = set()
d_c.collect(instr_workbench, 'tests', dep, un, err)
assert len(err) == 1
assert 'instruments.test.FalseDriver' not in err['unknown-drivers']
assert 'dummy' in err['unknown-drivers']
assert not un
assert dep['instruments.test.FalseDriver'] is not None
assert dep['dummy'] is None
| 23,030
|
def test_invalid_server_oneshot(tmpworkdir): # pylint: disable=unused-argument,redefined-outer-name
"""test to raise exception in oneshot"""
result = agent_main(['--server', 'http://localhost:0', '--debug', '--oneshot'])
assert result == 1
| 23,031
|
def serve(args):
"""
Create the grpc service server for processing measurement groups
"""
channel = grpc.insecure_channel(args.sendserver)
stub = measurement_pb2_grpc.TrackProducerStub(channel)
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
measurement_pb2_grpc.add_MeasurementProducerServicer_to_server(Tracker(stub, args.filter),
server)
server.add_insecure_port('[::]:' + str(args.recvport))
server.start()
server.wait_for_termination()
| 23,032
|
def _infer_structured_outs(
op_config: LinalgStructuredOpConfig,
in_arg_defs: Sequence[OperandDefConfig], ins: Sequence[Value],
out_arg_defs: Sequence[OperandDefConfig],
outs: Union[Sequence[Value], OpResultList]) -> Tuple[ValueList, List[Type]]:
"""Infers implicit outs and output types.
Respects existing contents of outs if not empty.
Returns:
normalized outs, output types
"""
# If outs were explicitly provided, we accept them verbatim.
if outs:
return outs, [out.type for out in outs]
raise NotImplementedError(f"Output tensor inference not yet supported for "
"structured ops")
| 23,033
|
def event_source(method: t.Callable, name: t.Optional[str] = None):
"""A decorator which makes the function act as a source of before and after call events.
    You can later subscribe to these events with :py:func:`before` and :py:func:`after` decorators.
    :param method: Target class method
    :param name: Name for the join point. If not given, use the function name.
"""
# We must use function name instead of function pointer for the registry, because function object changes with unbound vs. bound Python class methods
if not name:
name = method.__name__
@functools.wraps(method)
def _inner(*args, **kwargs):
_self = args[0]
fire_advisor_event(_self, name, AdvisorRole.before)
retval = method(*args, **kwargs)
fire_advisor_event(_self, name, AdvisorRole.after)
return retval
    assert name not in _event_source_hooks, "There is already an event_source with the same name"
_event_source_hooks.append(name)
method._event_source_name = name
return _inner
| 23,034
|
def lst_blocks(uvp, blocks=2, lst_range=(0., 2.*np.pi)):
"""
Split a UVPSpec object into multiple objects, each containing spectra
within different contiguous LST ranges. There is no guarantee that each
block will contain the same number of spectra or samples.
N.B. This function uses the `lst_avg_array` property of an input UVPSpec
object to split the LSTs (and not the LSTs of the individual visibilities
that went into creating each delay spectrum).
Parameters
----------
uvp : UVPSpec
Object containing delay spectra.
blocks : int, optional
How many blocks to return. Default: 2.
lst_range : tuple, optional
Tuple containing the minimum and maximum LST to retain. This is the
range that will be split up into blocks. Default: (0., 2*pi)
Returns
-------
uvp_list : list of UVPSpec
List of UVPSpec objects, one for each LST range. Empty blocks will
appear as None in the list.
lst_bins : array_like
Array of LST bin edges. This has dimension (blocks+1,).
"""
# Check validity of inputs
if not isinstance(uvp, hp.UVPSpec):
raise TypeError("uvp must be a single UVPSpec object.")
if not (lst_range[0] >= 0. and lst_range[1] <= 2.*np.pi):
raise ValueError("lst_range must be in the interval (0, 2*pi)")
    if not isinstance(blocks, (int, np.integer)):
raise TypeError("'blocks' must be an integer")
if not blocks > 0:
raise ValueError("Must have blocks >= 1")
# Get LSTs
lsts = np.unique(uvp.lst_avg_array)
# Define bin edges
lst_bins = np.linspace(lst_range[0], lst_range[1], blocks+1)
# Loop over bins and select() the LST ranges required
uvp_list = []
for i in range(lst_bins.size - 1):
idxs = np.where( np.logical_and(lsts >= lst_bins[i],
lsts < lst_bins[i+1]) )[0]
_uvp = None
if idxs.size > 0:
# Select LSTs in this range
_uvp = uvp.select(lsts=lsts[idxs], inplace=False)
uvp_list.append(_uvp)
return uvp_list, lst_bins
| 23,035
|
def test_list_customers(client, response):
"""Retrieve a list of existing customers."""
response.get("https://api.mollie.com/v2/customers", "customers_list")
customers = client.customers.list()
assert_list_object(customers, Customer)
| 23,036
|
def is_completed(book):
"""Determine if the book is completed.
Args:
book: Row instance representing a book.
"""
return True if book.status == BOOK_STATUS_ACTIVE \
and not book.complete_in_progress \
and book.release_date \
else False
| 23,037
|
def compute_encryption_key_AESV3(password : 'str', encryption_dict : 'dict'):
"""
Derives the key to be used with encryption/decryption algorithms from a user-defined password.
Parameters
----------
    password : str
        The user-supplied password string.
encryption_dict : dict
The dictionary containing all the information about the encryption procedure.
Returns
-------
A bytes sequence representing the encryption key.
"""
U = encryption_dict["U"]
U = U.value if isinstance(U, PDFLiteralString) else unhexlify(U.value)
O = encryption_dict["O"]
O = O.value if isinstance(O, PDFLiteralString) else unhexlify(O.value)
prepped = sals_stringprep(password)
truncated = prepped.encode("utf8")[:127]
digest = sha256(truncated + O[32:32+8] + U).digest()
if digest == O[:32]:
intermediate = sha256(truncated + O[-8:] + U).digest()
OE = encryption_dict["OE"]
OE = OE.value if isinstance(OE, PDFLiteralString) else unhexlify(OE.value)
file_encryption_key = cbc_decrypt(OE, intermediate, b'\x00'*16, padding = False)
else:
digest = sha256(truncated + U[32:32+8]).digest()
if digest == U[:32]:
intermediate = sha256(truncated + U[-8:]).digest()
UE = encryption_dict["UE"]
UE = UE.value if isinstance(UE, PDFLiteralString) else unhexlify(UE.value)
file_encryption_key = cbc_decrypt(UE, intermediate, b'\x00'*16, padding = False)
else:
raise PDFWrongPasswordError()
return file_encryption_key
| 23,038
|
def list_group(group_name, recursive=True):
"""Returns all members, all globs and all nested groups in a group.
The returned lists are unordered.
Returns:
GroupListing object.
"""
return get_request_cache().auth_db.list_group(group_name, recursive)
| 23,039
|
def test_num_iterations():
"""
test values are given in the following order:
theta, interval_width, confidence_level=1-alpha, n_pearson, n_spearman, n_kendall
where theta is the value of the correlation coefficient
References
----------
Sample size requirements for estimating Pearson, Kendall and Spearman correlations
Bonett, Douglas G and Wright, Thomas A, 2000
http://doi.org/10.1007/BF02294183
Test values are taken from Table 1.
"""
test = [
(0.1, 0.1, 0.95, 1507, 1517, 661),
(0.3, 0.1, 0.95, 1274, 1331, 560),
(0.4, 0.2, 0.95, 273, 295, 122),
(0.5, 0.3, 0.99, 168, 189, 76),
(0.6, 0.2, 0.99, 276, 325, 123),
(0.7, 0.3, 0.99, 82, 101, 39),
(0.8, 0.1, 0.95, 205, 269, 93),
(0.9, 0.2, 0.99, 34, 46, 18),
]
for element in test:
        theta, width, conf_level, n_pearson, n_spearman, n_kendall = element
corrcoef_dict = get_corrcoef_num_iterations(
theta=theta, interval_width=width, confidence_level=conf_level
)
n_pearson_comp = corrcoef_dict["pearson"]["num_iterations"]
n_spearman_comp = corrcoef_dict["spearman"]["num_iterations"]
n_kendall_comp = corrcoef_dict["kendall"]["num_iterations"]
print(element)
assert abs(n_pearson_comp - n_pearson) < 2
        assert abs(n_spearman_comp - n_spearman) < 2
        assert abs(n_kendall_comp - n_kendall) < 2
| 23,040
|
def list_tickets(result: typing.List[typing.List] = None) -> None:
"""Lists the tickets created on this session.
Args:
result (typing.List[typing.List]): Result received from the decorator.
Defaults to None.
"""
if result is not None:
if len(result) != 0:
headers = ["Name", "Connection ID"]
table = tabulate(result, headers=headers, tablefmt="grid")
Logger().log("The active tickets are:\n\n{}\n".format(table),
LoggedMessageTypes.INFORMATION)
else:
Logger().log("No tickets were opened yet.",
LoggedMessageTypes.FAIL)
| 23,041
|
def pi():
"""Compute Pi to the current precision.
>>> print(pi())
3.141592653589793238462643383
"""
getcontext().prec += 2 # extra digits for intermediate steps
three = Decimal(3) # substitute "three=3.0" for regular floats
lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24
while s != lasts:
lasts = s
n, na = n + na, na + 8
d, da = d + da, da + 32
t = (t * n) / d
s += t
getcontext().prec -= 2
return +s # unary plus applies the new precision
| 23,042
|
def test_reactivate_and_save():
"""Test that the reactivate_and_save method in enrollment models sets properties and saves"""
course_run_enrollment = CourseRunEnrollmentFactory.create(
active=False, change_status=ENROLL_CHANGE_STATUS_REFUNDED
)
program_enrollment = ProgramEnrollmentFactory.create(
active=False, change_status=ENROLL_CHANGE_STATUS_REFUNDED
)
enrollments = [course_run_enrollment, program_enrollment]
for enrollment in enrollments:
enrollment.reactivate_and_save()
enrollment.refresh_from_db()
        assert enrollment.active is True
        assert enrollment.change_status is None
| 23,043
|
def molecule_rot(axis,degree,XYZ):
"""
    Rotate a molecule by `degree` degrees about the given axis vector (use `axis` to specify the x, y, z rotation vector).
    XYZ is modified in place.
"""
atlist=range(0,len(XYZ))
rad=radians(degree)
p2=np.array([0,0,0])
for i in sorted(atlist[:]):
        print('rotating...', i)
v=XYZ[i,:]
Rmat= RotMatArb(axis,rad,p2,v)
XYZ[i,:]=RmatxVec(Rmat,v)
| 23,044
|
def load_cifar_data(limit=None):
    """
    Load the CIFAR-10 dataset via tf.keras and return training/test images and labels.
    :param limit: optional cap on the number of training/test samples (handy for quick experiments)
    :return: x_train, label_train, x_test, label_test
"""
# cifar10 data (integrated in TensorFlow, downloaded on first use)
cifar10_data = tf.keras.datasets.cifar10
# split into training and test data
train_data, test_data = cifar10_data.load_data()
# split dta into image and label (not yet desired format c.f. PreProc)
x_train, label_train = train_data
x_test, label_test = test_data
if limit is not None: # optional limit to develop/test faster
x_train = x_train[:limit, :, :, :]
label_train = label_train[:limit]
x_test = x_test[:limit, :, :, :]
label_test = label_test[:limit]
# provide some basic information about data
print('Number of images in training set', len(x_train))
print('Number of images in testing set', len(x_test))
print('Input image size', x_train.shape[1], 'x',
x_train.shape[2], 'in', x_train.shape[-1], 'channels')
return x_train, label_train, x_test, label_test
| 23,045
|
def set_goal_orientation(delta,
current_orientation,
orientation_limit=None,
set_ori=None):
"""
Calculates and returns the desired goal orientation, clipping the result accordingly to @orientation_limits.
@delta and @current_orientation must be specified if a relative goal is requested, else @set_ori must be
specified to define a global orientation position
"""
# directly set orientation
if set_ori is not None:
goal_orientation = set_ori
# otherwise use delta to set goal orientation
else:
rotation_mat_error = trans.euler2mat(-delta)
goal_orientation = np.dot(rotation_mat_error.T, current_orientation)
#check for orientation limits
if np.array(orientation_limit).any():
if orientation_limit.shape != (2,3):
raise ValueError("Orientation limit should be shaped (2,3) "
"but is instead: {}".format(orientation_limit.shape))
# Convert to euler angles for clipping
euler = trans.mat2euler(goal_orientation)
# Clip euler angles according to specified limits
limited = False
for idx in range(3):
if orientation_limit[0][idx] < orientation_limit[1][idx]: # Normal angle sector meaning
if orientation_limit[0][idx] < euler[idx] < orientation_limit[1][idx]:
continue
else:
limited = True
dist_to_lower = euler[idx] - orientation_limit[0][idx]
if dist_to_lower > np.pi:
dist_to_lower -= 2 * np.pi
elif dist_to_lower < -np.pi:
dist_to_lower += 2 * np.pi
dist_to_higher = euler[idx] - orientation_limit[1][idx]
                    if dist_to_higher > np.pi:
                        dist_to_higher -= 2 * np.pi
                    elif dist_to_higher < -np.pi:
                        dist_to_higher += 2 * np.pi
if dist_to_lower < dist_to_higher:
euler[idx] = orientation_limit[0][idx]
else:
euler[idx] = orientation_limit[1][idx]
else: # Inverted angle sector meaning
if (orientation_limit[0][idx] < euler[idx]
or euler[idx] < orientation_limit[1][idx]):
continue
else:
limited = True
dist_to_lower = euler[idx] - orientation_limit[0][idx]
if dist_to_lower > np.pi:
dist_to_lower -= 2 * np.pi
elif dist_to_lower < -np.pi:
dist_to_lower += 2 * np.pi
dist_to_higher = euler[idx] - orientation_limit[1][idx]
                    if dist_to_higher > np.pi:
                        dist_to_higher -= 2 * np.pi
                    elif dist_to_higher < -np.pi:
                        dist_to_higher += 2 * np.pi
if dist_to_lower < dist_to_higher:
euler[idx] = orientation_limit[0][idx]
else:
euler[idx] = orientation_limit[1][idx]
if limited:
goal_orientation = trans.euler2mat(np.array([euler[1], euler[0], euler[2]]))
return goal_orientation
| 23,046
|
def report(args, unreconciled, reconciled, explanations, column_types):
"""Generate the report."""
# Everything as strings
reconciled = reconciled.applymap(str)
unreconciled = unreconciled.applymap(str)
# Convert links into anchor elements
reconciled = reconciled.applymap(create_link)
unreconciled = unreconciled.applymap(create_link)
# Get the report template
env = Environment(loader=PackageLoader('reconcile', '.'))
template = env.get_template('lib/summary/template.html')
# Create the group dataset
groups = get_groups(args, unreconciled, reconciled, explanations)
# Create filter lists
filters = get_filters(args, groups, column_types)
# Get transcriber summary data
transcribers = user_summary(args, unreconciled)
# Build the summary report
summary = template.render(
args=vars(args),
header=header_data(args, unreconciled, reconciled, transcribers),
groups=iter(groups.items()),
filters=filters,
columns=util.sort_columns(args, unreconciled, column_types),
transcribers=transcribers,
reconciled=reconciled_summary(explanations, column_types),
problem_pattern=PROBLEM_PATTERN)
# Output the report
with open(args.summary, 'w', encoding='utf-8') as out_file:
out_file.write(summary)
| 23,047
|
def get_fixed_income_index():
"""获取固定收益及中债总财富指数对比走势"""
return get_stg_index('fixed_income', '037.CS')
| 23,048
|
def ignoreQtMessageHandler(msgs):
"""A context that ignores specific qMessages for all bindings
Args:
msgs: list of message strings to ignore
"""
from Qt import QtCompat
def messageOutputHandler(msgType, logContext, msg):
if msg in msgs:
return
sys.stderr.write("{0}\n".format(msg))
QtCompat.qInstallMessageHandler(messageOutputHandler)
try:
yield
finally:
QtCompat.qInstallMessageHandler(None)
| 23,049
|
def get_jobid(db):
"""
    Ask MongoDB for a valid jobid.
All processing jobs should have a call to this function at the beginning
of the job script. It simply queries MongoDB for the largest current
value of the key "jobid" in the history collection. If the history
collection is empty it returns 1 under a bias that a jobid of 0 is
illogical.
:param db: database handle
:type db: top level database handle returned by a call to MongoClient.database
"""
hiscol=db.history
hist_size=hiscol.find().count()
if(hist_size<=0):
return 1
else:
maxcur=hiscol.find().sort([('jobid',pymongo.DESCENDING)]).limit(1)
maxcur.rewind() # may not be necessary but near zero cost
maxdoc=maxcur[0]
return maxdoc['jobid']+1
| 23,050
|
def collect_input_arguments():
"""
Collecting input arguments at the command line for use later.
"""
parser = argparse.ArgumentParser(prog= 'Alignseq', description='Align Codon Sequences', usage='%(prog)s [options]', epilog="And that's how you make an Alignment!")
parser.add_argument('-inf', metavar='Infile', action='store', help='A input file of codons')
parser.add_argument('-outf', metavar='Outfile', action='store', help='An Output file (desired path) of codon Alignment')
parser.add_argument('-prog', metavar='Program', action='store', help='Desired program to Align Sequences', default='mafft')
parser.add_argument('-outtranslated', metavar='Outfile for Translated Data', action='store', help='An Output file (desired path) for translated data')
parser.add_argument('-outtransaligned', metavar='Outfile for Translated and Aligned Data', action='store', help='An Output file (desired path) for translated and aligned data')
parser.add_argument('-outformat', metavar='Output Format', action='store', help='An Output Format', default = "fasta")
# Will print the help menu if no arguments are passed to alignseq.py.
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
# Returns arguments for use in classes.
return parser.parse_args()
| 23,051
|
def isA(token, tt=None, tv=None):
"""
function to check if a token meets certain criteria
"""
# Row and column info may be useful? for error messages
try:
tokTT, tokTV, _row, _col = token
    except (TypeError, ValueError):
return False
if tt is None and tv is None:
return True
elif tv is None:
return tt == tokTT
elif tt is None:
return tv == tokTV
else:
return tv == tokTV and tt == tokTT
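# A quick usage sketch with a (token_type, token_value, row, col) tuple:
tok = ('IDENT', 'foo', 3, 14)
isA(tok, tt='IDENT')             # True: type matches
isA(tok, tv='bar')               # False: value differs
isA(tok, tt='IDENT', tv='foo')   # True: both match
isA('not a token', tt='IDENT')   # False: cannot be unpacked into four fields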
| 23,052
|
def authenticate(
*,
token: str,
key: str,
) -> Tuple[bool, Dict]:
"""Authenticate user by token"""
try:
token_header = jwt.get_unverified_header(token)
decoded_token = jwt.decode(token, key, algorithms=token_header.get("alg"))
except JWTError:
return False, {}
else:
return True, decoded_token
| 23,053
|
def backup(source, destination, *, return_wrappers=False):
"""
Backup the selected source(s) into the destination(s) provided.
Source and destination will be converted into ``Source`` and
``Destination`` respectively. If this conversion fails,
an exception will be raised.
:param return_wrappers: If True, the Source and
Destination objects will be returned.
:param source: The source(s) to backup.
:param destination: The destination(s) of backup.
"""
boa = Boa()
_source = get_any_source(source)
_destination = get_any_destination(destination)
boa.backup(_source, _destination)
if return_wrappers:
return _source, _destination
| 23,054
|
def _prepare_data(cfg, imgs):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):
Either image files or loaded images.
Returns:
result (dict): Predicted results.
"""
if isinstance(imgs, (list, tuple)):
if not isinstance(imgs[0], (np.ndarray, str)):
raise AssertionError('imgs must be strings or numpy arrays')
elif isinstance(imgs, (np.ndarray, str)):
imgs = [imgs]
else:
raise AssertionError('imgs must be strings or numpy arrays')
is_ndarray = isinstance(imgs[0], np.ndarray)
if is_ndarray:
cfg = cfg.copy()
# set loading pipeline type
cfg.data.test.pipeline[0].type = 'LoadImageFromNdarray'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
test_pipeline = Compose(cfg.data.test.pipeline)
datas = []
for img in imgs:
# prepare data
if is_ndarray:
# directly add img
data = dict(img=img)
else:
# add information into dict
data = dict(img_info=dict(filename=img), img_prefix=None)
# build the data pipeline
data = test_pipeline(data)
# get tensor from list to stack for batch mode (text detection)
datas.append(data)
if isinstance(datas[0]['img'], list) and len(datas) > 1:
raise Exception('aug test does not support '
f'inference with batch size '
f'{len(datas)}')
data = collate(datas, samples_per_gpu=len(imgs))
# process img_metas
if isinstance(data['img_metas'], list):
data['img_metas'] = [
img_metas.data[0] for img_metas in data['img_metas']
]
else:
data['img_metas'] = data['img_metas'].data
if isinstance(data['img'], list):
data['img'] = [img.data for img in data['img']]
if isinstance(data['img'][0], list):
data['img'] = [img[0] for img in data['img']]
else:
data['img'] = data['img'].data
return data
| 23,055
|
def test_update_email_change_request_existing_email(user):
"""Test that update change email request gives validation error for existing user email"""
new_user = UserFactory.create()
change_request = ChangeEmailRequest.objects.create(
user=user, new_email=new_user.email
)
serializer = ChangeEmailRequestUpdateSerializer(change_request, {"confirmed": True})
with pytest.raises(ValidationError):
serializer.is_valid()
serializer.save()
| 23,056
|
def test_sx(params=''):
"""
Execute all tests in docker container printing output and terminating tests at first failure
:param params: parameters to py.test
"""
docker_exec('py.test -sx {}'.format(params))
| 23,057
|
def _HandleJsonList(response, service, method, errors):
"""Extracts data from one *List response page as JSON and stores in dicts.
Args:
response: str, The *List response in JSON
service: The service which responded to *List request
method: str, Method used to list resources. One of 'List' or
'AggregatedList'.
errors: list, Errors from response will be appended to this list.
Returns:
Pair of:
- List of items returned in response as dicts
- Next page token (if present, otherwise None).
"""
items = []
response = json.loads(response)
# If the request is a list call, then yield the items directly.
if method == 'List':
items = response.get('items', [])
# If the request is an aggregatedList call, then do all the
# magic necessary to get the actual resources because the
# aggregatedList responses are very complicated data
# structures...
elif method == 'AggregatedList':
items_field_name = service.GetMethodConfig(
'AggregatedList').relative_path.split('/')[-1]
for scope_result in six.itervalues(response['items']):
# If the given scope is unreachable, record the warning
# message in the errors list.
warning = scope_result.get('warning', None)
if warning and warning['code'] == 'UNREACHABLE':
errors.append((None, warning['message']))
items.extend(scope_result.get(items_field_name, []))
return items, response.get('nextPageToken', None)
| 23,058
|
def gray_arrays_to_rgb_sequence_array(arrays, start_rgb, end_rgb, normalise_input=False, normalise_output=True):
"""Returns an RGB array that is mean of grayscale arrays mapped to linearly spaced RGB colors in a range.
:param list arrays: list of numpy.ndarrays of shape (N, M)
:param tuple start_rgb: (R, G, B) mapping of first array in `arrays`
:param tuple end_rgb: (R, G, B) mapping of last array in `arrays`
:param bool normalise_input: if True, input arrays are normalised concurrently to max value of 1. Default is False.
:param bool normalise_output: if True (default), output is normalised to range between 0 and 1.
:return: rgb_sequence_array shape (N, M, 3)
:rtype: numpy.ndarray
"""
if normalise_input:
max_gray_value = max([np.max(array) for array in arrays])
arrays = [array / max_gray_value for array in arrays]
colors = np.array([np.linspace(start, end, len(arrays)) for start, end in zip(start_rgb, end_rgb)]).T
color_arrays = [color[np.newaxis, np.newaxis, :] * array[:, :, np.newaxis] for color, array in zip(colors, arrays)]
rgb_sequence_array = np.mean(np.stack(color_arrays, axis=3), axis=3)
if normalise_output:
rgb_sequence_array = rgb_sequence_array / np.nanmax(rgb_sequence_array)
return rgb_sequence_array
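# A minimal usage sketch: map two grayscale frames from red to blue and average them
# (assumes numpy is imported as np, as in the function above).
frames = [np.random.rand(8, 8), np.random.rand(8, 8)]
rgb = gray_arrays_to_rgb_sequence_array(frames, start_rgb=(1, 0, 0), end_rgb=(0, 0, 1))
print(rgb.shape)  # (8, 8, 3), values normalised to [0, 1]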
| 23,059
|
def normalize_inputs(df, metrics):
"""Normalize all inputs around mean and standard deviation.
"""
for m in metrics:
mean = np.mean(df[m])
stdev = np.std(df[m])
def std_normalize(x):
return (x - mean) / stdev
#df[m] = df[m].map(std_normalize)
xmin = min(df[m])
xmax = max(df[m])
def minmax_normalize(x):
return (x - xmin) / (xmax - xmin)
df[m] = df[m].map(minmax_normalize)
return df
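# A minimal usage sketch (assumes pandas is imported as pd and numpy as np, as above):
df = pd.DataFrame({'latency': [10.0, 20.0, 30.0], 'errors': [0.0, 5.0, 10.0]})
df = normalize_inputs(df, metrics=['latency', 'errors'])
# each listed metric column now spans 0.0 to 1.0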
| 23,060
|
def calc_z_rot_from_right(right):
"""
Calculates z rotation of an object based on its right vector, relative to the positive x axis,
which represents a z rotation euler angle of 0. This is used for objects that need to rotate
with the HMD (eg. VrBody), but which need to be robust to changes in orientation in the HMD.
"""
# Project right vector onto xy plane
r = np.array([right[0], right[1], 0])
z_zero_vec = np.array([1, 0, 0])
# Get angle in radians
z = np.arccos(np.dot(r, z_zero_vec))
# Flip sign if on the right side of the xy plane
if r[1] < 0:
z *= -1
# Add pi/2 to get forward direction, but need to deal with jumping
# over quadrant boundaries
if 0 <= z and z <= np.pi / 2:
return z + np.pi / 2
elif np.pi / 2 < z and z <= np.pi:
angle_from_ax = np.pi / 2 - (np.pi - z)
return -np.pi + angle_from_ax
elif -np.pi <= z and z <= -np.pi / 2:
return z + np.pi / 2
else:
return np.pi / 2 + z
| 23,061
|
def poisson_interval(data, alpha=0.32):
"""Calculates the confidence interval
for the mean of a Poisson distribution.
Parameters
----------
data: array_like
Data giving the mean of the Poisson distributions.
alpha: float
Significance level of interval. Defaults to
one sigma (0.32).
Returns
-------
low, high: array_like
Lower and higher limits for the interval."""
a = alpha
low, high = (chi2.ppf(a / 2, 2 * data) / 2,
chi2.ppf(1 - a / 2, 2 * data + 2) / 2)
low = np.nan_to_num(low)
return low, high
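# A minimal usage sketch (assumes numpy as np and scipy.stats.chi2 are imported,
# as in the function above):
counts = np.array([0, 1, 5, 10])
low, high = poisson_interval(counts)
# low/high bound the ~68% (one-sigma) confidence band on the Poisson mean for each count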
| 23,062
|
def sales_administrative_expense(ticker, frequency):
"""
:param ticker: e.g., 'AAPL' or MULTIPLE SECURITIES
:param frequency: 'A' or 'Q' for annual or quarterly, respectively
    :return: the "Sales, General and administrative" line item from the income statement
"""
df = financials_download(ticker, 'is', frequency)
return (df.loc["Sales, General and administrative"])
| 23,063
|
def clip(x, min_, max_):
"""Clip value `x` by [min_, max_]."""
return min_ if x < min_ else (max_ if x > max_ else x)
| 23,064
|
async def test_get_triggers(hass):
"""Test triggers work."""
gateway = await setup_deconz(hass, options={})
device_id = gateway.events[0].device_id
triggers = await async_get_device_automations(hass, "trigger", device_id)
expected_triggers = [
{
"device_id": device_id,
"domain": "deconz",
"platform": "device",
"type": device_trigger.CONF_SHORT_PRESS,
"subtype": device_trigger.CONF_TURN_ON,
},
{
"device_id": device_id,
"domain": "deconz",
"platform": "device",
"type": device_trigger.CONF_LONG_PRESS,
"subtype": device_trigger.CONF_TURN_ON,
},
{
"device_id": device_id,
"domain": "deconz",
"platform": "device",
"type": device_trigger.CONF_LONG_RELEASE,
"subtype": device_trigger.CONF_TURN_ON,
},
{
"device_id": device_id,
"domain": "deconz",
"platform": "device",
"type": device_trigger.CONF_SHORT_PRESS,
"subtype": device_trigger.CONF_TURN_OFF,
},
{
"device_id": device_id,
"domain": "deconz",
"platform": "device",
"type": device_trigger.CONF_LONG_PRESS,
"subtype": device_trigger.CONF_TURN_OFF,
},
{
"device_id": device_id,
"domain": "deconz",
"platform": "device",
"type": device_trigger.CONF_LONG_RELEASE,
"subtype": device_trigger.CONF_TURN_OFF,
},
]
assert triggers == expected_triggers
| 23,065
|
def generate_code():
"""Generate a URL-compatible short code."""
return ''.join(random.choice(ALPHABET) for _ in range(10))
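# A minimal sketch of the assumed ALPHABET constant (URL-safe characters) and a call:
import random
import string

ALPHABET = string.ascii_letters + string.digits
print(generate_code())  # e.g. 'aZ3kQ9mB1x'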
| 23,066
|
def prox_pos(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = \\max(x,0)` applied
elementwise for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and
d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
return prox_scale(prox_pos_base, *args, **kwargs)(v, t)
| 23,067
|
def delete_mapping(module, sdk, cloud, mapping):
"""
Attempt to delete a Mapping
returns: the "Changed" state
"""
if mapping is None:
return False
if module.check_mode:
return True
try:
cloud.identity.delete_mapping(mapping)
except sdk.exceptions.OpenStackCloudException as ex:
module.fail_json(msg='Failed to delete mapping: {0}'.format(str(ex)))
return True
| 23,068
|
def insert_target(x, segment_size):
"""
Creates segments of surrounding words for each word in x.
Inserts a zero token halfway the segment to mark the end of the intended
token.
Parameters
----------
x: list(int)
A list of integers representing the whole data as one long encoded
sentence. Each integer is an encoded subword.
segment_size: int
The size of the output samples.
Returns
-------
np.array:
A numpy matrix representing window of `segment_size` moving over the
input sample `x`.
"""
X = []
#pad the start & end of x
x_pad = x[-((segment_size-1)//2-1):] + x + x[:segment_size//2]
for i in range(len(x_pad)-segment_size+2):
segment = x_pad[i:i+segment_size-1]
#zero at the middle to mark the end of intended token
segment.insert((segment_size-1)//2, 0)
X.append(segment)
return np.array(X)
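# A small worked example (segment_size=5): each row is a window of four surrounding
# tokens with a 0 inserted in the middle to mark the target position; note how the
# padding wraps around the sequence.
print(insert_target([1, 2, 3, 4, 5], segment_size=5))
# [[5 1 0 2 3]
#  [1 2 0 3 4]
#  [2 3 0 4 5]
#  [3 4 0 5 1]
#  [4 5 0 1 2]]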
| 23,069
|
def find_nearest_network(ipa, nets):
"""
:param ipa: An ip address string
:param nets:
A of str gives and ip address with prefix, e.g. 10.0.1.0/24
>>> net1 = "192.168.122.0/24"
>>> net2 = "192.168.0.0/16"
>>> net3 = "192.168.1.0/24"
>>> net4 = "192.168.254.0/24"
>>> net5 = "0.0.0.0/32"
>>> find_nearest_network(net1, [net1, net5])
'192.168.122.0/24'
>>> find_nearest_network(net2, [net1, net5])
'192.168.122.0/24'
>>> find_nearest_network(net1, [net2, net3])
'192.168.0.0/16'
>>> find_nearest_network(net3, [net1, net4])
'192.168.122.0/24'
"""
return sorted(nets, key=functools.partial(distance, ipa))[0]
| 23,070
|
def find_lines(image, show_image, logger):
"""Find lines in the *image*."""
logger("preprocessing")
show_image(image, "original image")
im_h = prepare(image, show_image, logger)
hough = Hough.default(im_h)
logger("hough transform")
im_h2 = transform(im_h, hough, show_image)
logger("finding the lines")
r_lines, l1, l2 = run_ransac(im_h2)
    # Materialize the map so the drawing loop below does not exhaust it before it is returned
    lines = list(map(hough.lines_from_list, r_lines))
# TODO refactor gridf to get rid of this:
bounds = sum(map(lambda l: [l[0], l[-1]], r_lines), [])
# sum(list, []) = flatten list
# TODO do this only if show_all is true:
image_g = image.copy()
draw = ImageDraw.Draw(image_g)
for line in [l for s in lines for l in s]:
draw.line(line_from_angl_dist(line, image.size), fill=(120, 255, 120))
show_image(image_g, "lines")
return lines, l1, l2, bounds, hough
| 23,071
|
def set_action_translation(
language_id: int,
action_id: int,
name: str,
description: str = '',
short_description: str = '',
) -> ActionTranslation:
"""
Create or update an action translation.
:param language_id: the ID of an existing language
:param action_id: the ID of an existing action
:param name: the name of the action
:param description: a (possibly empty) description for the action
:param short_description: the new (possibly empty) short description
:return: the created action translation
:raise errors.LanguageDoesNotExistError: if no language with the given ID
exists
:raise errors.ActionDoesNotExistError: if no action with the given ID
exists
"""
action_translation = models.ActionTranslation.query.filter_by(
language_id=language_id,
action_id=action_id
).first()
if action_translation is None:
actions.get_action(action_id)
languages.get_language(language_id)
action_translation = models.ActionTranslation(
language_id=language_id,
action_id=action_id,
name=name,
description=description,
short_description=short_description,
)
else:
action_translation.name = name
action_translation.description = description
action_translation.short_description = short_description
db.session.add(action_translation)
db.session.commit()
return ActionTranslation.from_database(action_translation)
| 23,072
|
def partialSVD(batch, S, VT, ratio = 1, solver = 'full', tol = None, max_iter = 'auto'):
"""
Fits a partial SVD after given old singular values S
and old components VT.
Note that VT will be used as the number of old components,
so when calling truncated or randomized, will output a
specific number of eigenvectors and singular values.
Checks if new batch's size matches that of the old VT.
Note that PartialSVD has different solvers. Either choose:
1. full
Solves full SVD on the data. This is the most
stable and will guarantee the most robust results.
You can select the number of components to keep
within the model later.
2. truncated
This keeps the top K right eigenvectors and top
k right singular values, as determined by
n_components. Note full SVD is not called for the
truncated case, but rather ARPACK is called.
3. randomized
Same as truncated, but instead of using ARPACK, uses
randomized SVD.
    Notice how Batch = U @ S @ VT. partialSVD focuses on S and VT; to
    recover U, use the relation X = U @ S @ VT:
        X @ V = U @ S, hence U = (X @ V) / S.
You can also get U partially and slowly using reverseU.
"""
data, k, __ = _utilSVD(batch, S, VT, eig = False)
if solver == 'full':
U, S, VT = svd(data)
elif solver == 'truncated':
U, S, VT = truncatedSVD(data, n_components = k, tol = tol)
else:
U, S, VT = randomizedSVD(data, n_components = k, max_iter = max_iter)
return U[k:,:k], S[:k], VT[:k]
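# A standalone numpy sketch of the U-recovery relation described in the
# docstring (X, S, VT below are illustrative local names, not part of this
# module): since X = U @ diag(S) @ VT, it follows that U = (X @ VT.T) / S.
import numpy as np

X = np.random.rand(20, 5)
U_full, S, VT = np.linalg.svd(X, full_matrices=False)
U_approx = (X @ VT.T) / S          # divide each column by its singular value
assert np.allclose(U_approx, U_full)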
| 23,073
|
def can_move_in_direction(node: Node, direction: Direction, factory: Factory):
"""If an agent has a neighbour in the specified direction, add a 1,
else 0 to the observation space. If that neighbour is free, add 1,
else 0 (a non-existing neighbour counts as occupied).
"""
has_direction = node.has_neighbour(direction)
is_free = False
if has_direction:
neighbour: Node = node.get_neighbour(direction)
if neighbour.is_rail:
neighbour_rail = factory.get_rail(neighbour)
is_free = neighbour_rail.is_free() or node in neighbour_rail.nodes
else:
is_free = not neighbour.has_table()
return is_free
| 23,074
|
def render_list_end(token, body, stack, loop):
"""Pops the pushed ``urwid.Pile()`` from the stack (decreases indentation)
See :any:`lookatme.tui.SlideRenderer.do_render` for argument and return
value descriptions.
"""
stack.pop()
| 23,075
|
def action(plugin_name: str, with_client: bool = False):
"""
    Decorator used for action-definition methods.
"""
def _action(func):
def wrapper(client: BaseClient, *args, **kwargs):
logger.debug("%s called '%s'", client.get_send_user(), plugin_name)
logger.debug("%s app called '%s'", client.get_type(), plugin_name)
if with_client:
func(client, *args, **kwargs)
else:
return_val = func(*args, **kwargs)
if isinstance(return_val, str):
client.post(return_val)
return wrapper
return _action
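# A hedged usage sketch of the decorator above. DummyClient is hypothetical and
# only mimics the BaseClient methods the wrapper touches; the sketch also
# assumes the module-level logger used by the wrapper is configured.
class DummyClient:
    def get_send_user(self): return "alice"
    def get_type(self): return "console"
    def post(self, text): print("posted:", text)

@action("ping")
def ping_handler():
    return "pong"

ping_handler(DummyClient())   # logs the call, then posts "pong" via the client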
| 23,076
|
def main(architecture: str = DEFAULT_ARCHITECTURE,
clean: bool = False,
mirror_url: str = DEFAULT_MIRROR_URL,
number: int = DEFAULT_NUMBER,
output_dir: str = DEFAULT_OUTPUT_DIR,
reuse_contents_file: bool = DEFAULT_REUSE_CONTENTS_FILE,
sort_descending: bool = DEFAULT_SORT_DESCENDING,
include_udeb: bool = DEFAULT_INCLUDE_UDEB) -> None:
"""
    Main entry point that drives the dprs application.
Arguments:
architecture: CPU Architecture for the Debian Contents-*.gz file.
clean: If clean option set, output directory will be removed.
mirror_url: Debian Package Repository Address
number: Number of the top packages to show.
output_dir: Output directory where the file will be downloaded and extracted.
reuse_contents_file: Reuse a previously downloaded Debian Contents-*.gz file.
        sort_descending: Sort package statistics in descending order.
        include_udeb: Whether to also include the udeb Contents-*.gz files.
Returns:
None: Returns None.
"""
# If clean option set, output directory will be removed.
if clean:
# Check output directory if exists
if os.path.exists(output_dir):
            shutil.rmtree(output_dir)  # remove the directory that was checked above
exit(0)
# Get a list of Contents-*.gz file(s) URL(s)
contents_file_url = get_contents_file_url(architecture=architecture, mirror_url=mirror_url, include_udeb=include_udeb)
if len(contents_file_url) == 0:
# If the application couldn't find given architecture,
# output an error with a list of architectures.
contents_file_list = get_contents_file_list(mirror_url)
found_architectures = sorted({item["architecture"] for item in contents_file_list})
found_architectures = ", ".join(found_architectures)
# Raise a custom ArchitectureNotFound exception
raise ArchitectureNotFound(
f"Architecture: {architecture} was not found in the given Debian Package Repository. "
f"Available architectures are: {found_architectures}"
)
complete_package_data = {}
for url in contents_file_url:
contents_file = download_contents_file(
contents_file_url=url,
output_dir=output_dir,
reuse_contents_file=reuse_contents_file)
print(contents_file)
package_data = parse_contents_file(contents_file)
complete_package_data.update(**package_data)
package_list = complete_package_data.keys()
# Sort them in the descending order of the number of packages
# Unless the user wants the ascending order
package_list = sorted(package_list, key=lambda x: len(complete_package_data[x]), reverse=not sort_descending)
# Print the output
for ix, package in enumerate(package_list):
if ix == 0:
print(f"{'Order':<10}\t{'Package Name':<40}\t{'Number of Files':>20}")
print(f"{ix+1:<10}\t{package:<40}\t{len(complete_package_data[package]):>20}")
if ix+1 == number:
break
| 23,077
|
def setup_helper_functions(app):
"""Sets up the helper functions.
Args:
app (werkzeug.local.LocalProxy): The current application's instance.
"""
import helper
functions = {
"get_project_category": helper.get_project_category,
"get_blurred_cover_image_path": helper.get_blurred_cover_image_path,
}
app.jinja_env.globals.update(**functions)
| 23,078
|
def Usable(entity_type,entity_ids_arr):
"""Only for Linux modules"""
filNam = entity_ids_arr[0]
return filNam.endswith(".ko.xz")
| 23,079
|
def detection_collate(batch):
"""Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and lists of annotations
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (list of tensors) annotations for a given image are stacked on 0 dim
"""
targets = []
imgs = []
for _, sample in enumerate(batch):
for _, tup in enumerate(sample):
if torch.is_tensor(tup):
imgs.append(tup)
            elif isinstance(tup, np.ndarray):
annos = torch.from_numpy(tup).float()
targets.append(annos)
return (torch.stack(imgs, 0), targets)
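# A hedged usage sketch: plugging detection_collate into a PyTorch DataLoader.
# The toy dataset below is a stand-in; each sample is an (image_tensor,
# annotation_ndarray) pair, which is what detection_collate expects.
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset

class _ToyDetectionDataset(Dataset):
    def __len__(self):
        return 8
    def __getitem__(self, idx):
        image = torch.zeros(3, 32, 32)                  # dummy CHW image
        boxes = np.array([[0.1, 0.1, 0.5, 0.5, 0.0]])   # one box plus class id
        return image, boxes

loader = DataLoader(_ToyDetectionDataset(), batch_size=4,
                    collate_fn=detection_collate)
images, targets = next(iter(loader))
# images: tensor of shape (4, 3, 32, 32); targets: list of 4 annotation tensors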
| 23,080
|
def write_gct(matrix, filepath, descriptions=None):
"""
Establish .gct filepath and write matrix to it.
    :param matrix: DataFrame or Series; (n_samples, m_features)
:param filepath: str; filepath; adds .gct suffix if missing
:param descriptions: iterable; (n_samples); description column
:return: None
"""
# Copy
obj = matrix.copy()
# Work with only DataFrame
if isinstance(obj, Series):
obj = DataFrame(obj).T
# Add description column if missing
if obj.columns[0] != 'Description':
if descriptions:
obj.insert(0, 'Description', descriptions)
else:
obj.insert(0, 'Description', obj.index)
# Set row and column name
obj.index.name = 'Name'
obj.columns.name = None
# Save as .gct
if not filepath.endswith('.gct'):
filepath += '.gct'
establish_filepath(filepath)
with open(filepath, 'w') as f:
f.writelines('#1.2\n{}\t{}\n'.format(obj.shape[0], obj.shape[1] - 1))
obj.to_csv(f, sep='\t')
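# A brief usage sketch for the writer above (the file name and values are
# illustrative): a small DataFrame gains a Description column and is saved
# with the two-line GCT header.
import pandas as pd

matrix = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
                      index=['gene_a', 'gene_b'],
                      columns=['sample_1', 'sample_2'])
write_gct(matrix, 'example_output')   # written to example_output.gct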
| 23,081
|
def SkipUntilNewLine(handle):
"""Skips data until a new-line character is received.
This is needed so that the first sample is read from a complete line.
"""
logging.debug("Skipping until the end of a new line.")
while not handle.readline(4096).endswith('\n'):
pass
| 23,082
|
def assert_allclose(
actual: scipy.stats.mstats_basic.Ttest_1sampResult,
desired: scipy.stats.mstats_basic.Ttest_1sampResult,
):
"""
usage.scipy: 1
"""
...
| 23,083
|
def get_total_invites_in_mempool():
"""
Call merit-cli subprocess and get the number of free invites in mempool
:rtype: int
"""
invites_in_mempool = json.loads(
(subprocess.check_output(['merit-cli',
'-conf={}'.format(config.MERIT_CONF),
'-datadir={}'.format(config.MERIT_DATA_DIR),
'getaddressbalance',
'{"addresses":["' + config.MERIT_MEMPOOL_ADDRESS + '"], "invites":true}']))
.decode("utf-8"))
return invites_in_mempool['balance']
| 23,084
|
def _apply_fft_high_pass_filter(data, fmin, fs=None, workers=None,
detrend=True, time_name=None):
"""Apply high-pass filter to FFT of given data.
Parameters
----------
data : xarray.DataArray
Data to filter.
fmin : float
Lowest frequency in pass band.
fs : float
Sampling frequency.
workers : int
Number of parallel jobs to use in computing FFT.
detrend : bool
If True, remove linear trend from data before computing FFT.
time_name : str
Name of the time coordinate.
Returns
-------
filtered : xarray.DataArray
Array containing the high-pass filtered data.
"""
data = rdu.ensure_data_array(data)
time_name = time_name if time_name is not None else rdu.get_time_name(data)
feature_dims = [d for d in data.dims if d != time_name]
# Handle case in which data is simply a time-series
if not feature_dims:
original_shape = None
else:
original_shape = [data.sizes[d] for d in feature_dims]
time_dim_pos = data.get_axis_num(time_name)
if time_dim_pos != 0:
data = data.transpose(*([time_name] + feature_dims))
# Convert to 2D array
n_samples = data.sizes[time_name]
if feature_dims:
n_features = np.product(original_shape)
else:
n_features = 1
flat_data = data.values.reshape((n_samples, n_features))
rdu.check_fixed_missing_values(flat_data, axis=0)
valid_data, missing_features = rdu.remove_missing_features(flat_data)
valid_features = [d for d in range(n_features)
if d not in missing_features]
valid_data = valid_data.swapaxes(0, 1)
if detrend:
valid_data = scipy.signal.detrend(
valid_data, axis=-1, type='linear')
# Compute spectrum and apply high-pass filter
spectrum = rfft(valid_data, axis=-1, workers=workers)
fft_freqs = rfftfreq(n_samples, d=(1.0 / fs))
filter_mask = fft_freqs < fmin
spectrum[..., filter_mask] = 0.0
filtered_valid_data = irfft(
spectrum, n=n_samples, axis=-1, workers=workers).swapaxes(0, 1)
if rdu.is_dask_array(flat_data):
filtered_cols = [None] * n_features
pos = 0
for j in range(n_features):
if j in valid_features:
filtered_cols[j] = filtered_valid_data[:, pos].reshape(
(n_samples, 1))
pos += 1
else:
filtered_cols[j] = da.full((n_samples, 1), np.NaN)
filtered_data = da.hstack(filtered_cols)
else:
filtered_data = np.full((n_samples, n_features), np.NaN)
filtered_data[:, valid_features] = filtered_valid_data
if original_shape:
filtered_data = filtered_data.reshape([n_samples] + original_shape)
filtered_dims = [time_name] + feature_dims
else:
filtered_data = filtered_data.ravel()
filtered_dims = [time_name]
filtered_coords = deepcopy(data.coords)
result = xr.DataArray(
filtered_data, coords=filtered_coords, dims=filtered_dims)
if time_dim_pos != 0:
result = result.transpose(*data.dims)
return result
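# A minimal, self-contained sketch of the filtering step used above, on a plain
# numpy time series (no xarray, dask or missing-value handling): transform with
# rfft, zero every frequency bin below fmin, and invert.
import numpy as np
from scipy.fft import rfft, irfft, rfftfreq

fs, fmin = 100.0, 5.0                         # sampling rate (Hz) and cutoff
t = np.arange(0, 10, 1.0 / fs)
signal = np.sin(2 * np.pi * 1.0 * t) + np.sin(2 * np.pi * 20.0 * t)

spectrum = rfft(signal)
freqs = rfftfreq(signal.size, d=1.0 / fs)
spectrum[freqs < fmin] = 0.0                  # high-pass: drops the 1 Hz term
filtered = irfft(spectrum, n=signal.size)     # approximately the 20 Hz term alone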
| 23,085
|
def load_inputs(mod, switch_data, inputs_dir):
"""
Import project-specific data. The following files are expected in
the input directory.
all_projects.tab
PROJECT, proj_dbid, proj_gen_tech, proj_load_zone,
proj_connect_cost_per_mw
existing_projects.tab
PROJECT, build_year, proj_existing_cap
cap_limited_projects is optional because some systems will not have
capacity limited projects.
cap_limited_projects.tab
PROJECT, proj_capacity_limit_mw
The following files are optional because they override generic
values given by descriptions of generation technologies.
proj_heat_rate.tab
PROJECT, proj_heat_rate
Note: Load-zone cost adjustments will not be applied to any costs
specified in project_specific_costs.
project_specific_costs.tab
PROJECT, build_year, proj_overnight_cost, proj_fixed_om
"""
switch_data.load_aug(
filename=os.path.join(inputs_dir, 'all_projects.tab'),
select=('PROJECT', 'proj_dbid', 'proj_gen_tech',
'proj_load_zone', 'proj_connect_cost_per_mw'),
index=mod.PROJECTS,
param=(mod.proj_dbid, mod.proj_gen_tech,
mod.proj_load_zone, mod.proj_connect_cost_per_mw))
switch_data.load_aug(
filename=os.path.join(inputs_dir, 'existing_projects.tab'),
select=('PROJECT', 'build_year', 'proj_existing_cap'),
index=mod.EXISTING_PROJ_BUILDYEARS,
param=(mod.proj_existing_cap))
switch_data.load_aug(
optional=True,
filename=os.path.join(inputs_dir, 'cap_limited_projects.tab'),
select=('PROJECT', 'proj_capacity_limit_mw'),
index=mod.PROJECTS_CAP_LIMITED,
param=(mod.proj_capacity_limit_mw))
switch_data.load_aug(
optional=True,
filename=os.path.join(inputs_dir, 'proj_heat_rate.tab'),
select=('PROJECT', 'full_load_heat_rate'),
param=(mod.proj_full_load_heat_rate))
switch_data.load_aug(
optional=True,
filename=os.path.join(inputs_dir, 'project_specific_costs.tab'),
select=('PROJECT', 'build_year',
'proj_overnight_cost', 'proj_fixed_om'),
param=(mod.proj_overnight_cost, mod.proj_fixed_om))
| 23,086
|
def plot_cospectrum(fnames, figname=None, xlog=None, ylog=None,
output_data_file=None):
"""Plot the cospectra from a list of CPDSs, or a single one."""
if is_string(fnames):
fnames = [fnames]
figlabel = fnames[0]
for i, fname in enumerate(fnames):
pds_obj = load_pds(fname, nosub=True)
models = []
if hasattr(pds_obj, 'best_fits') and pds_obj.best_fits is not None:
models = pds_obj.best_fits
if np.allclose(np.diff(pds_obj.freq), pds_obj.df):
freq = pds_obj.freq
xlog = _assign_value_if_none(xlog, False)
ylog = _assign_value_if_none(ylog, False)
else:
flo = pds_obj.freq - pds_obj.df / 2
fhi = pds_obj.freq + pds_obj.df / 2
freq = (fhi + flo) / 2
xlog = _assign_value_if_none(xlog, True)
ylog = _assign_value_if_none(ylog, True)
cpds = pds_obj.power
cospectrum = cpds.real
if xlog and ylog:
plt.figure('Cospectrum - Loglog ' + figlabel)
else:
plt.figure('Cospectrum ' + figlabel)
ax = plt.gca()
if xlog:
ax.set_xscale('log', nonposx='clip')
if ylog:
ax.set_yscale('log', nonposy='clip')
plt.xlabel('Frequency')
if xlog and ylog:
y = freq[1:] * cospectrum[1:]
plt.plot(freq[1:], y,
drawstyle='steps-mid', label=fname)
for i, func in enumerate(models):
plt.plot(freq, freq * func(freq),
label='Model {}'.format(i + 1))
plt.ylabel('Cospectrum * Frequency')
else:
y = cospectrum[1:]
plt.plot(freq[1:], cospectrum[1:], drawstyle='steps-mid',
label=fname)
plt.ylabel('Cospectrum')
for i, func in enumerate(models):
plt.plot(freq, func(freq),
label='Model {}'.format(i + 1))
if output_data_file is not None:
save_as_qdp([freq[1:], y], filename=output_data_file, mode='a')
plt.legend()
if figname is not None:
plt.savefig(figname)
| 23,087
|
def truncate(array:np.ndarray, intensity_profile:np.ndarray, seedpos:int, iso_split_level:float)->np.ndarray:
"""Function to truncate an intensity profile around its seedposition.
Args:
array (np.ndarray): Input array.
intensity_profile (np.ndarray): Intensities for the input array.
seedpos (int): Seedposition.
iso_split_level (float): Split level.
Returns:
np.ndarray: Truncated array.
"""
minima = int_list_to_array(get_minpos(intensity_profile, iso_split_level))
if len(minima) > 0:
left_minima = minima[minima < seedpos]
right_minima = minima[minima > seedpos]
# If the minimum is smaller than the seed
if len(left_minima) > 0:
minpos = left_minima[-1]
else:
minpos = 0
if len(right_minima) > 0:
maxpos = right_minima[0]
else:
maxpos = len(array)
array = array[minpos:maxpos+1]
return array
| 23,088
|
def stac2ds(
items: Iterable[pystac.Item], cfg: Optional[ConversionConfig] = None
) -> Iterator[Dataset]:
"""
Given a lazy sequence of STAC :class:`pystac.Item` turn it into a lazy
sequence of :class:`datacube.model.Dataset` objects
"""
products: Dict[str, DatasetType] = {}
for item in items:
product = products.get(item.collection_id)
# Have not seen this collection yet, figure it out
if product is None:
product = infer_dc_product(item, cfg)
products[item.collection_id] = product
yield item_to_ds(item, product)
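# A hedged usage sketch (item_urls is a placeholder for your own list of STAC
# item URLs): load items with pystac and stream them through stac2ds to obtain
# datacube Datasets lazily.
import pystac

items = [pystac.Item.from_file(url) for url in item_urls]
for dataset in stac2ds(items):
    print(dataset.id)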
| 23,089
|
def fitting_process_parent(scouseobject, SAA, key, spec, parent_model):
"""
Pyspeckit fitting of an individual spectrum using the parent SAA model
Parameters
----------
scouseobject : Instance of the scousepy class
SAA : Instance of the saa class
scousepy spectral averaging area
key : number
index of the individual spectrum
spec : pyspeckit spectrum
the spectrum to fit
parent_model : instance of the fit class
best-fitting model solution to the parent SAA
"""
# Check the model
happy = False
initfit = True
fit_dud = False
while not happy:
if np.all(np.isfinite(np.array(spec.flux))):
if initfit:
guesses = np.asarray(parent_model.params)
if np.sum(guesses) != 0.0:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
old_log = log.level
log.setLevel('ERROR')
spec.specfit(interactive=False, \
clear_all_connections=True,\
xmin=scouseobject.ppv_vol[0], \
xmax=scouseobject.ppv_vol[1], \
fittype = scouseobject.fittype, \
guesses = guesses,\
verbose=False,\
use_lmfit=True)
log.setLevel(old_log)
modparnames = spec.specfit.fitter.parnames
modncomps = spec.specfit.npeaks
modparams = spec.specfit.modelpars
moderrors = spec.specfit.modelerrs
modrms = spec.error[0]
_inputs = [modparnames, [modncomps], modparams, moderrors, [modrms]]
happy, guesses = check_spec(scouseobject, parent_model, _inputs, happy)
initfit = False
else:
# If no satisfactory model can be found - fit a dud!
fit_dud=True
happy = True
else:
# If no satisfactory model can be found - fit a dud!
fit_dud = True
happy = True
if fit_dud:
bf = fitting_process_duds(scouseobject, SAA, key, spec)
else:
bf = fit(spec, idx=key, scouse=scouseobject)
return bf
| 23,090
|
def train_logistic_model(sess: tf.Session, model: Model, train_feeder: FileFeeder, val_feeder: FileFeeder):
"""Train via scikit-learn because it seems to train logistic regression models more effectively"""
train_data = _get_dataset(train_feeder)
val_data = _get_dataset(val_feeder)
logging.info("read {} train records and {} validation records".format(
train_data.num_records(), val_data.num_records()))
logging.info("Features for train data : "+str(train_data.features.shape))
clf: LogisticRegression = LogisticRegression(solver='lbfgs').fit(train_data.features, train_data.labels)
logging.info("sklearn classifier intercept: {}, weights: {}".format(clf.intercept_, clf.coef_))
val_predictions = clf.predict_proba(val_data.features)[:, 1]
auc = roc_auc_score(val_data.labels, val_predictions)
logging.info("AUC: {}".format(auc))
weights = _get_classifier_weights(clf)
model.set_weights(sess, weights)
| 23,091
|
def run(args: Namespace):
"""
run function which is the start point of program
Args:
args: program arguments
"""
tgr = PosTagger(args.model_dir)
for line_num, line in enumerate(sys.stdin, start=1):
if line_num % 100000 == 0:
logging.info('%d00k-th line..', (line_num // 100000))
line = line.rstrip('\r\n')
if not line:
print()
continue
pos_sent = tgr.tag_raw(line)
for pos_word in pos_sent.pos_tagged_words:
print(pos_word.raw, end='\t')
print(' + '.join([str(m) for m in pos_word.pos_tagged_morphs]))
print()
| 23,092
|
def _load_grammar(grammar_path):
"""Lee una gramática libre de contexto almacenada en un archivo .cfg y
la retorna luego de realizar algunas validaciones.
Args:
grammar_path (str): Ruta a un archivo .cfg conteniendo una gramática
libre de contexto en el formato utilizado por NLTK.
Raises:
InvalidGrammarException: en caso de que la gramática no sea válida.
Returns:
nltk.CFG: Gramática libre de contexto leída del archivo.
"""
grammar = nltk.data.load('file:{}'.format(grammar_path))
if grammar.start().symbol() != _START_PRODUCTION:
raise InvalidGrammarException('Start rule must be "{}"'.format(
_START_PRODUCTION))
if not grammar.is_nonempty():
raise InvalidGrammarException('Empty productions are not allowed')
nonterminals = set()
terminals = {token_name for token_name, _ in _TOKEN_TYPES}
for production in grammar.productions():
nonterminals.add(production.lhs().symbol())
for production in grammar.productions():
for element in production.rhs():
symbol = str(element)
if nltk.grammar.is_nonterminal(element):
if symbol not in nonterminals:
raise InvalidGrammarException(
'Invalid nonterminal: {}'.format(symbol))
elif symbol not in terminals:
raise InvalidGrammarException(
'Invalid terminal: {}'.format(symbol))
return grammar
| 23,093
|
def time_span(ts):
"""计算时间差"""
delta = datetime.now() - ts.replace(tzinfo=None)
if delta.days >= 365:
return '%d年前' % (delta.days / 365)
elif delta.days >= 30:
return '%d个月前' % (delta.days / 30)
elif delta.days > 0:
return '%d天前' % delta.days
elif delta.seconds < 60:
return "%d秒前" % delta.seconds
elif delta.seconds < 60 * 60:
return "%d分钟前" % (delta.seconds / 60)
else:
return "%d小时前" % (delta.seconds / 60 / 60)
| 23,094
|
def check_point(point_a, point_b, alpha, mask):
"""
Test the point "alpha" of the way from P1 to P2
See if it is on a face of the cube
Consider only faces in "mask"
"""
plane_point_x = lerp(alpha, point_a[0], point_b[0])
plane_point_y = lerp(alpha, point_a[1], point_b[1])
plane_point_z = lerp(alpha, point_a[2], point_b[2])
plane_point = (plane_point_x, plane_point_y, plane_point_z)
return face_plane(plane_point) & mask
| 23,095
|
def gIndex(df, query_txt, coluna_citacoes:str):
"""Calcula índice g"""
df = df.query(query_txt).sort_values(by=[coluna_citacoes],ascending=False)
df = df.reset_index(drop=True)
df.index+= 1
df['g^2'] = df.index**2
df['citações acumuladas'] = df[coluna_citacoes].cumsum()
df['corte'] = abs(df['g^2'] - df['citações acumuladas'])
posicao_g = df['corte'].idxmin()
return df.loc[posicao_g]['g^2']
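# A worked toy example (the column name 'cites' and the trivial query are
# illustrative): five papers with 10, 8, 5, 4 and 3 citations give cumulative
# counts 10, 18, 23, 27, 30 against squared ranks 1, 4, 9, 16, 25; the gap is
# smallest at rank g = 5, and the function as written returns that row's g^2
# value, i.e. 25.
import pandas as pd

papers = pd.DataFrame({'cites': [3, 10, 4, 8, 5]})
print(gIndex(papers, 'cites >= 0', 'cites'))   # -> 25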
| 23,096
|
def dockerize_cli_args(arg_str: str, container_volume_root="/home/local") -> str:
"""Return a string with all host paths converted to their container equivalents.
Parameters
----------
arg_str : str
The cli arg string to convert
container_volume_root : str, optional
The container directory which is mapped to local working directory,
by default "/home/local"
Returns
-------
str
A string with host paths converted to container paths.
"""
args = arg_str.split(" ")
newargs: List[str] = []
for arg in args:
if uio.file_exists(arg):
newargs.append(_dockerize_path(arg, container_volume_root))
elif "=" in arg:
left, right = arg.split("=")[0], "=".join(arg.split("=")[1:])
if uio.file_exists(right):
newargs.append(
f"{left}={_dockerize_path(right, container_volume_root)}"
)
else:
newargs.append(arg)
return " ".join(newargs)
| 23,097
|
def _invert_monoms(p1):
"""
Compute ``x**n * p1(1/x)`` for a univariate polynomial ``p1`` in ``x``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import _invert_monoms
>>> R, x = ring('x', ZZ)
>>> p = x**2 + 2*x + 3
>>> _invert_monoms(p)
3*x**2 + 2*x + 1
See Also
========
sympy.polys.densebasic.dup_reverse
"""
terms = list(p1.items())
terms.sort()
deg = p1.degree()
R = p1.ring
p = R.zero
cv = p1.listcoeffs()
mv = p1.listmonoms()
for i in range(len(mv)):
p[(deg - mv[i][0],)] = cv[i]
return p
| 23,098
|
def cleaned_picker_data(date: dt.date) -> Dict:
"""Retrieve and process data about Podcast Picker visits
from Webtrekk API for a specific date.
Args:
date (dt.date): Date to request data for.
Returns:
Dict: Reply from API.
"""
config = AnalysisConfig(
[
AnalysisObject("Seiten"),
],
metrics=[
Metric(
"Visits",
sort_order="desc",
),
Metric(
"Visits",
metric_filter=Filter(
filter_rules=[
FilterRule("Werbemittel", "=", "*"),
]
),
),
Metric(
"Ausstiege",
),
],
analysis_filter=Filter(
filter_rules=[
FilterRule("Seiten", "=", "*Podcast-Picker*"),
],
),
start_time=date,
stop_time=date,
row_limit=10000,
)
webtrekk = Webtrekk()
with webtrekk.session():
analysis = webtrekk.get_analysis_data(dict(config))
data = analysis["analysisData"]
date_start = analysis["timeStart"]
date_end = analysis["timeStop"]
logger.info("Start scraping Webtrekk Data between {} and {}.", date_start, date_end)
data_dict = {}
for element in data[:-1]:
name = normalize_name(element[0].split("_")[-1])
item = dict(
visits=int(element[1]),
visits_campaign=int(element[2]),
exits=int(element[3]),
)
if name in data_dict:
data_dict[name]["visits"] += item["visits"]
data_dict[name]["visits_campaign"] += item["visits_campaign"]
data_dict[name]["exits"] += item["exits"]
else:
data_dict[name] = item
return data_dict
| 23,099
|