content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def gen_members(schema: List[StructField]):
    """Yield the token stream for the MEMBERS grammar production.

    Grammar:
        MEMBERS -> MEMBER
                 | MEMBER ',' MEMBERS

    Emits the tokens of the first member, then a "," separator and the
    remaining members (recursively) whenever more than one field remains.
    """
    head, tail = schema[0], schema[1:]
    yield from gen_member(head)
    if tail:
        yield ","
        yield from gen_members(tail)
def ws_notifications_connect(message):
    """
    Handles incoming connections on the websocket `/notifications`. All senders
    are accepted and added to the `notifications` group.
    Parameters
    ----------
    message: channels.message.Message
        The first message which is sent by the sender.
    """
    # Subscribe this client's reply channel to the shared broadcast group so it
    # receives everything later sent to Group('notifications').
    Group('notifications').add(message.reply_channel)
    # Explicitly accept the websocket handshake (channels 1.x protocol).
    message.reply_channel.send({"accept": True})
def unpack_singleton(x):
    """Return the sole element of *x* when it holds exactly one value.

    # Argument:
        x: A list or tuple.
    # Returns:
        The single contained element, or *x* itself otherwise.
    """
    return x[0] if len(x) == 1 else x
def main(argv: Optional[Union[str, List[str]]] = None) -> object:
    """
    Apply R4 edits to FHIR JSON files
    :param argv: Argument list. Can be an unparsed string, a list of strings or nothing. If nothing, we use sys.argv
    :return: 0 if all RDF files that had valid FHIR in them were successful, 1 otherwise
    """
    def gen_dlp(args: List[str]) -> dirlistproc.DirectoryListProcessor:
        # Build the directory processor for .json -> .json conversion
        return dirlistproc.DirectoryListProcessor(args, "Add FHIR R4 edits to JSON file", '.json', '.json',
                                                  addargs=addargs)
    dlp = gen_dlp(argv)
    if not (dlp.opts.infile or dlp.opts.indir):
        # No input specified: re-parse with "--help" appended so usage is shown.
        # Parenthesized so "--help" is appended to whichever argument list is in
        # use (previously it was only appended when argv was None, so an explicit
        # argv produced no help output).
        gen_dlp((argv if argv is not None else sys.argv[1:]) + ["--help"])  # Does not exit
    dlp.opts.converted_files = []           # If converting inline
    nfiles, nsuccess = dlp.run(convert_file, file_filter_2=check_json)
    print(f"Total={nfiles} Successful={nsuccess}")
    return 0 if nfiles == nsuccess else 1
def distribution_filter_for(bijector):
    """Returns a function checking Distribution compatibility with this bijector.
    That is, `distribution_filter_for(bijector)(dist) == True` implies
    that `bijector` can act on `dist` (i.e., they are safe to compose with
    `TransformedDistribution`).
    TODO(bjp): Make this sensitive to supports. Currently assumes `bijector` acts
    on an unconstrained space, and just checks compatible ranks.
    Args:
      bijector: A `Bijector` instance to check compatibility with.
    Returns:
      filter: A Python callable filtering Distributions for compatibility with
        this bijector.
    """
    if isinstance(bijector, tfb.CholeskyToInvCholesky):
        def additional_check(dist):
            # Requires a square (rank-2) event shape, i.e. a Cholesky-like matrix.
            return (tensorshape_util.rank(dist.event_shape) == 2 and
                    int(dist.event_shape[0]) == int(dist.event_shape[1]))
    elif isinstance(bijector, tfb.CorrelationCholesky):
        def additional_check(dist):
            # The isinstance check will be redundant when the
            # `distribution_eligilibility_filter_for` above has been used, but we keep
            # it here for safety.
            return isinstance(dist, tfd.LKJ) and dist.input_output_cholesky
    else:
        # No bijector-specific constraint beyond the dtype/rank checks below.
        additional_check = lambda dist: True
    def distribution_filter(dist):
        # Only floating-point distributions can be transformed.
        if not dtype_util.is_floating(dist.dtype):
            return False
        # The bijector must not require more event dimensions than the
        # distribution's event shape provides.
        if bijector.forward_min_event_ndims > tensorshape_util.rank(
            dist.event_shape):
            return False
        return additional_check(dist)
    return distribution_filter
def create_markdown_table(table_info: dict, index_name: str='Id') -> str:
    """
    Returns a string for a markdown table, formatted
    according to the dictionary passed as `table_info`
    Parameters:
        table_info: Mapping from index to values
        index_name: Name to use for the index column
    Returns:
        md_str: Markdown formatted table string
    Example:
        >>> table_info = {
            'Apples': {
                'Cost': '40p',
                'Colour': 'Red/green',
            },
            'Oranges': {
                'Cost': '50p',
                'Colour': 'Orange',
            },
        }
        >>> md_str = create_markdown_table(table_info, index_name='Fruit')
        >>> print(md_str)
        | Fruit   | Cost   | Colour    |
        |:--------|:-------|:----------|
        | Apples  | 40p    | Red/green |
        | Oranges | 50p    | Orange    |
    """
    # Outer keys become the index after transposing (pd.DataFrame puts them in
    # the columns first); the index is then labelled for the first column.
    table = pd.DataFrame(table_info).T
    table.index.name = index_name
    return table.to_markdown()
def callvote(*args, **kwargs): # real signature unknown
    """ Calls a vote as if started by the server and not a player. """
    # Stub: the real implementation is provided natively by the host
    # environment at runtime; this placeholder only documents the API.
    pass
def _parse_sequence(sequence):
    """Get a string which should describe an event sequence. If it is
    successfully parsed as one, return a tuple containing the state (as an int),
    the event type (as an index of _types), and the detail - None if none, or a
    string if there is one. If the parsing is unsuccessful, return None.
    """
    # Must look like "<...>" to be an event sequence at all.
    if not sequence or sequence[0] != '<' or sequence[-1] != '>':
        return None
    words = sequence[1:-1].split('-')
    modifiers = 0
    # Consume any leading modifier names, OR-ing each into a bitmask.
    while words and words[0] in _modifier_names:
        modifiers |= 1 << _modifier_names[words[0]]
        del words[0]
    # The next word must name a known event type.
    if words and words[0] in _type_names:
        type = _type_names[words[0]]
        del words[0]
    else:
        return None
    if _binder_classes[type] is _SimpleBinder:
        # Simple events accept neither modifiers nor a detail part.
        if modifiers or words:
            return None
        else:
            detail = None
    else:
        # _ComplexBinder
        # Key events validate the detail as a keysym, others as a button.
        if type in [_type_names[s] for s in ("KeyPress", "KeyRelease")]:
            type_re = _keysym_re
        else:
            type_re = _button_re
        if not words:
            detail = None
        elif len(words) == 1 and type_re.match(words[0]):
            detail = words[0]
        else:
            return None
    return modifiers, type, detail
def update_note(original_timestamp, new_timestamp, new_note):
    """Updates note at supplied timestamp.

    Implemented as delete-then-save: the entry stored under
    `original_timestamp` is removed and the new note is persisted under
    `new_timestamp`.
    """
    delete_note(original_timestamp)
    save_note(new_timestamp, new_note)
def test_authz_version_not_supported(client, valid_upload_file_patcher, data):
    """
    Test create /objects response when the authz provided is not supported.
    Assume valid input, ensure correct response.
    """
    # Token contents are irrelevant here; only the authz version check is hit.
    fake_jwt = "1.2.3"
    resp = client.post(
        "/objects", json=data, headers={"Authorization": f"bearer {fake_jwt}"}
    )
    # Bad request: a detail message is returned and no object fields are set.
    assert str(resp.status_code) == "400"
    assert resp.json().get("detail")
    assert not resp.json().get("guid")
    assert not resp.json().get("upload_url")
    assert not resp.json().get("aliases")
    assert not resp.json().get("metadata")
    # Neither the upload backend nor alias creation should have been invoked.
    assert not valid_upload_file_patcher["data_upload_mock"].called
    assert not valid_upload_file_patcher["create_aliases_mock"].called
def make_car_dict(key: str, data: List[str]) -> Dict:
    """Organize car data for 106 A/B of the debtor
    :param key: The section id
    :param data: Content extract from car data section
    :return: Organized data for automobile of debtor
    """
    # Positional layout of the extracted section. Index 4 is intentionally not
    # mapped (presumably a field not needed downstream — TODO confirm).
    field_positions = (
        ("make", 0),
        ("model", 1),
        ("year", 2),
        ("mileage", 3),
        ("other_information", 5),
        ("property_value", 6),
        ("your_property_value", 7),
    )
    result = {"key": key}
    for field, index in field_positions:
        result[field] = data[index]
    return result
def insert_code(src, dest, kind):
    """Insert code in source into destination file.

    :param src: path of the file whose (stripped) contents are inserted
    :param dest: path of the file receiving the insertion
    :param kind: "prefix" to insert before the first suitable definition,
        "postfix" to insert after the last one
    :return: full text of *dest* with the source text inserted
    :raises ValueError: if *kind* is not "prefix" or "postfix"
    """
    # Context managers so the file handles are closed even if parsing fails.
    with open(src) as f:
        source_text = f.read().strip()
    with open(dest) as f:
        destination_text = f.read()
    destination_lines = destination_text.split('\n')
    destination_tree = ast.parse(destination_text)
    if not destination_tree.body:
        idx = 0
    elif kind == "prefix":
        idx = find_prefix_insertion_idx(destination_tree)
    elif kind == "postfix":
        idx = find_postfix_insertion_idx(destination_tree)
    else:
        # Previously an unknown kind fell through to an undefined `idx`
        # (NameError); fail loudly with a clear message instead.
        raise ValueError(f"unknown insertion kind: {kind!r}")
    if idx >= len(destination_tree.body):
        # Strip blank line before insertion
        if destination_lines[-1].strip() == '':
            del destination_lines[-1]
        # Append to file
        destination_lines.append('\n\n' + source_text + '\n')
    else:
        # Start with index at first line above object definition
        line_no = destination_tree.body[idx].lineno - 1  # line numbers count from 1
        line_no = get_previous_blank_line_no(destination_lines, line_no)
        # Strip blank lines before insertion
        if destination_lines[line_no - 1].strip() == '':
            del destination_lines[line_no - 1]
            line_no -= 1
        # perform the insertion
        destination_lines.insert(line_no, '\n\n' + source_text + '\n')
    return '\n'.join(destination_lines)
def _CalculatePerDirectoryCoverageSummary(per_file_coverage_summary):
    """Calculates per directory coverage summary.

    Each file's summary is added to every ancestor directory, up to and
    including SRC_ROOT_DIR.

    Args:
      per_file_coverage_summary: A dictionary from file path to coverage summary.
    Returns:
      A dictionary from directory path to coverage summary.
    """
    logging.debug('Calculating per-directory coverage summary.')
    # defaultdict can take the class itself as factory; no lambda wrapper needed.
    per_directory_coverage_summary = defaultdict(_CoverageSummary)
    for file_path, summary in per_file_coverage_summary.items():
        parent_dir = os.path.dirname(file_path)
        while True:
            per_directory_coverage_summary[parent_dir].AddSummary(summary)
            if parent_dir == SRC_ROOT_DIR:
                break
            next_dir = os.path.dirname(parent_dir)
            if next_dir == parent_dir:
                # Reached the filesystem root without hitting SRC_ROOT_DIR
                # (e.g. a path outside the source tree); stop instead of
                # looping forever on dirname's fixed point.
                break
            parent_dir = next_dir
    logging.debug('Finished calculating per-directory coverage summary.')
    return per_directory_coverage_summary
def is_aix():
    """
    Simple function to return if host is AIX or not
    """
    # Delegates directly to Salt's platform-detection helper.
    return salt.utils.platform.is_aix()
def save_species_info(db, folder, name_freq, name2tax, sci2tax=None,
                      id2tax=None, name2id=None, minimum=0):
    """Writes taxonomy and species name info to file.
    Written by Phil Wilmarth, OHSU, 2009.

    :param db: database name; used for taxon lookups and in the output filename
    :param folder: directory where the results file is written
    :param name_freq: mapping of species name -> sequence count
    :param name2tax: name -> taxon lookup passed to fasta_lib.get_taxon_from_name
    :param sci2tax: optional scientific-name -> taxon lookup (same purpose)
    :param id2tax: optional id -> taxon lookup (same purpose)
    :param name2id: optional name -> species id lookup; may be None
    :param minimum: rows with a count below this threshold are skipped
    """
    # sort the species names and write to file
    print('...writing species analysis results file...')
    sorted_list = fasta_lib.sort_species(name_freq)
    fout_name = os.path.join(folder, db+'_fasta_analyze.txt')
    sformat = '%s\t%s\t%s\t%s\t%s\t%s\t%s'
    dict_list = [name2tax, sci2tax, id2tax, name2id]
    # Context manager guarantees the file is closed even if a lookup raises.
    with open(fout_name, 'w') as fout:
        print(sformat % ('=SUBTOTAL(109,A2:A65000)', 'Taxon_ID', 'Species_ID', 'Species_Name',
                         'Sequence_Count', 'Word_Count', 'Is_virus'), file=fout)
        for name, count in sorted_list:
            if int(count) >= minimum:
                taxon = fasta_lib.get_taxon_from_name(db, name, dict_list)
                try:
                    ID = name2id[name]
                except (TypeError, KeyError):
                    # name2id may be None (TypeError) or lack the name (KeyError);
                    # the previous bare `except:` hid unrelated errors too.
                    ID = ' '
                print(sformat % (str(1), taxon, ID, name, count, len(name.split()),
                                 fasta_lib.virus_test(name)), file=fout)
    return
def draw_support_spring(
        fig,
        support,
        orientation="up",
        color='orange',
        show_values=True,
        row=None,
        col=None,
        units="N/m"):
    """Draw an anchored spring shape on a plotly figure.
    Parameters
    ----------
    fig : plotly figure
        plotly figure to append roller shape to.
    support : Support instance
        support to be represented on figure
    orientation : 'up' or 'right', optional
        direction that the arrow faces, by default "up"
    color : str, optional
        color of spring, by default 'orange'.
    show_values: bool, optional
        If true annotates numerical force value next to arrow, by default True.
    row : int or None,
        Row of subplot to draw line on. If None specified assumes a full plot,
        by default None.
    col : int or None,
        Column of subplot to draw line on. If None specified assumes a full
        plot, by default None.
    units: str,
        The units suffix drawn with the stiffness value. Default is 'N/m'.
    Returns
    -------
    plotly figure
        Returns the plotly figure passed into function with the spring shape
        appended to it."""
    x_sup = support._position
    # x0 and y0 initialised so that when loop through each point in the coords
    # list will have two points to reference.
    x0, y0 = 0, 0
    # reduction of 0.8 used on coords specified (simple reduction modification)
    reduce = 0.8
    if orientation in ['up', 'right']:
        # coords are points between lines to be created
        # label and stiffness are defined for use as meta data to be added to
        # the hovertemplate
        if orientation == 'right':
            # Zig-zag drawn horizontally; use the x-component of stiffness.
            coords = [(5, 0), (7, 5), (12, -5), (14, 0), (19, 0)]
            stiffness = support._stiffness[0]
        else:
            # Zig-zag drawn vertically; use the y-component of stiffness.
            coords = [(0, 5), (-5, 7), (5, 12), (0, 14), (0, 19)]
            stiffness = support._stiffness[1]
        # x1 and y1 are the ends of the line to be created
        for x1, y1 in coords:
            x1, y1 = x1 * reduce, y1 * reduce
            # Create dictionary for line shape object. Note: multiple lines
            # added but reference must always be to the same xanchor
            shape = dict(
                type="line",
                xref="x", yref="y",
                x0=x0, y0=y0, x1=x1, y1=y1,
                line_color=color,
                line_width=2,
                # Pixel sizing keeps the spring the same size regardless of
                # axis zoom; it is anchored at the support position.
                xsizemode='pixel',
                ysizemode='pixel',
                xanchor=x_sup,
                yanchor=0
            )
            # Append line to plot or subplot
            if row and col:
                fig.add_shape(shape, row=row, col=col)
            else:
                fig.add_shape(shape)
            # set end point to be start point for the next line
            x0, y0 = x1, y1
        if show_values:
            # Keep the label clear of the spring drawing before annotating.
            y0 = max(y0, 7)
            annotation = dict(
                xref="x", yref="y",
                x=x_sup,
                y=0,
                yshift=y0 * 1.5,
                xshift=x0 * 2,
                text=f"{stiffness:.3f} {units}",
                font_color=color,
                showarrow=False,
            )
            # Append shape to plot or subplot
            if row and col:
                fig.add_annotation(annotation, row=row, col=col)
            else:
                fig.add_annotation(annotation)
    return fig
def main():
    """
    Main function of the application.

    Builds the MVC triad (QApplication, calculator view, evaluation model,
    controller) and enters the Qt event loop; the process exits with the
    loop's return code.
    """
    my_calc = QApplication(sys.argv)
    # Calculator user interface (View)
    view = MyCalculatorUI()
    view.show()
    # The model
    model = evaluate_result
    # Controller
    MyCalcController(data=model, calc_ui=view)
    # Executing the main loop of the calculator
    sys.exit(my_calc.exec())
def _collins_crt(r, R, P, p, K):
    """Wrapper of CRT for Collins's resultant algorithm.

    Combines the residues ``r`` (mod ``P``) and ``R`` (mod ``p``) via the
    Chinese Remainder Theorem over domain ``K`` and returns the symmetric
    representative modulo ``P*p``.
    """
    return gf_int(gf_crt([r, R], [P, p], K), P*p)
async def test_load_pipette(
    decoy: Decoy,
    model_utils: ModelUtils,
    hardware_api: HardwareAPI,
    subject: EquipmentHandler,
) -> None:
    """It should load pipette data, check attachment, and generate an ID."""
    # Stub the ID generator so the result is predictable.
    decoy.when(model_utils.generate_id()).then_return("unique-id")
    result = await subject.load_pipette(
        pipette_name=PipetteName.P300_SINGLE,
        mount=MountType.LEFT,
        pipette_id=None,
    )
    # The generated ID is used when no explicit pipette_id is supplied.
    assert result == LoadedPipette(pipette_id="unique-id")
    # The handler must also ask the hardware layer to cache the instrument.
    decoy.verify(
        await hardware_api.cache_instruments(
            {HwMount.LEFT: PipetteName.P300_SINGLE}  # type: ignore[dict-item]
        )
    )
def quantity_remover(my_thing):
    """Recursively replace pint quantities with JSON-friendly values.

    A quantity (anything exposing ``.magnitude``) becomes the tagged tuple
    ``('QUANTITY', magnitude, unit_string)``.  Dicts and non-string iterables
    are rebuilt with the conversion applied to every element; all other
    values pass through unchanged.
    """
    if hasattr(my_thing, 'magnitude'):
        # pint quantity: split into a tagged (magnitude, units) triple
        return 'QUANTITY', my_thing.magnitude, my_thing.units.format_babel()
    if isinstance(my_thing, dict):
        return {key: quantity_remover(value) for key, value in my_thing.items()}
    if hasattr(my_thing, '__iter__') and not isinstance(my_thing, str):
        # rebuild lists/tuples/sets using their own type
        return type(my_thing)([quantity_remover(item) for item in my_thing])
    return my_thing
def updatePositions(now):
    """
    Drop tracked bees that have not been seen for more than 2 seconds.

    Entries in the global ``position_dict`` carry a ``'time'`` field holding
    the moment they were last updated; anything older than 2 seconds relative
    to ``now`` is removed from the dictionary in place.
    """
    # get global variables
    global position_dict
    # Collect stale keys first, then delete: removing entries while iterating
    # the dict directly would raise RuntimeError.
    stale = [key for key, info in position_dict.items()
             if now - info['time'] > 2]
    for key in stale:
        position_dict.pop(key, None)
def sliced_wasserstein(PD1, PD2, M=50):
    """ Implementation of Sliced Wasserstein distance as described in
    Sliced Wasserstein Kernel for Persistence Diagrams by Mathieu Carriere, Marco Cuturi, Steve Oudot (https://arxiv.org/abs/1706.03358)
    Parameters
    -----------
    PD1: np.array size (m,2)
        Persistence diagram
    PD2: np.array size (n,2)
        Persistence diagram
    M: int, default is 50
        Iterations to run approximation.
    Returns
    --------
    sw: float
        Sliced Wasserstein distance between PD1 and PD2
    """
    # Unit vector along the diagonal (angle pi/4).
    diagonal = np.array(
        [np.cos(0.25 * np.pi), np.sin(0.25 * np.pi)], dtype=np.float32
    )
    # Project every point of each diagram onto the diagonal direction.
    proj1 = [np.dot(diagonal, point) for point in PD1]
    proj2 = [np.dot(diagonal, point) for point in PD2]
    if (len(proj1) != PD1.shape[0]) or (len(proj2) != PD2.shape[0]):
        raise ValueError("The projected points and origin do not match")
    # Diagonal "mirror" points (x, x) with matching projection length.
    delta1 = [[np.sqrt(p ** 2 / 2.0)] * 2 for p in proj1]
    delta2 = [[np.sqrt(p ** 2 / 2.0)] * 2 for p in proj2]
    # Average the 1-d transport cost over M directions theta*pi, theta in
    # [0.5, 1.5) stepped by 1/M.
    sw = 0
    theta = 0.5
    step = 1.0 / M
    for _ in range(M):
        direction = np.array(
            [np.cos(theta * np.pi), np.sin(theta * np.pi)], dtype=np.float32
        )
        V1 = [np.dot(direction, point) for point in PD1] + [np.dot(direction, point) for point in delta2]
        V2 = [np.dot(direction, point) for point in PD2] + [np.dot(direction, point) for point in delta1]
        sw += step * cityblock(sorted(V1), sorted(V2))
        theta += step
    return sw
def chunks(a_list, chunk_size):
    """Yield successive sub-lists of *a_list* of length *chunk_size*.

    Args:
        a_list: A list.
        chunk_size: Size of each sub-list (the final chunk may be shorter).
    Returns:
        A generator of sub-lists.
    """
    # `range` replaces the Python-2-only `xrange`, which raises NameError on
    # Python 3 (the rest of this codebase uses Python 3 features).
    for i in range(0, len(a_list), chunk_size):
        yield a_list[i:i + chunk_size]
def test_single_fa(mocker, bowtie_args):
    """Tests single-end invocation of bowtie2 with fasta file."""
    # Switch the first read to a .fa extension and drop mate reads so the
    # call is treated as single-end fasta input.
    bowtie_args['read_paths'] = [
        bowtie_args['read_paths'][0].with_suffix('.fa')
    ]
    bowtie_args['read2_paths'] = None
    # Intercept the shell pipeline so nothing is actually executed.
    mock = mocker.patch.object(shell, 'run_piped')
    bowtie2(**bowtie_args)
    # Expect bowtie2 with -U (unpaired) and -f (fasta) piped into samtools sort.
    expected_bt2 = ['bowtie2', '--threads', '10', '-U',
                    str(bowtie_args['read_paths'][0]), '-f', '-x',
                    str(bowtie_args['index_path'])]
    expected_st = ['samtools', 'sort', '-o', str(bowtie_args['output_path']),
                   '-']
    mock.assert_called_with([expected_bt2, expected_st])
def update_handler(request):
    """Responds to any HTTP request.
    Args:
        request (flask.Request): HTTP request object.
    Returns:
        The response text or any set of values that can be turned into a
        Response object using
        `make_response <https://flask.palletsprojects.com/en/1.1.x/api/#flask.Flask.make_response>`.
    """
    print(request)
    request_json = request.get_json()
    # Extract the "name" value differently depending on the content type.
    content_type = request.headers['content-type']
    if content_type == 'application/json':
        # silent=True: returns None instead of raising on malformed JSON.
        request_json = request.get_json(silent=True)
        if request_json and 'name' in request_json:
            name = request_json['name']
        else:
            raise ValueError("JSON is invalid, or missing a 'name' property")
    elif content_type == 'application/octet-stream':
        # Raw body bytes are used directly as the name.
        name = request.data
    elif content_type == 'text/plain':
        name = request.data
    elif content_type == 'application/x-www-form-urlencoded':
        name = request.form.get('name')
    else:
        raise ValueError("Unknown content type: {}".format(content_type))
    # Escape to avoid reflecting unescaped user input into the response.
    return 'Hello {}!'.format(escape(name))
    # if request.args and 'message' in request.args:
    #     return request.args.get('message')
    # elif request_json and 'message' in request_json:
    #     return request_json['message']
    # else:
    #     return f'Hello World xxx!'
def test_is_datasource_for():
    """Test the is_datasource_for method of SOTMap.
    Note that header data to be provided as an argument
    can be a MapMeta object."""
    # The fixture's own data/meta pair must be recognised as SOT data.
    assert sot.is_datasource_for(sot.data, sot.meta)
def test_undistort_pts_on_curve():
    """Undistort image points sampled along a curve with fixed intrinsics.

    Uses hard-coded pinhole intrinsics (fx, fy, cx, cy) and two radial
    distortion coefficients (k1, k2).
    """
    fx = 458.654
    fy = 457.296
    cx = 367.215
    cy = 248.375
    k1 = -0.28340811
    k2 = 0.07395907
    k1k2 = np.array([[k1],
                     [k2]])
    # Must be an ndarray: the column slicing below (pts_orig[:, 0]) raises
    # TypeError on a plain Python list of lists.
    pts_orig = np.array([
        [546, 20], [545, 40], [543, 83],
        [536, 159], [535, 170], [534, 180],
        [531, 200], [530, 211], [529, 218],
        [526, 236], [524, 253], [521, 269],
        [519, 281], [517, 293], [515, 302],
        [514, 310], [512, 320], [510, 329],
        [508, 341], [506, 353], [505, 357]
    ])
    pts_corrected = undistort_point(
        pts_orig[:, 0], pts_orig[:, 1],
        fx, fy, cx, cy,
        k1k2[0][0], k1k2[1][0]
    )
    # NOTE(review): the original had no assertion; minimally check the call
    # produced a result.
    assert pts_corrected is not None
    img_path = './distorted.png'
    img_orig = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
def usercourse(request, course_code):
    """
    The function is used for course content.

    Renders the online-class list for a course; faculty users may also add a
    new class URL or delete an existing one via POST.
    """
    user = request.user
    extrainfo = ExtraInfo.objects.select_related().get(user=user)  # get the type of user
    courseid = Courses.objects.select_related().get(code=course_code)
    classes = OnlineClasses.objects.select_related().filter(course_id=courseid.id)
    # Only faculty may mutate the class list.
    if extrainfo.user_type == 'faculty':
        if request.method == 'POST':
            if 'submiturl' in request.POST:
                # Create a new online class entry from the submitted form.
                topic = request.POST.get('topicName')
                class_date = request.POST.get('date')
                start_time = request.POST.get('StartTime')
                end_time = request.POST.get('EndTime')
                upload_url = request.POST.get('ClassURL')
                OnlineClasses.objects.create(course_id = courseid,
                                             class_date=class_date,
                                             start_time=start_time,
                                             end_time=end_time,
                                             description=topic,
                                             upload_url=upload_url
                                             )
            if 'deleteurl' in request.POST:
                # Delete the class identified by the hidden form field.
                classid = request.POST.get('delete-id')
                OnlineClasses.objects.get(id=classid).delete()
    return render(request, "online_cms/course_new.html", {'classes': classes, 'extrainfo': extrainfo})
def add_device(config_id, name, device_type_id, device_subtype_id, ip4address, ip6address, properties):
    """Add device to BAM.

    Thin wrapper over the BAM SOAP API: creates the device via ``addDevice``
    and returns the full entity fetched by the id the service returns.
    """
    response = get_api()._api_client.service.addDevice(config_id, name, device_type_id, device_subtype_id, ip4address,
                                                       ip6address, properties)
    return get_api().get_entity_by_id(response)
def test_parts_with_charm_part_with_plugin(basic_project, charmcraft_yaml, monkeypatch):
    """Parts are declared with a charm part that uses a different plugin.
    When the "parts" section exists in charmcraft.yaml and a part named "charm"
    is defined with a plugin that's not "charm", handle it as a regular part
    without populating fields for charm building.
    """
    charmcraft_yaml(
        basic_project,
        dedent(
            """
            parts:
              charm:
                plugin: nil
            """
        ),
    )
    config = load(basic_project)
    monkeypatch.chdir(basic_project)
    builder = get_builder(config, entrypoint=None)
    # Run in "managed mode" so the builder goes through the parts lifecycle.
    monkeypatch.setenv("CHARMCRAFT_MANAGED_MODE", "1")
    # Abort the lifecycle immediately; we only care about how it is invoked.
    with patch("charmcraft.parts.PartsLifecycle", autospec=True) as mock_lifecycle:
        mock_lifecycle.side_effect = SystemExit()
        with pytest.raises(SystemExit):
            builder.run([0])
    # The "charm" part must be forwarded verbatim (plugin "nil") with no
    # charm-specific fields injected.
    mock_lifecycle.assert_has_calls(
        [
            call(
                {
                    "charm": {
                        "plugin": "nil",
                    }
                },
                work_dir=pathlib.Path("/root"),
                project_dir=basic_project,
                project_name="name-from-metadata",
                ignore_local_sources=["*.charm"],
            )
        ]
    )
def cdf_approx(X): #, smoothness_factor=1):
    """
    Generates a ppoly spline to approximate the cdf of a random variable,
    from a 1-D array of i.i.d. samples thereof.
    Args:
        X: a collection of i.i.d. samples from a random variable.
        args, kwargs: any options to forward to the cvxopt qp solver
    Returns:
        scipy.interpolate.PPoly object, estimating the cdf of the random variable.
    Raises:
        TODO
    """
    # Pre-format input as ordered numpy array
    X = np.asarray(X)
    diff_X = np.diff(X)
    if not (diff_X > 0).all():
        X.sort()
        diff_X = np.diff(X)
    assert(diff_X.all()) # avoids case of duplicate X-values
    n = len(X)
    # Assemble the quadratic program (objective P,q; inequality G,h) from the
    # sample points via the file-local helper builders.
    scale_axi, scale_ei = make_obj_scale(X)#, smoothness_factor)
    P, q = make_P_q(X, scale_a=scale_axi, scale_e=scale_ei)
    G, h = make_G_h(X)
    #A, b = make_A_b(X) # simply unnecessary
    bmid_c_init = bmid_c_init_state(X)
    # Solve with cvxopt; matrices must be converted to cvxopt's matrix type.
    qp_res = cvxopt.solvers.qp(
        cvxopt.matrix(P),
        cvxopt.matrix(q),
        cvxopt.matrix(G),
        cvxopt.matrix(h),
        #cvxopt.matrix(A),
        #cvxopt.matrix(b),
        #*args, **kwargs
    )
    # Convert the optimizer output into piecewise-polynomial coefficients
    # (value, first and second derivative at each knot).
    X, P_X, dP_X, d2P_X = clean_optimizer_results(np.array(qp_res['x']), X)
    return PPoly.construct_fast(np.stack((d2P_X, dP_X, P_X)), X, extrapolate=True)
def cleanse_param_name(name):
    """Converts Chainer parameter names to ONNX names.
    Note ONNX identifiers must be a valid C identifier.
    Args:
        name (str): A Chainer parameter name (e.g., /l/W).
    Returns
        A valid ONNX name (e.g., param_l_W).
    """
    # Path separators become underscores and the whole name is prefixed.
    sanitized = name.replace('/', '_')
    return 'param{}'.format(sanitized)
def calc_checksum_for_ip_change(old_ip_packet, new_ip_packet, old_checksum, is_ipv6=False):
    """Incrementally recompute a checksum after an IP address change.

    :param old_ip_packet: byte sequence holding the old address
    :param new_ip_packet: byte sequence holding the new address
    :param old_checksum: checksum value before the address change
    :param is_ipv6: True for IPv6 (eight 16-bit words) instead of IPv4 (two)
    :return: the updated checksum
    """
    # IPv6 addresses span 8 16-bit words, IPv4 addresses span 2.
    word_count = 8 if is_ipv6 else 2
    checksum_val = old_checksum
    for offset in range(0, word_count * 2, 2):
        # Fold each big-endian 16-bit word of the old/new address into the
        # incremental checksum update.
        old_word = (old_ip_packet[offset] << 8) | old_ip_packet[offset + 1]
        new_word = (new_ip_packet[offset] << 8) | new_ip_packet[offset + 1]
        checksum_val = fn_utils.calc_incre_csum(checksum_val, old_word, new_word)
    return checksum_val
def get_sender_password():
    """Get sender password.

    Returns the Setting record stored under KEY_SENDER_PASSWORD, or None if
    no such setting exists.
    """
    try:
        return Setting.objects.get(slug=KEY_SENDER_PASSWORD)
    except Setting.DoesNotExist:
        return None
def standardize_data(df):
    """Standardizes the data by cleaning string values and standardizing column
    names.
    df: Pandas dataframe to standardize.
    """
    # Clean string values in the dataframe (strip quotes and whitespace).
    df = df.applymap(
        lambda x: x.replace('"', '').strip() if isinstance(x, str) else x)
    # Standardize column names using the module-level mapping.
    df = df.rename(columns=COL_NAME_MAPPING)
    # Add race metadata columns when a race category id column is present.
    if std_col.RACE_CATEGORY_ID_COL in df.columns:
        std_col.add_race_columns_from_category_id(df)
    return df
def MatrixExp6(se3mat):
    """Computes the matrix exponential of an se3 representation of
    exponential coordinates
    :param se3mat: A matrix in se3
    :return: The matrix exponential of se3mat
    Example Input:
        se3mat = np.array([[0,          0,           0,          0],
                           [0,          0, -1.57079632, 2.35619449],
                           [0, 1.57079632,           0, 2.35619449],
                           [0,          0,           0,          0]])
    Output:
        np.array([[1.0, 0.0,  0.0, 0.0],
                  [0.0, 0.0, -1.0, 0.0],
                  [0.0, 1.0,  0.0, 3.0],
                  [  0,   0,    0,   1]])
    """
    # Rotation 3-vector extracted from the top-left so(3) block.
    omgtheta = so3ToVec(se3mat[0: 3, 0: 3])
    if omgtheta.norm() == 0:
        # Pure translation: rotation part is the identity and the translation
        # column is copied through unchanged.
        return (eye(3).row_join(se3mat[0: 3, 3])).col_join(Matrix([[0, 0, 0, 1]]))
    else:
        theta = AxisAng3(omgtheta)[1]
        omgmat = se3mat[0: 3, 0: 3] / theta
        R = MatrixExp3(se3mat[0: 3, 0: 3])
        # Closed-form integral term applied to the translation column.
        p = (eye(3) * theta + (1 - cos(theta)) * omgmat + (theta - sin(theta)) \
            * omgmat*omgmat) * se3mat[0: 3, 3] / theta
        T = (R.row_join(p)).col_join(Matrix(1,4,[0,0,0,1]))
        return T
def parse_log(content, arg_parser=json_arg_parser):
    """ Parse important information from log files.
    These log files are small so we are making the logic a little simpler by loading all
    the content into memory at once rather than using an iostream.
    Args:
        content (string): the string content of the file
        arg_parser (callable): parses a single header line into argument pairs;
            raises NotArgLineException for lines that are not arguments
    Returns:
        args (dict<string, value>): a dictionary of function arguments of the program that
            created the log and the value those arguments were set to.
        history (list<(int, float)>): a list of tuples of time (in epoch index) and corresponding
            classification loss
        runtime (float): runtime of program in seconds
    Raises:
        BadLogFileException: if no arguments, history, or runtime could be parsed.
    """
    lines = content.split('\n')
    # Part 1: parse arguments
    # Only the first 20 lines are considered header/argument lines.
    arg_pair_lists = exception_safe_map(arg_parser, lines[:20], exception=NotArgLineException)
    args = dict(chain.from_iterable(arg_pair_lists))
    # parse CV
    for l in lines[:10]:
        m = re.match(r'subjects (\d+) are held out', l)
        if m:
            args['held_out'] = m.group(1)
    # Part 2: parse history
    history_matches = imap(
        lambda l: re.match(r'epoch (\d+), validation accuracy (.*)%', l),
        lines)
    # tee + compress uses the match iterator as its own selector, dropping
    # the None (non-matching) entries.
    history_matches = compress(*tee(history_matches, 2))  # filter out the 'Nones'
    history = [(int(h.group(1)), float(h.group(2))) for h in history_matches]
    # Part 3: parse run time
    # The runtime line is expected near the end of the file.
    runtime = None
    for l in lines[-3:]:
        m = re.match(r'Code ran for ran for (.+)m', l)
        if m:
            runtime = float(m.group(1))
            break
    if runtime is None or len(history) == 0 or len(args) == 0:
        raise BadLogFileException('file was not formatted properly')
    return args, history, runtime
def update_db_conf():
    """Update the database configuration."""
    # TODO: Move these into a common module, so we don't have to bury it like this.
    from .servers import set_database_ip
    from .servers import set_web_server_ips
    # Refresh env with the current database / web-server addresses before
    # rendering the templates below.
    execute(set_database_ip)
    execute(set_web_server_ips)
    # Update the configuration files
    upload_template('pg_hba.conf', '/etc/postgresql/9.1/main/pg_hba.conf',
                    template_dir=os.path.join(CONF_DIR, 'postgres'),
                    context={'env': env}, use_sudo=True, use_jinja=True)
    upload_template('postgresql.conf', '/etc/postgresql/9.1/main/postgresql.conf',
                    template_dir=os.path.join(CONF_DIR, 'postgres'),
                    context={'env': env}, use_sudo=True, use_jinja=True)
    # warn_only: postgres may already be running; don't abort the deploy.
    with settings(warn_only=True):
        sudo('/etc/init.d/postgresql start')
        sudo('/etc/init.d/postgresql reload')
    # Open the DB port only to the internal web servers.
    for web_server_ip in env.webserver_internal_ips:
        sudo('ufw allow from %s proto tcp to any port %s' % (web_server_ip, env.db_port))
def _ohlc_dict(df_or_figure, open='', high='', low='', close='', volume='',
               validate='', **kwargs):
    """
    Returns a dictionary with the actual column names that
    correspond to each of the OHLCV values.
    df_or_figure : DataFrame or Figure
    open : string
        Column name to be used for OPEN values
    high : string
        Column name to be used for HIGH values
    low : string
        Column name to be used for LOW values
    close : string
        Column name to be used for CLOSE values
    volume : string
        Column name to be used for VOLUME values
    validate : string
        Validates that the stated column exists
        Example:
            validate='ohv' | Will ensure Open, High
            and close values exist.
    """
    c_dir = {}
    ohlcv = ['open', 'high', 'low', 'close', 'volume']
    # Collect candidate column names from whichever container was passed.
    if type(df_or_figure) == pd.DataFrame:
        cnames = df_or_figure.columns
    elif type(df_or_figure) == Figure or type(df_or_figure) == dict:
        cnames = df_or_figure.axis['ref'].keys()
    elif type(df_or_figure) == pd.Series:
        cnames = [df_or_figure.name]
    # First try an exact case-insensitive match; otherwise fall back to a
    # substring match against each column name.
    c_min = dict([(v.lower(), v) for v in cnames])
    for _ in ohlcv:
        if _ in c_min.keys():
            c_dir[_] = c_min[_]
        else:
            for c in cnames:
                if _ in c.lower():
                    c_dir[_] = c
    # Explicit keyword arguments override any auto-detected mapping.
    if open:
        c_dir['open'] = open
    if high:
        c_dir['high'] = high
    if low:
        c_dir['low'] = low
    if close:
        c_dir['close'] = close
    if volume:
        c_dir['volume'] = volume
    # Every mapped name must actually exist among the candidate columns.
    for v in list(c_dir.values()):
        if v not in cnames:
            raise StudyError('{0} is not a valid column name'.format(v))
    if validate:
        # Each letter of `validate` is the initial of a required OHLCV field.
        errs = []
        val = validate.lower()
        s_names = dict([(_[0], _) for _ in ohlcv])
        cols = [_[0] for _ in c_dir.keys()]
        for _ in val:
            if _ not in cols:
                errs.append(s_names[_])
        if errs:
            raise StudyError('Missing Columns: {0}'.format(', '.join(errs)))
    return c_dir
def _s_to_b(value):
"""[string to binary single value]"""
try:
return bytes(value, 'utf-8')
except:
return value | 5,326,939 |
def get_or_create(
    *, db_session, email: str, incident: Incident = None, **kwargs
) -> IndividualContact:
    """Gets or creates an individual.

    Looks up the contact by email within the incident's project; enriches the
    kwargs from the project's contact plugin (if any) and either creates a new
    contact record or updates the existing one.
    """
    # we fetch the individual contact from the database
    individual_contact = get_by_email_and_project(
        db_session=db_session, email=email, project_id=incident.project.id
    )
    # we try to fetch the individual's contact information using the contact plugin
    contact_plugin = plugin_service.get_active_instance(
        db_session=db_session, project_id=incident.project.id, plugin_type="contact"
    )
    individual_info = {}
    if contact_plugin:
        individual_info = contact_plugin.instance.get(email, db_session=db_session)
    # Plugin data (when present) overrides the caller-supplied kwargs.
    kwargs["email"] = individual_info.get("email", email)
    kwargs["name"] = individual_info.get("fullname", "Unknown")
    kwargs["weblink"] = individual_info.get("weblink", "")
    if not individual_contact:
        # we create a new contact
        individual_contact_in = IndividualContactCreate(**kwargs, project=incident.project)
        individual_contact = create(
            db_session=db_session, individual_contact_in=individual_contact_in
        )
    else:
        # we update the existing contact
        individual_contact_in = IndividualContactUpdate(**kwargs, project=incident.project)
        individual_contact = update(
            db_session=db_session,
            individual_contact=individual_contact,
            individual_contact_in=individual_contact_in,
        )
    return individual_contact
def stats(request):
    """Return stats as JSON according to different GET query parameters.

    Query parameters (with defaults): ``offset`` ("0"), ``limit`` ("10") and
    ``order_by`` ("public_backlinks"); all three are forwarded to build_stats.
    """
    offset = request.GET.get('offset', '0')
    limit = request.GET.get('limit', '10')
    order_by = request.GET.get('order_by', 'public_backlinks')
    return build_stats(offset, limit, order_by)
def news():
    """
    Return the latest version of the news json
    """
    # TODO: add options to request like request.args.get('from', default='')
    # `local` config toggles fetching from a local source vs remote.
    latest_news = get_latest_news(local=CONFIG['PARAMS']['local'] == 'True')
    response = app.response_class(response=latest_news, status=200)
    return response
def zero_crossing(arr, rank=1):
    """Calculates the zero crossing rate.

    Counts sign changes (via tf_diff_axis on tf.sign) and divides by the
    length of the relevant axis.  rank==1 collapses to a scalar count;
    higher ranks count along axis rank-1.
    """
    if rank == 1:
        nzc = tf.cast(tf.count_nonzero(tf_diff_axis(tf.sign(arr))), tf.float32)
    else:
        nzc = tf.cast(tf.count_nonzero(tf_diff_axis(tf.sign(arr)), axis=rank - 1), tf.float32)
    # Normalize the count by the sequence length to get a rate.
    arrlen = tf.cast(arr.shape[rank - 1], tf.float32)
    return tf.divide(nzc, arrlen, name='zcr')
def get_results_by(parameter, value, table='publications', conn_params=PARAMS):
    """Yield records from *table* filtered by *parameter* matching *value*.

    An empty *value* yields every row.  Text-like columns are matched with a
    LIKE prefix search; all other columns use equality.

    :param parameter: column name to filter on. NOTE: interpolated into the
        SQL string — must come from trusted code, never from user input.
    :param value: value to match; passed as a bound query parameter.
    :param table: table name (also interpolated — trusted values only).
    :param conn_params: connection parameters for PostgresConnectionManager.
    """
    with PostgresConnectionManager(conn_params, returns=True) as postgres:
        if value == '':
            postgres.cursor.execute(f"SELECT * FROM {table}")
        elif parameter in ('title', 'author', 'kind', 'publisher', 'language',
                           'full_name', 'email'):
            # Bind the value instead of formatting it into the SQL text to
            # prevent SQL injection through user-supplied search terms.
            postgres.cursor.execute(
                f"SELECT * FROM {table} WHERE {parameter} LIKE %s;",
                (value + '%',))
        else:
            postgres.cursor.execute(
                f"SELECT * FROM {table} WHERE {parameter} = %s;",
                (value,))
        for result in postgres.cursor:
            yield result
def normalize_command(command):
    """Convert `command` to the string representation.

    Strings pass through unchanged.  A single-element list is unwrapped
    as-is (it may already be a quoted compound shell command); longer lists
    are shell-quoted element-wise and joined with spaces.
    """
    if not isinstance(command, list):
        return command
    if len(command) == 1:
        # Either a quoted compound shell command or a simple one-item command.
        return command[0]
    return " ".join(shlex_quote(part) for part in command)
def make_struct(*args, **kwargs):
    """Create a Struct class according to the given format.

    ``_structdef`` is expected to return Python source defining a class named
    ``Struct``; that source is executed and the class object is returned.
    """
    # Python 3: `exec` is a function, and names it creates inside a function
    # body are only reachable through an explicit locals mapping (the old
    # `exec stmt` form that populated the local scope was Python-2-only).
    namespace = {}
    exec(_structdef(*args, **kwargs), globals(), namespace)
    return namespace['Struct']
async def test_unload_entry(hass):
    """Test successful unload of entry."""
    # Set up a config entry so there is something to unload.
    device = await setup_axis_integration(hass)
    assert hass.data[axis.DOMAIN]
    # Unloading must report success and leave no per-domain state behind.
    assert await axis.async_unload_entry(hass, device.config_entry)
    assert not hass.data[axis.DOMAIN]
def savefig(fig,
            filename,
            lgd=None,
            tikz=False,
            mpl3d=False,
            crop=False,
            sizes=(5.25, 5)):
    """
    Save a figure in various formats (PNG and PDF, optionally TikZ).
    @param fig: figure object
    @param filename: filename (without extension) where the file should be stored
    @param lgd: legend object to include in the tight bounding box
    @param tikz: save result as tikz file using matplotlib2tikz
    @param mpl3d: is it a 3d plot? Then one should not use a tight layout
    @param crop: crop the resulting pdf -> pdfcrop needs to be installed
    @param sizes: tuple containing the figures size in x and y direction in inches,
                  or None to keep the current size
    """
    if sizes is not None:
        sizex, sizey = sizes
        fig.set_size_inches(sizex, sizey, forward=True)
    # 3d axes do not play well with bbox_inches='tight', so the tight bbox
    # (and the extra legend artist) is only applied to 2d plots.
    if mpl3d:
        save_kwargs = {}
    else:
        save_kwargs = {'bbox_inches': 'tight'}
        if lgd is not None:
            # Make sure an (possibly outside-axes) legend is inside the bbox.
            save_kwargs['bbox_extra_artists'] = (lgd,)
    for ext in ('png', 'pdf'):
        fig.savefig("%s.%s" % (filename, ext), **save_kwargs)
    if tikz:
        try:
            tikz_save("%s.tex" % filename, fig)
        except Exception:
            # Best-effort: tikz export is optional and matplotlib2tikz can
            # fail on some plot types; don't let that abort the saved pdf/png.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            pass
    if crop:
        subprocess.call(["pdfcrop",
                         "%s.pdf" % filename,
                         "%s.pdf" % filename])
    plt.close(fig)
def link_iterable_by_fields(unlinked, other=None, fields=None, kind=None,
                            internal=False, relink=False):
    """Generic function to link objects in ``unlinked`` to objects in ``other`` using fields ``fields``.
    The database to be linked must have uniqueness for each object for the given ``fields``.
    If ``kind``, limit objects in ``unlinked`` of type ``kind``.
    If ``relink``, link to objects which already have an ``input``. Otherwise, skip already linked objects.
    If ``internal``, linked ``unlinked`` to other objects in ``unlinked``. Each object must have the attributes ``database`` and ``code``.

    Returns ``unlinked``, with matching exchanges given an ``input`` tuple
    of (database, code).  Raises StrategyError when a hash key is not
    unique in ``other`` or when ``other`` datasets lack database/code.
    """
    # Select which exchanges are eligible for linking, depending on the
    # kind filter and whether already-linked exchanges should be relinked.
    if kind:
        kind = {kind} if isinstance(kind, str) else kind
        if relink:
            filter_func = lambda x: x.get('type') in kind
        else:
            filter_func = lambda x: x.get('type') in kind and not x.get('input')
    else:
        if relink:
            filter_func = lambda x: True
        else:
            filter_func = lambda x: not x.get('input')
    if internal:
        other = unlinked
    # candidates: hash -> (database, code) of the first dataset seen with
    # that hash; duplicates: hash -> later datasets sharing the same hash.
    duplicates, candidates = {}, {}
    try:
        # Other can be a generator, so a bit convoluted
        for ds in other:
            key = activity_hash(ds, fields)
            if key in candidates:
                duplicates.setdefault(key, []).append(ds)
            else:
                candidates[key] = (ds['database'], ds['code'])
    except KeyError:
        raise StrategyError("Not all datasets in database to be linked have "
                            "``database`` or ``code`` attributes")
    for container in unlinked:
        for obj in filter(filter_func, container.get('exchanges', [])):
            key = activity_hash(obj, fields)
            if key in duplicates:
                # Linking would be ambiguous: the hash maps to several datasets.
                raise StrategyError(format_nonunique_key_error(obj, fields, duplicates[key]))
            elif key in candidates:
                obj['input'] = candidates[key]
    return unlinked
def is_conn() -> bool:
    """Whether this node is connected to the core network."""
    parent = param.parent
    return parent.ia != utz.IA_INVALID and parent.is_conn
def GetConstants():
    """Returns a list of all available constant values used by some Nexpose Criteria"""
    # Delegates to the module-level filter helper with the constant base
    # class — presumably it scans the module for matching subclasses;
    # see _get_filtered_classes for the exact selection rule.
    return _get_filtered_classes(NexposeCriteriaConstant)
def getEmpiresForUser(user_email):
    """Fetches empires for the given user.
    Even though the empires should be in the data store already, we force fetch them from the server. This is
    because it could be a new user and it hasn't synced yet, but also this provides a way for the user to force
    their empire to update after changing names or shield (otherwise, they'd have to wait for ~3 hours when the
    cron job runs).

    Returns a dict mapping realm name -> empire dict (realms with no match
    or a failed request are omitted).  Results are memcached for one hour.
    """
    keyname = 'profile:empires-for-user:'+user_email
    empires = memcache.get(keyname)
    if not empires:
        # we fire off an HTTP request to each of the realms to get empire details about this email address
        urls = {}
        for realm_name, base_url in REALMS.items():
            urls[realm_name] = base_url+'empires/search?email=' + user_email
        # make simultaneous calls to all the URLs
        rpcs = {}
        for realm_name, url in urls.items():
            rpc = urlfetch.create_rpc()
            urlfetch.make_fetch_call(rpc, url, headers = {'Accept': 'text/json'})
            rpcs[realm_name] = rpc
        empires = {}
        for realm_name, rpc in rpcs.items():
            # get_result() blocks until that realm's fetch completes.
            result = rpc.get_result()
            if result.status_code == 200:
                empire = json.loads(result.content)
                if empire:
                    # The search endpoint returns a list; keep the first hit.
                    empire = empire["empires"][0]
                    empires[realm_name] = empire
                    # while we're here, save it to the data store
                    model.profile.Empire.Save(realm_name, empire)
        memcache.set(keyname, empires, time=3600)
    return empires
def netflix(es, ps, e0, l=0.0001):
    """Blend predictions with ridge-optimal weights (the Netflix Prize trick).

    Recovers X^T y from each model's RMSE alone — without access to the
    targets y — and then solves the regularised normal equations.
    Ref: Töscher, A., Jahrer, M., & Bell, R. M. (2009). The BigChaos
    solution to the Netflix Grand Prize.

    Args:
        es (list of float): RMSEs of the individual predictions.
        ps (list of np.array): the predictions themselves.
        e0 (float): RMSE of the all-zero prediction.
        l (float): ridge regularisation strength (lambda).

    Returns:
        (tuple):
            - (np.array): ensemble predictions
            - (np.array): weights for the input predictions
    """
    n_models = len(es)
    n_samples = len(ps[0])
    X = np.stack(ps).T
    # X^T y from the identity ||y - p||^2 = ||y||^2 - 2 p^T y + ||p||^2.
    xty = 0.5 * (n_samples * e0 ** 2 + (X ** 2).sum(axis=0) - n_samples * np.array(es) ** 2)
    gram = X.T.dot(X) + l * n_samples * np.eye(n_models)
    weights = np.linalg.pinv(gram).dot(xty)
    return X.dot(weights), weights
def metadata_deep_check(metadata):
    """Validate every metadata element against the Vectrix guidelines.

    Checks, in order, for each key:
    1 - Naming convention: no spaces, no hyphens, no uppercase — only
        lowercase words separated by underscores.
    2 - 'priority' must lie in the inclusive range [-1, 100].
    3 - 'link' (when present) must be an https:// URL.

    Raises ValueError on the first violation; returns None when all
    elements pass.
    """
    for key, entry in metadata.items():
        if " " in key:
            raise ValueError(
                "metadata keys aren't allowed to have spaces. Violated on key '{key}'. Information: https://developer.vectrix.io/dev/components/output".format(key=key))
        if "-" in key:
            raise ValueError(
                "metadata keys aren't allowed to have hyphens. Violated on key '{key}'. Information: https://developer.vectrix.io/dev/components/output".format(key=key))
        if any(ch.isupper() for ch in key):
            raise ValueError(
                "metadata keys can't have uppercase characters. Violated on key '{key}'. Information: https://developer.vectrix.io/dev/components/output".format(key=key))
        priority = entry['priority']
        if priority > 100 or priority < -1:
            raise ValueError(
                "metadata 'priority' key is only allowed to be between -1 and 100 (inclusive). Violated on key '{key}' with priority value '{val}'. Information: https://developer.vectrix.io/dev/components/output".format(key=key, val=priority))
        if 'link' in entry:
            link = entry['link']
            if link[:7] == "http://":
                raise ValueError(
                    "Only secure links are allowed in metadata elements (HTTPS). Violated on key '{key}'. Information: https://developer.vectrix.io/dev/components/output".format(key=key))
            if link[:8] != "https://":
                raise ValueError(
                    "Only https links are allowed to be included in metadata elements. Violated on key '{key}'. Information: https://developer.vectrix.io/dev/components/output".format(key=key))
def patch_ibex(current_dir, ibex_tmp_dir, f_log):
    """ Patch ibex sources.

    Applies the bundled ``ibex.patch`` (found in ``current_dir``) inside
    the ibex checkout at ``ibex_tmp_dir``, sending all patch output to the
    shared log file ``f_log``.  Raises CalledProcessError if patch fails.
    """
    # TODO: Remove the need for ibex.patch
    with open(os.path.join(current_dir, 'ibex.patch')) as f_patch:
        # The patch text is fed via stdin; -p1 strips the leading path
        # component as is conventional for git-style diffs.
        subprocess.check_call(
            "patch -p1",
            stdin=f_patch,
            stdout=f_log,
            stderr=f_log,
            shell=True,
            cwd=ibex_tmp_dir
        )
def train(vae, optimizer, num_epochs, device, train_loader, test_loader):
    """Train the VAE for `num_epochs` epochs, evaluating on the test set
    after every epoch.  Progress is reported via print()."""
    for epoch in range(1, num_epochs + 1):
        # Training -----------------------------
        vae.train()
        epoch_loss = 0
        for batch_idx, (batch, _) in enumerate(train_loader):
            batch = batch.to(device)
            optimizer.zero_grad()
            reconstruction, mu, log_var = vae(batch)
            batch_loss = loss_function(reconstruction, batch, mu, log_var)
            batch_loss.backward()
            epoch_loss += batch_loss.item()
            optimizer.step()
            if batch_idx % 100 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(batch), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), batch_loss.item() / len(batch)))
        print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, epoch_loss / len(train_loader.dataset)))
        # Evaluation -----------------------------
        vae.eval()
        eval_loss = 0
        with torch.no_grad():
            for batch, _ in test_loader:
                batch = batch.to(device)
                reconstruction, mu, log_var = vae(batch)
                eval_loss += loss_function(reconstruction, batch, mu, log_var).item()
        eval_loss /= len(test_loader.dataset)
        print('====> Test set loss: {:.4f}'.format(eval_loss))
def create_list(list_data, user_id, status=200):
    """Create a new todo list through the API and return the response.

    Any `status` other than 200 is treated as an expected error response.
    """
    url = '/v1/users/{user_id}/lists'.format(user_id=user_id)
    return app.post_json(url,
                         list_data,
                         status=status,
                         expect_errors=status != 200)
async def create_movie(
    *,
    session: aio_session.AsyncSession = fastapi.Depends(
        dependencies.get_session),
    movie_in: movie_model.MovieCreate,
    current_patron: patron_model.Patron = fastapi.Depends(  # pylint: disable=unused-argument
        dependencies.get_current_active_patron),
) -> movie_model.Movie:
    """Creates a new movie.

    Rejects the request with 409 if a movie with the same English title
    already exists, and with 401 if the authenticated patron is not the
    one listed as the proposer.
    """
    # Enforce title uniqueness (English title is the lookup key).
    movie_db = await movie_crud.MovieCRUD.get_by_title(session,
                                                       movie_in.title_en)
    if movie_db:
        raise fastapi.HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail="An movie with this title already exists in the system.",
        )
    # A patron may only propose movies under their own id.
    if current_patron.id != movie_in.proposed_by:
        raise fastapi.HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="https://www.youtube.com/watch?v=Z4oDZCJMDeY")
    movie = await movie_crud.MovieCRUD.create(session, model_in=movie_in)
    return movie
def example3():
    """
    Figure 2 (left) in the paper.
    Constraints:
    Points lie on y = -x + 5 for the domain (-5.0, -3.0).
    Points lie on y = x + 5 for the domain (3.0, 5.0).
    Positive (Gaussian) constraint prior used.
    """
    bnn = BNNHMCRegressor(uid='EX3', configfile="configs/EX3.json")
    bnn.load(**toy2())
    bnn.add_positive_constraint((-5.0, -3.0), lambda x: -x + 5)
    bnn.add_positive_constraint((3.0, 5.0), lambda x: x + 5)
    # First inference run with the default prior, second with Gaussian.
    bnn.infer()
    bnn.config["prior_type"] = "gaussian"
    bnn.infer()
    # Reverse sample order — presumably so the Gaussian run is plotted
    # first; confirm against the plotting convention.
    bnn.all_samples = bnn.all_samples[::-1]
    bnn.plot_pp(plot_title="Example 3 (Positive Constraint)", domain=np.arange(-4, 4, 0.05), ylims=(0, 14))
def project_gdf(gdf, to_crs=None, to_latlong=False, verbose=False):
    """
    https://github.com/gboeing/osmnx/blob/v0.9/osmnx/projection.py#L58
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid.
    The simple calculation in this function works well for most latitudes, but
    won't work for some far northern locations like Svalbard and parts of far
    northern Norway.
    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected
    to_crs : dict
        if not None, just project to this CRS instead of to UTM
    to_latlong : bool
        if True, projects to latlong instead of to UTM
    verbose : bool
        if True, print timing information for the projection
    Returns
    -------
    GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()
    # if gdf has no gdf_name attribute, create one now
    if not hasattr(gdf, 'gdf_name'):
        gdf.gdf_name = 'unnamed'
    # if to_crs was passed-in, use this value to project the gdf
    if to_crs is not None:
        projected_gdf = gdf.to_crs(to_crs)
    # if to_crs was not passed-in, calculate the centroid of the geometry to
    # determine UTM zone
    else:
        if to_latlong:
            # if to_latlong is True, project the gdf to latlong
            latlong_crs = default_crs
            projected_gdf = gdf.to_crs(latlong_crs)
            if verbose:
                print('Projected the GeoDataFrame "{}" to default_crs in {:,.2f} seconds'.format(gdf.gdf_name, time.time()-start_time))
        else:
            # else, project the gdf to UTM
            # if GeoDataFrame is already in UTM, just return it
            # if (gdf.crs is not None) and ('proj' in gdf.crs) and (gdf.crs['proj'] == 'utm'):
            if gdf.crs.is_projected and gdf.crs.coordinate_operation.name.upper().startswith('UTM'):
                return gdf
            # calculate the centroid of the union of all the geometries in the
            # GeoDataFrame
            avg_longitude = gdf['geometry'].unary_union.centroid.x
            # calculate the UTM zone from this avg longitude and define the UTM
            # CRS to project
            utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
            utm_crs = {'datum': 'WGS84',
                       'ellps': 'WGS84',
                       'proj' : 'utm',
                       'zone' : utm_zone,
                       'units': 'm'}
            # project the GeoDataFrame to the UTM CRS
            projected_gdf = gdf.to_crs(utm_crs)
            if verbose:
                print('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} seconds'.format(gdf.gdf_name, utm_zone, time.time()-start_time))
    # preserve the name through the projection (to_crs drops custom attrs)
    projected_gdf.gdf_name = gdf.gdf_name
    return projected_gdf
def get_full_schema() -> dict:
    """
    Return the full schema for Jina core as a dict.

    Merges the static driver/executor/flow/meta/request/pod schemas with
    whatever executors and drivers were discovered at import time, then
    wires the CompoundExecutor's ``components`` field to the combined
    executor list.  The result is a JSON-Schema (draft-07) document.
    """
    from .. import __version__
    from ..importer import IMPORTED
    from .driver import schema_all_drivers
    from .executor import schema_all_executors
    from .flow import schema_flow
    from .meta import schema_metas
    from .request import schema_requests
    from .pod import schema_pod
    definitions = {}
    for s in [
        schema_all_drivers,
        schema_all_executors,
        schema_flow,
        schema_metas,
        schema_requests,
        schema_pod,
        IMPORTED.schema_executors,
        IMPORTED.schema_drivers
    ]:
        definitions.update(s)
    # fix CompoundExecutor
    definitions['Jina::Executors::CompoundExecutor']['properties']['components'] = {
        '$ref': '#/definitions/Jina::Executors::All'
    }
    return {
        '$id': f'https://api.jina.ai/schemas/{__version__}.json',
        '$schema': 'http://json-schema.org/draft-07/schema#',
        'description': 'The YAML schema of Jina objects (Flow, Executor, Drivers).',
        'type': 'object',
        'oneOf':
            [{'$ref': '#/definitions/Jina::Flow'}] +
            [{"$ref": f"#/definitions/{k}"} for k in IMPORTED.schema_executors.keys()],
        'definitions': definitions
    }
def power_method(A, x0, n_iter=1):
    """Estimate the leading singular triplet (u, s, v) of A by power iteration.

    Repeatedly multiplies x0 by A^T A, then normalises to obtain the right
    singular vector v, the singular value s = ||A v||, and the left
    singular vector u = A v / s.
    """
    vec = x0
    for _ in range(n_iter):
        vec = A.T @ A @ vec
    v = vec / norm(vec)
    Av = A @ v
    s = norm(Av)
    u = Av / s
    return u, s, v
def convert_coevalcube_to_sphere_surface_inpdict(inpdict):
    """
    -----------------------------------------------------------------------------
    Covert a cosmological coeval cube at a given resolution (in physical comoving
    distance) to HEALPIX coordinates of a specified nside covering the whole sky
    or coordinates covering a spherical patch. Wrapper for
    convert_coevalcube_to_sphere_surface()
    Inputs:
    inpdict     [dictionary] Dictionary of parameters for tiling cosmological
                coeval cubes to healpix lightcone cubes. It consists of the
                following keys and values:
                inpcube     [numpy array] Cosmological cube in three dimensions
                            of comoving distance
                inpres      [scalar or tuple or list or numpy array] Input cube
                            pixel resolution (in comoving Mpc). If specified as
                            scalar, it is applied to all three dimensions.
                            Otherwise a three-element tuple, list or numpy array
                            must be specified one for each dimension
                nside       [scalar] HEALPIX nside parameter for output HEALPIX
                            map. If set theta_phi will be ignored.
                theta_phi   [numpy array] nsrc x 2 numpy array of theta and phi
                            (in degrees) at which the lightcone surface should
                            be evaluated. One and only one of nside or theta_phi
                            must be specified.
                freq        [scalar] Frequency (in Hz) to be processed. One and
                            only one of inputs freq or z (see below) must be set
                            in order to determined the redshift at which this
                            processing is to take place. Redshift is necessary
                            to determine the cosmology. If set to None, redshift
                            must be specified (see below)
                redshift    [scalar] Redshift to be processed. One and only one
                            of inputs freq (see above) or redshift must be
                            specified. If set to None, freq must be specified
                            (see above)
                method      [string] Method of interpolation from cube to
                            spherical surface pixels. Accepted values are
                            'nearest_rounded' (fastest but not accurate), and
                            those accepted by the input keyword method in
                            scipy.interpolate.interpn(), namely, 'linear' and
                            'nearest', and 'splinef2d'. 'splinef2d' is only
                            supported for 2-dimensional data. Default='linear'
                rest_freq   [scalar] Rest frame frequency (in Hz) to be used in
                            determination of redshift. Will be used only if
                            freq is set and redshift is set to None.
                            Default=1420405751.77 Hz (the rest frame frequency
                            of neutral Hydrogen spin flip transition)
                cosmo       [instance of class astropy.cosmology] Instance of
                            class astropy.cosmology to determine comoving
                            distance for a given redshift. By default (None) it
                            is set to WMAP9
    Output:
    Stacked lightcone surfaces covering spherical patch (whole sky using HEALPIX
    if nside is specified) or just at specified theta and phi coordinates. It is
    of shape npix
    -----------------------------------------------------------------------------
    """
    try:
        inpdict
    except NameError:
        raise NameError('Input inpdict must be provided')
    if not isinstance(inpdict, dict):
        raise TypeError('Input inpdict must be a dictionary')
    # NOTE(review): Py2-only idiom — each dict key is exec'd into a local
    # variable of the same name; the try/except NameError blocks below
    # then fill in defaults for keys that were absent.
    for key, val in inpdict.iteritems():
        exec(key + '=val')
    try:
        inpcube, inpres
    except NameError:
        raise NameError('Inputs inpcube and inpres must be specified in inpdict')
    try:
        nside
    except NameError:
        nside = None
    try:
        theta_phi
    except NameError:
        theta_phi = None
    try:
        freq
    except NameError:
        freq = None
    try:
        redshift
    except NameError:
        redshift = None
    try:
        cosmo
    except NameError:
        cosmo = None
    try:
        method
    except NameError:
        method = 'linear'
    try:
        rest_freq
    except NameError:
        rest_freq = CNST.rest_freq_HI
    # Delegate to the real implementation with the unpacked arguments.
    return convert_coevalcube_to_sphere_surface(inpcube, inpres, nside=nside, theta_phi=theta_phi, freq=freq, redshift=redshift, method=method, rest_freq=rest_freq, cosmo=cosmo)
def postmsg(message):
    """!Sends the message to the jlogfile logging stream at level INFO.
    This is identical to:
    @code
    jlogger.info(message).
    @endcode
    @param message the message to log.
    @returns the result of jlogger.info(message)."""
    # jlogger is a module-level logger — presumably configured to write to
    # the jlogfile stream; confirm against the module's logging setup.
    return jlogger.info(message)
def _linelabels(df, ax, x, y, label, colors=None, loc='end', label_width=100, **kwargs):
    """
    Annotate each line of a line plot with its label, de-overlapped along y.

    Args:
        df: DataFrame[x, y, label, ...]
        ax: Current axes.
        x: Column for x-axis.
        y: Column for y-axis.
        label: Column for label text.
        colors: Lookup table { label: color }.
        loc: Position of label.
            'end': End of the individual line.
            'right': Right margin of axes.
        label_width: Maximum width of label (in px) used to prevent overlap.
        **kwargs: Extra keyword arguments forwarded to ax.annotate().
    """
    # Get layout style values from context.
    ctx = sns.plotting_context()
    fontsize = ctx.get('legend.fontsize')
    # Calculate y-positions for labels: Data[x, y, label], with single y per x.
    coords = df[[x, y, label]].groupby([label, x]).mean().reset_index()
    # Need to call `get_xlim/get_ylim` to calculate ax.transLimits properly.
    # Filter values to fit within x-limits so last value is calculated correctly.
    _, xmax = ax.get_xlim()
    _, _ = ax.get_ylim()
    coords = coords.loc[coords[x] <= xmax]
    # Keep only the last (right-most) point of each line as its anchor.
    coords = coords.sort_values([x]).groupby([label]).last().reset_index()
    coords = coords.sort_values([y]).reset_index()
    # Ensure x and y columns are floats to prevent integer truncation when
    # converting to axes units.
    coords = coords.astype({ x: 'float', y: 'float' })
    # Convert (x, y) points from data to axes units.
    text_width, text_height = _pt_to_axes_units(ax, label_width, fontsize)
    for i, row in coords.iterrows():
        ix, iy = _data_to_axes_units(ax, row[x], row[y])
        # loc='right' pins x to the right margin; otherwise clamp into view.
        coords.at[i, x] = _clamp(ix, 1 if loc == 'right' else 0)
        coords.at[i, y] = _clamp(iy)
        # Multi-line labels take proportionally more vertical room.
        coords.at[i, 'height'] = len(row[label].split('\n')) * text_height
    # Prevent overlap along y-axis.
    coords = _nonoverlap(coords, x, y, 'height', width=text_width, dy=text_height / 8)
    # Add text annotation at (x, y) point (in data units).
    for i, row in coords.iterrows():
        text_kwargs = dict(
            # Offset text (in pts) from xy coord.
            xytext=(4, 0),
            textcoords='offset points',
            # Style properties.
            horizontalalignment='left',
            verticalalignment='center',
            linespacing=1,
            fontsize=fontsize,
            color=(colors and colors[row[label]]) or None,
        )
        text_kwargs.update(kwargs)
        # NOTE(review): the returned annotation is bound but never used.
        a = ax.annotate(
            row[label],
            (row[x], row[y]),
            xycoords='axes fraction',
            **text_kwargs,
        )
def test_managed_sub_account_withdraw_assets_without_asset():
    """Tests the API endpoint to withdraw asset from managed sub account without asset"""
    # "asset" is deliberately empty — the client must reject the request
    # locally (ParameterRequiredError) before any HTTP call is made.
    params = {
        "fromEmail": "alice@test.com",
        "asset": "",
        "amount": 1,
        "transferDate": 1624023242,
        "recvWindow": 1000,
    }
    client = Client(key, secret)
    client.managed_sub_account_withdraw.when.called_with(**params).should.throw(
        ParameterRequiredError
    )
def test_zjump_gt():
    """Tests for zjump values."""
    # NOTE(review): os.chdir mutates process-global state and is never
    # restored — later tests inherit this working directory.
    os.chdir(os.path.dirname(__file__) + '/data')
    # Run the swc CLI against the fixture file with a z-threshold of 19.
    proc = subprocess.Popen(['swc', 'find', 'pass_zjump.swc',
                             '-z', '19'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    stdout, stderr = proc.communicate()
    # Expect a clean exit, the single matching sample id, and no errors.
    assert proc.returncode == 0
    assert stdout == '4 \n'
    assert stderr == ''
def dec_lap_pyr(x, levs):
    """Build a batch Laplacian pyramid with `levs` detail levels.

    Inputs:
        x    -- BxCxHxW pytorch tensor
        levs -- number of detail (band-pass) levels to construct
    Outputs:
        pyr  -- list of pytorch tensors; pyr[0] holds the finest detail
                level, pyr[-1] the coarsest remaining low-pass residual
    """
    pyr = []
    approx = x
    for _ in range(levs):
        height, width = approx.size(2), approx.size(3)
        down = F.interpolate(approx, (height // 2, width // 2), mode='bilinear')
        back = F.interpolate(down, (height, width), mode='bilinear')
        # Detail band = what the downsample/upsample round trip loses.
        pyr.append(approx - back)
        approx = down
    pyr.append(approx)
    return pyr
def modify_hsbk_calculation():
    """
    The way hsbk is calculated for canvas is close enough but not as accurate as the rest of Photons
    because it doesn't need to be as accurate and the changes are optimised for performance
    Unfortunately this means that comparisons in tests aren't so great, so let's modify how hsbk
    is calculated to match!

    Generator-style fixture: the patches are active only inside the yield.
    """
    # Hue scaled onto the 16-bit wire range, wrapping at 0x10000.
    scaled_hue_transform = (
        lambda _, v: int(0x10000 * (0 if v is sb.NotSpecified else float(v)) / 360) % 0x10000
    )
    # Fractional values scaled onto the full 16-bit range.
    scaled_to_65535_transform = lambda _, v: int(0xFFFF * (0 if v is sb.NotSpecified else float(v)))
    p1 = mock.patch.object(scaled_hue, "_transform", scaled_hue_transform)
    p2 = mock.patch.object(scaled_to_65535, "_transform", scaled_to_65535_transform)
    with p1, p2:
        yield
def test_presplit():
    """Test stub."""
    # Placeholder so the module is collected by the test runner; replace
    # with real presplit assertions.
    assert True
def print_config(config, logger):
    """Log every configuration key/value pair, one per line.

    Keys are left-padded to 15 characters so the values line up.
    """
    for key, value in config.items():
        logger.info("{}:\t{}".format(key.ljust(15), value))
def subinit2_readPPdb_checkkeys(PATHS, config, metadata):
    """
    Reads the power plant database and determines the required input files (fundamentals and parameters):
    1) Read the power plant database from disk
    2) Read the database and check the required input files for:
    - fuels
    - efficiency curves
    (could add more in the future)
    This is done by:
    2.1) Collect key requirements from PPdb (e.g. fuels, params, etc.)
    2.2) Check if these keys are in metadata.ini
    2.3) Use the corresponding instructions in metadata.ini to check if the required input files are in
    ./Inputs and resources
    If 2.2 or 2.3 fails, raise appropriate exception.

    Returns the parsed PPdb; raises RuntimeError listing every key that is
    missing from the metadata or whose input file was not found.
    """
    # ----------------------------------------------------------------------- 1) Read PPdb from disk
    PPdb = pp.GenUnit.set_PPdb(PATHS['PP database'], readprms={key: config['power plant database'][key]
                                                              for key in config['power plant database']})
    # ----------------------------------------------------------------------- 2.1) Collect the required keys
    prmssh = PPdb['params']
    # NOTE - When you have more metadata sections, this is where you add them
    # note - dropna() here to allow WtEs to have no fuel key
    # {metadata section : df[['in metadata', 'file found']]
    checkKeys = {
        'fuels': pd.DataFrame(index=pd.Index(prmssh['Fuel ID*'].dropna().unique())),
        'efficiency curves': pd.DataFrame(index=pd.Index(prmssh['Efficiency Curve*'].dropna().unique())),
    }
    # df.index = required keys as in sheet (apply key.lower() to check in metadata)
    # Prep for 2.3
    # SECURITY/HACK(review): metadata values are kwargs-like strings that
    # are eval'd below; the metadata file must be trusted.
    extract_fname = lambda **args: args['filename']  # Use Python's kwargs parser :)
    PATHS_tupkey = {key[1]: key for key in PATHS.keys() if isinstance(key, tuple)}
    for mdsection, df in checkKeys.items():
        # ------------------------------------------------------------ 2.2) Check if the keys are in metadata.ini
        # logical series for filtering items that have to be checked further
        df['in metadata'] = pd.Series(index=df.index,
                                      data=(key.lower() in metadata[mdsection] for key in df.index))
        sub_idx = df['in metadata'].loc[df['in metadata']].index
        # ------------------------------------------------------ 2.3) Check if input files are in the project directory
        # (only for keys found in metadata)
        # 2.3.1) Build the check df's
        df['file found'] = pd.Series(index=df.index)
        for key in sub_idx:
            mdkey = key.lower()
            # a) Extract the filename
            try:
                fname = eval("extract_fname({})".format(metadata[mdsection][mdkey]))
            except SyntaxError:
                print("SyntaxError encountered while evaluating the metadata['{mdsection}']['{mdkey}'] instructions. "
                      "Pls. check that the following encoded argument in the metadata file is a valid expression to "
                      "pass to DataHandler.Metadata(): \n\n '{arg}'\n\n".format(
                          mdsection=mdsection, mdkey=mdkey, arg=metadata[mdsection][mdkey]))
                raise
            if fname is None:
                raise NotImplementedError("This implies that dh.Metadata() will be called with values passed. Current "
                                          "implementation only expects file reads.")
            # b) Get the path
            fp = os.path.join(PATHS[PATHS_tupkey.get(mdsection, mdsection)], fname)
            # c) Check if exists and assign to series
            df.loc[key, 'file found'] = os.path.exists(fp)
    # ------------------------------------------------------ 2.3.2) Summarize the results
    # Do this by looking for the failed keys
    err_msg = "Error in checking the parameter and input keys in the power plant database: \n\n"
    # a, b) Not in metadata, In metadata but file not found
    Failed_metadata, Failed_file = {}, {}
    for mdsection, df in checkKeys.items():
        _md = tuple(key for key in df.index if not df.loc[key, 'in metadata'])
        _file = tuple(key for key in df.index if not df.loc[key, 'file found'])
        if _md: Failed_metadata[mdsection] = _md
        if _file: Failed_file[mdsection] = _file
    # c) Report
    if Failed_metadata:
        err_msg += "The ff. keys were not found in the metadata file: \n\n{}\n\n".format(
            "\n".join("\t{}: {}".format(mdsection, ", ".join(keys)) for mdsection, keys in Failed_metadata.items()))
    if Failed_file:
        err_msg += "The ff. keys were not found in the appropriate project input directories: \n\n{}\n\n".format(
            "\n".join("\t{}: {}".format(mdsection, ", ".join(keys)) for mdsection, keys in Failed_file.items()))
    if Failed_metadata or Failed_file:
        logging.debug("\n\n".join("\n{}\n{}".format(key.upper(), val) for key, val in checkKeys.items()))
        raise RuntimeError(err_msg)
    return PPdb
def cal():
    """
    Monte-Carlo estimate of pi: the unit circle inscribed in a square of
    side 2 covers pi/4 of its area, so the fraction of uniformly random
    points that land inside the circle approaches pi/4.
    """
    n = 10000000  # larger n gives a more accurate estimate
    r = 1.0
    cx, cy = (0.0, 0.0)
    x_lo, x_hi = cx - r, cx + r
    y_lo, y_hi = cy - r, cy + r
    hits = 0
    for _ in range(0, n):
        px = random.uniform(x_lo, x_hi)
        py = random.uniform(y_lo, y_hi)
        if px * px + py * py <= 1.0:
            hits += 1
    print(hits / float(n) * 4)
def mif2amps(sh_mif_file, working_dir, dsi_studio_odf="odf8"):
    """Convert a MRTrix SH mif file to a NiBabel amplitudes image.
    Parameters:
    ===========
    sh_mif_file : str
        path to the mif file with SH coefficients
    working_dir : str
        directory where the intermediate files (directions.txt,
        amplitudes.nii) are written
    dsi_studio_odf : str
        name of the DSI Studio ODF tessellation to sample (default "odf8")
    Returns:
    ========
    (amplitudes_img, directions)
        the loaded amplitudes image and the hemisphere of unit direction
        vectors that were sampled
    """
    verts, _ = get_dsi_studio_ODF_geometry(dsi_studio_odf)
    num_dirs, _ = verts.shape
    # The tessellation is antipodally symmetric; sample one hemisphere only.
    hemisphere = num_dirs // 2
    directions = verts[:hemisphere]
    x, y, z = directions.T
    # NOTE(review): z is negated before the spherical conversion —
    # presumably to match MRTrix's coordinate convention; confirm upstream.
    _, theta, phi = cart2sphere(x, y, -z)
    dirs_txt = op.join(working_dir, "directions.txt")
    np.savetxt(dirs_txt, np.column_stack([phi, theta]))
    odf_amplitudes_nii = op.join(working_dir, "amplitudes.nii")
    # sh2amp (MRTrix) evaluates the SH series at the requested directions.
    popen_run(["sh2amp", "-quiet", "-nonnegative", sh_mif_file, dirs_txt, odf_amplitudes_nii])
    if not op.exists(odf_amplitudes_nii):
        # NOTE(review): printf-style args here are never interpolated into
        # the message; the path is passed as a second exception argument.
        raise FileNotFoundError("Unable to create %s", odf_amplitudes_nii)
    amplitudes_img = nb.load(odf_amplitudes_nii)
    return amplitudes_img, directions
def write_nifti_header(hdrname, hdr, newfile=True):
#*************************************************
    """
    filename is the name of the nifti header file.
    hdr is a header dictionary. Contents of the native header
    will be used if it is a nifti header.
    Returns: 0 if no error, otherwise 1.

    Python 2 only (uses dict.has_key).  Packs the 348-byte NIfTI-1 header
    with struct and writes it (plus the 4-byte extension flag and any
    extension payload for 'n+1' single-file images) to `hdrname`.  When
    `newfile` is False the existing file is opened and overwritten from
    offset 0 instead of being truncated.
    """
    if hdr.has_key('native_header'):
        whdr = hdr['native_header']
        if whdr.has_key('filetype'):
            ftype = whdr['filetype']
        else:
            ftype = 'unknown'
    else:
        ftype = 'unknown'
    Rout = hdr['R']
    # Fix broken headers.
    if hdr['mdim'] == 0:
        hdr['mdim'] = 1
    if hdr['tdim'] == 0:
        hdr['tdim'] = 1
    if hdr['zdim'] == 0:
        hdr['zdim'] = 1
    # Insert info for fieldmap correction if available.
    modify_nifti_auxfile(hdr)
    # Convert to quaternions.
    if abs(Rout[:3,:3]).sum() > 0 and Rout[3,3] == 1.:
        # This looks like a valid R matrix.
        x = rot44_to_quatern(Rout)
    else:
        x = None
    if isinstance(x, tuple):
        qa, qb, qc, qd, qfac, qoffx, qoffy, qoffz = x
        # NOTE(review): the value read from the native header is
        # immediately overwritten by the scanner-anatomical code.
        qform_code = whdr.get('qform_code',c.NIFTI_XFORM_SCANNER_ANAT)
        qform_code = c.NIFTI_XFORM_SCANNER_ANAT
    else:
        # Conversion failed, use defaults.
        qa, qb, qc, qd, qfac, qoffx, qoffy, qoffz = \
            (0., 0., 0., 0., 1., 0., 0., 0.)
        qform_code = c.NIFTI_XFORM_UNKNOWN
    # struct layout of the fixed 348-byte NIfTI-1 header.
    fmt = 'i10s18sihsB8hfffhhhh8ffffhcbffffii80s24shh6f4f4f4f16s4s'
    lgth = struct.calcsize(fmt)
    # Choose byte order to match the data's swap flag.
    if hdr['swap']:
        fmt = ">" + fmt
    else:
        fmt = "<" + fmt
    # Derive frequency/phase/slice encode dimensions for dim_info.
    if hdr['native_header'].has_key('ScanningSequence'):
        if whdr['ScanningSequence'][0].strip() == 'EP':
            slice_dim = NIFTI_SLICE_ALT_INC
        else:
            slice_dim = 0
        if whdr['PhaseEncDir'] == 'ROW':
            # dim_info = (slice_dim << 4) | (0x1 << 2) | 0x2
            freq_dim = 2
            phase_dim = 1
        else:
            # dim_info = (slice_dim << 4) | (0x2 << 2) | 0x1
            freq_dim = 1
            phase_dim = 2
    else:
        freq_dim = whdr.get('freq_dim', 0)
        phase_dim = whdr.get('phase_dim', 0)
        slice_dim = whdr.get('slice_dim', 0)
    if not whdr.has_key('quatern_b'):
        # Existing header not for a nifti file. Rewrite defaults.
        whdr = {'sizeof_hdr':348, 'data_type':"", 'db_name':"", \
                'extents':16384, \
                'session_error':0, 'regular':"r", 'dim_info':"0", \
                'dim':[1, 1, 1, 1, 1, 1, 1, 1], \
                'intent_p1':0., 'intent_p2':0., 'intent_p3':0., 'intent_code':0, \
                'bitpix':0, 'slice_start':0, \
                'pixdim':[1., 0., 0., 0., 0., 0., 0., 0.], \
                'vox_offset':0., 'scl_slope':0., 'scl_inter':0., 'slice_code':"", \
                'xyzt_units':"", 'cal_max':0., 'cal_min':0., 'slice_duration':0., \
                'toffset':0., 'glmax':0, 'glmin':0, 'descrip':"", \
                'qform_code':qform_code, 'time_units':'msec', 'space_units':'mm', \
                'misc_units':'', 'sform_code':'unknown', 'intent_name':"", \
                'magic':"ni1"}
    # Set orientation information.
    whdr['quatern_b'] = qb
    whdr['quatern_c'] = qc
    whdr['quatern_d'] = qd
    whdr['qoffset_x'] = qoffx
    whdr['qoffset_y'] = qoffy
    whdr['qoffset_z'] = qoffz
    # Build the sform rows from the LPI-converted rotation scaled by voxel size.
    Rlpi = convert_R_to_lpi(hdr['R'], hdr['dims'], hdr['sizes'])
    # Rlpi = hdr['R']
    Rtmp = dot(Rlpi, diag([hdr['xsize'], hdr['ysize'], hdr['zsize'], 1.]))
    whdr['srow_x'] = zeros(4, float)
    whdr['srow_x'][:] = Rtmp[0, :]
    whdr['srow_y'] = zeros(4, float)
    whdr['srow_y'][:] = Rtmp[1, :]
    whdr['srow_z'] = zeros(4, float)
    whdr['srow_z'][:] = Rtmp[2, :]
    # whdr['srow_x'][:3] *= hdr['xsize']
    # whdr['srow_y'][:3] *= hdr['ysize']
    # whdr['srow_z'][:3] *= hdr['zsize']
    whdr['qfac'] = qfac
    # Set undefined fields to zero. Spm puts garbage here.
    whdr['glmin'] = 0
    whdr['glmax'] = 0
    whdr['sizeof_hdr'] = 348
    whdr['descrip'] = hdr['native_header'].get('descrip','')
    whdr['aux_file'] = hdr['native_header'].get('aux_file','')
    if len(whdr['descrip']) > 79:
        # NIfTI descrip field is 80 bytes including the terminator.
        whdr['descrip'] = whdr['descrip'][:79]
    whdr['dim'] = [hdr['ndim'], hdr['xdim'], hdr['ydim'], hdr['zdim'], \
                   hdr['tdim'], hdr['mdim'], 0, 0]
    whdr['slice_end'] = hdr['zdim']-1
    # Prefer the stored repetition time; fall back through hdr and subhdr.
    if hdr['sizes'][3] > 0.:
        TR = hdr['sizes'][3]
    else:
        TR = hdr.get('TR',0.)
    if TR == 0.:
        TR = hdr['subhdr'].get('TR',0.)
    whdr['pixdim'] = [hdr['ndim'], hdr['xsize'], hdr['ysize'], hdr['zsize'], \
                      TR, hdr['msize'], 0., 0.]
    whdr['qoffset_x'] = qoffx
    whdr['qoffset_y'] = qoffy
    whdr['qoffset_z'] = qoffz
    whdr['quatern_b'] = qb
    whdr['quatern_c'] = qc
    whdr['quatern_d'] = qd
    whdr['qfac'] = float(qfac)
    whdr['bitpix'] = datatype_to_lgth[hdr['datatype']]
    whdr['datatype'] = nifti_type_to_datacode[hdr['datatype']]
    # Pack freq/phase/slice dims into the single dim_info byte.
    whdr['dim_info'] = freq_dim | (phase_dim << 2) | (slice_dim << 4)
    whdr['slice_code'] = nifti_slice_order_encode[ \
        hdr['native_header'].get('SliceOrder', 'unknown')]
    whdr['intent_code'] = nifti_intent_encode[whdr.get('intent_class', \
        'unknown')]
    whdr['qform_code'] = nifti_sqform_encode.get(qform_code, c.NIFTI_XFORM_UNKNOWN)
    whdr['sform_code'] = nifti_sqform_encode[whdr.get('sform_code', 0)]
    whdr['xyzt_units'] = nifti_units_encode[whdr.get('space_units', 'mm')] | \
        nifti_units_encode[whdr.get('time_units', 'msec')] | \
        nifti_units_encode[whdr.get('misc_units', '')]
    if hdr['filetype'] == 'nii':
        hdr['filetype'] = 'n+1'
    whdr['magic'] = hdr['filetype']
    # Single-file ('n+1') images start voxel data after header + ext flag.
    if hdr['filetype'] == 'n+1':
        vox_offset = 348
        vox_offset = vox_offset + 4
    else:
        vox_offset = 0
    extcode = whdr.get('extcode', '0000')
    if extcode[0] != '0':
        # Account for the 8-byte-aligned extension record (esize+ecode+data).
        vox_offset = int(vox_offset) + 6 + len(whdr.get('edata',''))
    whdr['vox_offset'] = vox_offset
    binary_hdr = struct.pack(fmt, whdr['sizeof_hdr'], whdr['data_type'], \
        whdr['db_name'], whdr['extents'], whdr['session_error'], whdr['regular'], \
        whdr['dim_info'], whdr['dim'][0], whdr['dim'][1], whdr['dim'][2], \
        whdr['dim'][3], whdr['dim'][4], whdr['dim'][5], whdr['dim'][6], \
        whdr['dim'][7], whdr['intent_p1'], whdr['intent_p2'], whdr['intent_p3'], \
        whdr['intent_code'], whdr['datatype'], whdr['bitpix'], \
        whdr['slice_start'], whdr['qfac'], whdr['pixdim'][1], whdr['pixdim'][2], \
        whdr['pixdim'][3], whdr['pixdim'][4], whdr['pixdim'][5], \
        whdr['pixdim'][6], whdr['pixdim'][7], whdr['vox_offset'], \
        hdr['scale_factor'], hdr['scale_offset'], whdr['slice_end'], \
        whdr['slice_code'], whdr['xyzt_units'], whdr['cal_max'], whdr['cal_min'], \
        whdr['slice_duration'], whdr['toffset'], whdr['glmax'], whdr['glmin'], \
        whdr['descrip'], whdr['aux_file'], whdr['qform_code'], whdr['sform_code'], \
        whdr['quatern_b'], whdr['quatern_c'], whdr['quatern_d'], \
        whdr['qoffset_x'], whdr['qoffset_y'], whdr['qoffset_z'], \
        whdr['srow_x'][0], whdr['srow_x'][1], whdr['srow_x'][2], whdr['srow_x'][3], \
        whdr['srow_y'][0], whdr['srow_y'][1], whdr['srow_y'][2], whdr['srow_y'][3], \
        whdr['srow_z'][0], whdr['srow_z'][1], whdr['srow_z'][2], whdr['srow_z'][3], \
        whdr['intent_name'], whdr['magic'])
    # try:
    if True:
        if newfile:
            f = open(hdrname, 'w')
        else:
            f = open(hdrname, 'r+')
            f.seek(0)
    # except IOError:
    #     raise IOError(\
    #         "\nfile_io::write_nifti: Could not open %s\n\n"%hdrname)
    try:
        f.write(binary_hdr)
    except IOError:
        raise IOError(\
            "\nfile_io::write_nifti: Could not write to %s\n\n"%hdrname)
    if hdr['filetype'] == 'n+1':
        # Write the 4-byte extension flag (and extension payload if any).
        ecodes = whdr.get('extcode', zeros(4,byte))
        if isinstance(ecodes, list):
            ecodes = array(ecodes)
        if ecodes[0]:
            # Extension is present.
            exthdr = struct.pack('ccccii', ecodes[0], ecodes[1], \
                ecodes[2], ecodes[3], whdr['esize'], \
                nifti_ecode_encode[whdr['ecode']]) + whdr['edata']
        else:
            exthdr = fromstring(ecodes,byte)
        # Write the extension header.
        f.write(exthdr)
    f.close()
    return 0
def get_array_of_float(num, data):
    """Read an array of 32-bit floats from the front of a binary buffer.

    Parameters
    ----------
    num : int
        Number of values to read (length of array).
    data : bytes
        4C binary data buffer.

    Returns
    -------
    bytes
        The buffer with the consumed bytes removed.
    list
        The decoded float values.
    """
    # Each IEEE-754 single-precision float occupies 4 bytes.
    nbytes = num * 4
    values = struct.unpack('f' * num, data[:nbytes])
    remainder = data[nbytes:]
    return remainder, list(values)
def test_operations_in_a_time_round__assert_peer_number_normal(
    scenario: Scenario, engine: Engine, performance: Performance
) -> None:
    """
    This tests operations_in_a_time_round().
    It asserts that the peer count is correct after one time round.
    """
    # Arrange: build a SingleRun seeded with the scenario's initial peers.
    this_run: SingleRun = create_single_run_with_initial_peers(
        scenario, engine, performance
    )
    # Act: run one round with fixed arrival/departure/order counts.
    this_run.operations_in_a_time_round(
        peer_arr_num=37, peer_dept_num=5, order_arr_num=8
    )
    # Assert: peer count = initial size - departures + arrivals.
    assert len(this_run.peer_full_set) == scenario.init_size - 5 + 37
def AUcat(disk=None, first=1, last=1000, Aname=None, Aclass=None, Aseq=0,
          giveList=False):
    """
    Catalog listing of AIPS UV data files on disk disk

    Strings use AIPS wild cards:
        blank => any
        '?' => one of any character
        "*" => arbitrary string
    If giveList then return list of CNOs

    * disk = AIPS disk number to list; None -> use module default Adisk
    * first = lowest slot number to list
    * last = highest slot number to list
    * Aname = desired AIPS name, using AIPS wildcards, None -> don't check
    * Aclass = desired AIPS class, using AIPS wildcards, None -> don't check
    * Aseq = desired AIPS sequence, 0=> any
    * giveList = If true, return list of CNOs matching
    """
    ################################################################
    global Adisk
    # Fall back to the module-level default disk; otherwise remember the
    # explicit disk as the new default (NOTE: mutates module state).
    # Bug fix: use identity check "is None" rather than "== None".
    if disk is None:
        disk = Adisk
    else:
        Adisk = disk
    # Get catalog
    cat = AIPSData.AIPSCat(disk)
    olist = AIPSDir.PListCat(cat.catalog, disk, type="UV", first=first, last=last,
                             Aname=Aname, Aclass=Aclass, Aseq=Aseq,
                             giveList=giveList)
    OErr.printErrMsg(err, "Error with AIPS catalog")
    return olist
    # end AUcat
def process_threat_results(matching_threats, context):
    """Build the HTTP response payload for a set of matching threats."""
    serialized = []
    for threat in matching_threats:
        serialized.append(ThreatSerializer(threat).data)
    payload = {
        "id": context.id,
        "hits": serialized,
    }
    if context.pending_searches:
        # Searches still running: tell the client to poll again later.
        payload["retry_secs"] = 60
        return Response(payload, status.HTTP_303_SEE_OTHER)
    return Response(payload, status.HTTP_200_OK)
def add_WMA(self, timeperiod=20, type="line", color="secondary", **kwargs):
    """Weighted Moving Average.

    Adds a WMA overlay, computed from the close-price column, to the
    chart's primary indicators.

    :param timeperiod: lookback window for the moving average.
    :param type: plot kind for the overlay (overridden by kwargs['kind']).
    :param color: colour key used when rendering the overlay.
    """
    if not self.has_close:
        # Bug fix: the bare Exception carried no message; explain why.
        raise Exception("add_WMA() requires close prices.")
    utils.kwargs_check(kwargs, VALID_TA_KWARGS)
    if "kind" in kwargs:
        type = kwargs["kind"]
    name = "WMA({})".format(str(timeperiod))
    self.pri[name] = dict(type=type, color=color)
    self.ind[name] = talib.WMA(self.df[self.cl].values, timeperiod)
def main():
    """
    Main entry point
    """
    # cmor_name lists, keyed by MIP table, that need the areacella fix.
    # These are identical queries apart from table_id/cmor_name, so they
    # are driven from a single mapping instead of 13 copy-pasted blocks.
    table_to_vars = {
        '6hrPlev': ['wap4'],
        '6hrPlevPt': ['hus7h', 'psl', 'ta', 'ua', 'uas', 'va', 'vas',
                      'zg7h'],
        'Amon': ['cl', 'cli', 'clivi', 'clt', 'clw', 'clwvi', 'evspsbl',
                 'hfls', 'hfss', 'hur', 'hurs', 'hus', 'huss', 'pr', 'prc',
                 'prsn', 'prw', 'ps', 'psl', 'rlds', 'rldscs', 'rlus',
                 'rlut', 'rlutcs', 'rsds', 'rsdscs', 'rsdt', 'rsus',
                 'rsuscs', 'rsut', 'rsutcs', 'rtmt', 'sfcWind', 'ta',
                 'tas', 'tasmax', 'tasmin', 'tauu', 'tauv', 'ts', 'ua',
                 'uas', 'va', 'vas', 'wap', 'zg'],
        'Eday': ['tauu', 'tauv'],
        'LImon': ['snw'],
        'Lmon': ['mrso'],
        'Prim6hr': ['clt', 'hus4', 'pr', 'ps', 'rsds', 'ua4', 'va4'],
        'PrimSIday': ['siu', 'siv'],
        'Primday': ['evspsbl', 'hus23', 'mrlsl', 'mrso', 'ta23', 'ts',
                    'ua23', 'va23', 'wap23', 'zg23'],
        'PrimdayPt': ['ua', 'va'],
        'SIday': ['sithick'],
        'SImon': ['sidconcdyn', 'sidconcth', 'sidmassdyn', 'sidmassth',
                  'sifb', 'siflcondbot', 'siflcondtop', 'siflfwbot',
                  'sihc', 'simass', 'sisaltmass', 'sisnconc', 'sisnhc',
                  'sisnmass', 'sisnthick', 'sispeed', 'sistrxubot',
                  'sistryubot', 'sithick', 'sitimefrac', 'sivol',
                  'sndmassdyn', 'sndmasssnf'],
        'day': ['clt', 'hfls', 'hfss', 'hur', 'hurs', 'hus', 'huss', 'pr',
                'prc', 'prsn', 'psl', 'rlds', 'rlus', 'rlut', 'rsds',
                'rsus', 'sfcWind', 'sfcWindmax', 'snw', 'ta', 'tas',
                'tasmax', 'tasmin', 'ua', 'uas', 'va', 'vas', 'wap', 'zg'],
    }
    # OR the per-table querysets together, preserving the table order.
    data_reqs = None
    for table_id, cmor_names in table_to_vars.items():
        reqs = DataRequest.objects.filter(
            institution_id__name='MPI-M',
            table_id=table_id,
            cmor_name__in=cmor_names
        ).exclude(
            experiment_id__name='highresSST-present'
        )
        data_reqs = reqs if data_reqs is None else data_reqs | reqs
    ext_var_cella = FileFix.objects.get(name='ExternalVariablesAreacella')
    # This next line could be done more quickly by:
    # further_info_url_fix.datarequest_set.add(*data_reqs)
    # but sqlite3 gives an error of:
    # django.db.utils.OperationalError: too many SQL variables
    for data_req in data_reqs:
        data_req.fixes.add(ext_var_cella)
    logger.debug('FileFix {} added to {} data requests.'.
                 format(ext_var_cella.name, data_reqs.count()))
def entropy_logits(logits):
    """Entropy of an unnormalized (logit-space) probability distribution."""
    # Normalize the logits into probabilities, then defer to entropy().
    return entropy(F.softmax(logits, dim=-1))
def show_overview(gfile,dfile,xyd,xyref,xh,yh,ww,objname,save=False,container=None,
                  params=None
                 ):
    """
    show_overview plots 2x2 of direct and grism images with zoom-in to the object location.
      - gfile = (path to grism file, extension)
      - dfile = (path to direct file, extension)
      - xyd = (pixX,pixY) of the object in the direct image
      - xyref = (xref,yref) of the object in the grism image
      - xh,yh,ww = trace and wavelength map
      - objname = object name which will be used for the plot title
      - save = True if user wants an output file for the plot, given container
      - params = customizable plot parameters (None -> built-in defaults below)

    Raises ValueError when save is True but no container is supplied.
    """
    # Default plot parameters live here rather than in the signature to
    # avoid the shared-mutable-default-argument pitfall; callers that pass
    # their own dict see exactly the previous behaviour.
    if params is None:
        params = {'figsize':(10,10),
                  '221':{'minmax':(5.,99.),'cmap':'viridis','s':30,'facecolor':'None','edgecolor':'red','fontsize':12,'title':'default'},
                  '222':{'minmax':(5.,99.),'cmap':'viridis','s':30,'facecolor':'None','edgecolor':'red','padxy':(50,50),'fontsize':12,'title':'default'},
                  '223':{'minmax':(5.,99.),'cmap':'viridis','s':30,'facecolor':'None','edgecolor':'red','color':'red','ls':'-','lw':4,'alpha':0.6,'fontsize':12,'title':'default'},
                  '224':{'minmax':(5.,99.),'cmap':'viridis','s':30,'facecolor':'None','edgecolor':'red','color':'red','ls':'-','lw':4,'alpha':0.6,'padxy':(50,50),'tickperx':50,'annotate_marker':'ro','annotate_color':'red','annotate_fontsize':8,'annotate_rotation':30.,'fontsize':12,'title':'default'},
                 }
    pixx,pixy = xyd
    xref,yref = xyref
    # Trace coordinates in the grism frame are offsets from the reference pixel.
    xg,yg = xh+xref,yh+yref
    plt.figure(figsize=params['figsize'])
    # 221: full direct image with the object marked
    ax1 = plt.subplot(2,2,1)
    fontsize = params['221']['fontsize']
    tmpdata = fits.open(dfile[0])[dfile[1]].data
    m = np.isfinite(tmpdata)
    vmin,vmax = np.percentile(tmpdata[m],params['221']['minmax'][0]),np.percentile(tmpdata[m],params['221']['minmax'][1])
    ax1.imshow(tmpdata,origin='lower',cmap=params['221']['cmap'],vmin=vmin,vmax=vmax)
    ax1.scatter(pixx,pixy,s=params['221']['s'],facecolor=params['221']['facecolor'],edgecolor=params['221']['edgecolor'])
    if params['221']['title'] == 'default':
        tmpheader = fits.open(dfile[0])[0].header
        string = '{0} {1} {2} {3}'.format(objname,tmpheader['ROOTNAME'],tmpheader['DATE-OBS'],tmpheader['FILTER'])
        string += '\nEXPSTART={0:.3f} EXPTIME={1:.3f}'.format(tmpheader['EXPSTART'],tmpheader['EXPTIME'])
    else:
        string = params['221']['title']
    ax1.set_title(string,fontsize=fontsize)
    # 222: direct image zoomed in on the object
    ax2 = plt.subplot(2,2,2)
    fontsize = params['222']['fontsize']
    dx,dy = params['222']['padxy']
    tmpdata = fits.open(dfile[0])[dfile[1]].data
    m = np.isfinite(tmpdata)
    vmin,vmax = np.percentile(tmpdata[m],params['222']['minmax'][0]),np.percentile(tmpdata[m],params['222']['minmax'][1])
    ax2.imshow(tmpdata,origin='lower',cmap=params['222']['cmap'],vmin=vmin,vmax=vmax)
    ax2.scatter(pixx,pixy,s=params['222']['s'],facecolor=params['222']['facecolor'],edgecolor=params['222']['edgecolor'])
    ax2.set_xlim(pixx-dx,pixx+dx)
    ax2.set_ylim(pixy-dy,pixy+dy)
    if params['222']['title'] == 'default':
        string = 'xyd = {0:.1f},{1:.1f}'.format(pixx,pixy)
    else:
        string = params['222']['title']
    ax2.set_title(string,fontsize=fontsize)
    # 223: full grism image with the trace overplotted
    ax3 = plt.subplot(2,2,3)
    fontsize = params['223']['fontsize']
    tmpdata = fits.open(gfile[0])[gfile[1]].data
    m = np.isfinite(tmpdata)
    vmin,vmax = np.percentile(tmpdata[m],params['223']['minmax'][0]),np.percentile(tmpdata[m],params['223']['minmax'][1])
    ax3.imshow(tmpdata,origin='lower',cmap=params['223']['cmap'],vmin=vmin,vmax=vmax)
    ax3.plot(xg,yg,color=params['223']['color'],ls=params['223']['ls'],lw=params['223']['lw'],alpha=params['223']['alpha'])
    if params['223']['title'] == 'default':
        tmpheader = fits.open(gfile[0])[0].header
        string = '{0} {1} {2} {3}'.format(objname,tmpheader['ROOTNAME'],tmpheader['DATE-OBS'],tmpheader['FILTER'])
        string += '\nEXPSTART={0:.3f} EXPTIME={1:.3f}'.format(tmpheader['EXPSTART'],tmpheader['EXPTIME'])
    else:
        string = params['223']['title']
    ax3.set_title(string,fontsize=fontsize)
    # 224: grism image zoomed in on the trace, annotated with wavelengths
    ax4 = plt.subplot(2,2,4)
    fontsize = params['224']['fontsize']
    tickperx = params['224']['tickperx']
    dx,dy = params['224']['padxy']
    annotate_marker = params['224']['annotate_marker']
    annotate_color = params['224']['annotate_color']
    annotate_fontsize = params['224']['annotate_fontsize']
    annotate_rotation = params['224']['annotate_rotation']
    tmpdata = fits.open(gfile[0])[gfile[1]].data
    m = np.isfinite(tmpdata)
    vmin,vmax = np.percentile(tmpdata[m],params['224']['minmax'][0]),np.percentile(tmpdata[m],params['224']['minmax'][1])
    ax4.imshow(tmpdata,origin='lower',cmap=params['224']['cmap'],vmin=vmin,vmax=vmax)
    ax4.plot(xg,yg,color=params['224']['color'],ls=params['224']['ls'],lw=params['224']['lw'],alpha=params['224']['alpha'])
    for i,ii in enumerate(xg):
        # Label the trace endpoints plus every tickperx-th pixel.
        if (i in {0,len(xg)-1}) or (np.mod(i,tickperx)==0):
            label = '{0}A'.format(int(ww[i]))
            ax4.plot(xg[i],yg[i],annotate_marker)
            ax4.annotate(label,(xg[i],yg[i]),
                         textcoords='offset points',
                         xytext=(0,10),
                         ha='center',
                         fontsize=annotate_fontsize,
                         rotation=annotate_rotation,
                         color=annotate_color
                        )
    ax4.set_xlim(xg.min()-dx,xg.max()+dx)
    ax4.set_ylim(yg.min()-dy,yg.max()+dy)
    if params['224']['title'] == 'default':
        string = 'xyref = {0:.1f},{1:.1f}'.format(xref,yref)
    else:
        string = params['224']['title']
    ax4.set_title(string,fontsize=fontsize)
    plt.tight_layout()
    if save:
        if container is None:
            # Bug fix: was "raise ValueEror" (a NameError at runtime).
            raise ValueError('container must be specified to save.')
        saveprefix = container.data['saveprefix']
        savefolder = container.data['savefolder']
        saveformat = container.data['plotformat']
        string = './{2}/{0}_overview.{1}'.format(saveprefix,saveformat,savefolder)
        plt.savefig(string,format=saveformat,bbox_inches='tight')
        print('Save {0}\n'.format(string))
def convert_int_to_str(number: int, char: str = "'") -> str:
    """Convert an int into a string with *char* between every 3 digits.

    Uses the sign-aware ',' format specifier, which (unlike the previous
    string-slicing approach) never separates a minus sign from the digits
    (the old code turned -123 into "-'123").

    Parameters:
        number: The number which is gonna be converted.
        char: The characters which are gonna be inserted between every 3 digits.
    Example: 2364735247 --> 2'364'735'247
    """
    return f"{number:,}".replace(",", char)
def exists(index, doc_type, id, **kwargs):
    """
    Returns a boolean indicating whether or not given document exists in Elasticsearch.
    http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html
    """
    # Delegate to the generic request helper, echo the result, return it.
    result = request("exists", None, index, doc_type, id, **kwargs)
    jsonprint(result)
    return result
def deserialize_cookie(string):
    """Deserialize a '#'-separated cookie string into an internal cookie.

    Expected format: "<name>#<value>#<timestamp>" with percent-encoded
    name and value.  Returns None when the string is malformed.
    """
    parts = string.split("#")
    # str.split always returns at least one element, so the previous
    # "length == 0" check was unreachable; "< 3" covers all short inputs.
    if len(parts) < 3:
        return None
    if not is_int(parts[2]):
        return None
    return create_internal_cookie(
        unquote(parts[0]),
        unquote(parts[1]),
        parse_int(parts[2])
    )
def multivolume_record(self, key, value):
    """Mark record with many volumes inside."""
    tag = clean_val("a", value, str)
    _migration = self["_migration"]
    if tag == "MULTIVOLUMES1":
        is_parsed = False
    elif tag in ("MULTIVOLUMESX", "MULTIVOLUMESx"):
        is_parsed = True
    elif tag == "MULTIVOLUMES-MANUAL":
        # Records tagged for manual handling must never reach migration.
        raise Exception("This record should not be migrated!")
    else:
        raise UnexpectedValue(
            subfield="a", message=" unrecognized migration multipart tag",
        )
    _migration["multivolume_record"] = is_parsed
    raise IgnoreKey("multivolume_record")
def choose(n, k):
    """Binomial coefficient nCk used in binomial probability.

    Uses factorial().  Returns a float (true division); on invalid input
    (negative k > n, non-numeric arguments, ...) prints the usage text
    and returns None, matching the original error behaviour.

    Usage: choose(n, k)
    args:
        n = total number
        k = total number of sub-groups
    """
    try:
        return factorial(n)/(factorial(k) * factorial(n - k))
    except(ValueError, ZeroDivisionError, TypeError):
        # Typos in the usage text fixed: coeficient -> coefficient,
        # probablilty -> probability, funtion -> function.
        print("""This is a binomial coefficient nCk used in binomial probability
    this function uses factorial()
    Usage: choose(n, k)
    args:
    n = total number
    k = total number of sub-groups """)
def BiRNN(x, seq_lens):
    """Bidirectional LSTM followed by a dense projection to class logits.

    Parameters
    ----------
    x : Tensor
        Batch of input sequences, shape (batch_size, max_seq_len, n_input).
    seq_lens : tf.placeholder
        Actual (unpadded) length of each sequence in the batch; timesteps
        beyond a sequence's length are zeroed by bidirectional_dynamic_rnn.

    Returns
    -------
    Tensor
        Per-timestep logits, shape (batch_size, max_seq_len, n_classes).

    NOTE(review): relies on module-level ``n_hidden`` and ``n_classes``
    hyperparameters -- confirm they are defined before this is called.
    """
    # data input shape: (batch_size, seq_lens, n_input)
    # Define lstm cells with tensorflow
    # Forward direction cell
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Backward direction cell
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Get lstm cell output
    (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
        cell_fw=lstm_fw_cell,
        cell_bw=lstm_bw_cell,
        inputs=x,
        sequence_length=seq_lens,
        dtype=tf.float32)
    # concatenate the forward and backward layer outputs
    recurrent_layer_output = tf.concat([output_fw, output_bw], 2)
    # Project the 2*n_hidden concatenated features down to class scores.
    output = tf.layers.dense(
        inputs=recurrent_layer_output,
        units=n_classes)
    return output | 5,326,989 |
def get_os_group(name: _STR_OR_INT_OR_NONE = None) -> grp.struct_group:
    """Get an operating system group object.

    Args:
        name (:obj:`str` or :obj:`int`, optional): The "group name" or
            ``gid``.  Defaults to the current users's group.

    Raises:
        OSError: If the given ``name`` does not exist as a "group
            name" for this operating system.
        OSError: If the given ``name`` is a ``gid`` and it does not
            exist.

    :rtype:
        :obj:`struct_group <grp>`

        * A tuple like object.

    Example:
        >>> from flutils.pathutils import get_os_group
        >>> get_os_group('bar')
        grp.struct_group(gr_name='bar', gr_passwd='*', gr_gid=2001,
        gr_mem=['foo'])
    """
    # Default to the current user's primary group id.
    if name is None:
        name = cast(int, get_os_user().pw_gid)
    # Select the lookup function and failure message up front so a single
    # try/except handles both the gid and the group-name cases.
    if isinstance(name, int):
        lookup = grp.getgrgid
        failure = (
            'The given gid: %r, is not a valid gid for this operating '
            'system.' % name
        )
    else:
        lookup = grp.getgrnam
        failure = (
            'The given name: %r, is not a valid "group name" '
            'for this operating system.' % name
        )
    try:
        return lookup(name)
    except KeyError:
        raise OSError(failure)
def _process_output(response, context):
"""Post-process TensorFlow Serving output before it is returned to the client.
Args:
response (obj): the TensorFlow serving response
context (Context): an object containing request and configuration details
Returns:
(bytes, string): data to return to client, response content type
"""
if response.status_code != 200:
_return_error(response.status_code, response.content.decode('utf-8'))
response_content_type = context.accept_header
print("response.json():", response.json())
# remove whitespace from output JSON string
prediction = response.content.decode('utf-8').translate(dict.fromkeys(map(ord,whitespace)))
return prediction, response_content_type | 5,326,991 |
def auto_declare_serializers(models_module, context):
    """ Automatically declares classes from serializers
    :param models_module: Passes the module to search model classes.
    :param context: Context module to export classes, should passes locals().
    """
    for candidate in models_module.__dict__.values():
        # Only concrete, managed Django models that belong to this app.
        # (isclass is checked first so issubclass never sees a non-class.)
        if not inspect.isclass(candidate) \
                or not issubclass(candidate, models.Model) \
                or candidate._meta.abstract \
                or not candidate._meta.managed \
                or candidate._meta.app_label != context['__package__']:
            continue
        serializer_name = candidate.__name__ + 'Serializer'
        if serializer_name in context:
            # Do not override a hand-written serializer.
            continue
        # Build a ModelSerializer subclass on the fly.
        meta_cls = type('Meta', (object,),
                        dict(model=candidate, fields='__all__'))
        serializer = type(
            serializer_name,
            (serializers.ModelSerializer,),
            dict(Meta=meta_cls),
        )
        logger.debug(f'>>> Automatically declared <class \'{serializer.__name__}\'>')
        context[serializer.__name__] = serializer
def execute_dscl(option="-plist", datasource=".", command="-read", parameters=""):
    """Execute dscl and return the values

    Args:
        option (str, optional): The option to use. Defaults to "-plist".
        datasource (str, optional): The node to query. Defaults to ".".
        command (str, optional): The dscl command to run. Defaults to "-read".
        parameters (str, optional): Parameters that will be passed to the command option. Defaults to "".

    Returns:
        dict: A dict of the results from dscl

    Exits the process with status 2 when dscl fails.
    """
    results = execute_process(f"/usr/bin/dscl {option} {datasource} {command} {parameters}")
    # Verify command result
    if not results['success']:
        # Bug fix: the old message ("Failed to admin group membership!")
        # was copy/pasted from an unrelated helper; this runner is generic.
        print("Failed to execute dscl!")
        print(results['stderr'])
        sys.exit(2)
    # dscl emits a plist on stdout; parse it into a dict.
    return plistlib.loads(results['stdout'].encode())
def main():
    """ We train a model and then use it to predict on the specified videos

    Orchestration only: all real work happens in the imported helpers.
    The outer try/finally guarantees hyperparameter cleanup and server
    shutdown regardless of success or failure.
    """
    try:
        # This process periodically uploads the stdout and stderr files
        # to the S3 bucket. The website uses these to display stdout and stderr
        pid = os.getpid()
        # NOTE(review): upload_process is never joined/terminated here --
        # presumably start_uploading manages its own lifetime; confirm.
        upload_process = upload_stdout.start_uploading(pid)
        # Get training hyperparameters, insert new entries for new model in
        # users and model_versions tables
        model, model_params = get_model_and_params()
        user_model, new_version = get_user_model(model_params)
        model_user_id = create_model_user(new_version, model_params, user_model)
        # This removes all of the [INFO] outputs from tensorflow.
        # We still see [WARNING] and [ERROR], but there's a lot less clutter
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
        # If set, training will sometimes be unable to save the model
        os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
        concepts = model["concepts"]
        verify_videos = model["verificationvideos"]
        # If error occurs during training, remove entries for model in users
        # and model_versions tables
        try:
            start_training(user_model, concepts, verify_videos, model_params)
        except Exception as e:
            # Roll back the DB rows created above, then propagate the error
            # (re-raising `e` keeps the failure visible to the caller).
            delete_model_user(model_user_id)
            raise e
        # Training succeeded: run prediction on the verification videos.
        setup_predict_progress(verify_videos)
        evaluate_videos(concepts, verify_videos, user_model)
    finally:
        # Cleanup training hyperparameters and shut server down regardless
        # whether this process succeeded
        reset_model_params()
        shutdown_server() | 5,326,994 |
def extract_use_action_msgs(outfile, use_action, it_name, kwargs):
    """Extract messages for iuse_actor objects.

    Walks ``use_action`` (a parsed-JSON value) and, for each known message
    field, replaces its value with the result of ``writestr``.  Lists and
    dicts are traversed recursively because nested use actions may carry
    further messages.
    """
    # Idiom fix: isinstance() instead of type(...) is ...; the dict check
    # is loop-invariant, so it is hoisted out of the field loop.
    if isinstance(use_action, dict):
        for field in sorted(use_action_msgs):
            # NOTE(review): messages are only rewritten when the item has
            # a name (it_name truthy) -- confirm unnamed items need no i18n.
            if field in use_action and it_name:
                use_action[field] = writestr(use_action[field], **kwargs)
    # Recursively check sub objects as they may contain more messages.
    if isinstance(use_action, list):
        for child in use_action:
            extract_use_action_msgs(outfile, child, it_name, kwargs)
    elif isinstance(use_action, dict):
        for _key, child in sorted(use_action.items(), key=lambda x: x[0]):
            extract_use_action_msgs(outfile, child, it_name, kwargs)
def make_soup(text: str, mode: str="url", parser: str=PARSER) -> BeautifulSoup:
    """Return a BeautifulSoup for a URL, a file path, or raw markup.

    Args:
        text: URL (mode "url" or dict), file path (mode "file"), or raw markup.
        mode: "url" fetches the page (a dict value is treated as query
            params); "file" reads the file; anything else parses ``text``
            directly.
        parser: parser backend name passed to BeautifulSoup.
    """
    if mode == "url" or isinstance(mode, dict):
        params = mode if isinstance(mode, dict) else {}
        text = requests.get(text, params=params).text
    elif mode == "file":
        # Bug fix: read inside a context manager so the file handle is
        # closed (the previous code leaked an open file object).
        with open(text) as fh:
            text = fh.read()
    return BeautifulSoup(text, parser)
def _merge_inner_function(
    class_def, infer_type, intermediate_repr, merge_inner_function
):
    """
    Merge the inner function, if found within the class, into the class IR

    :param class_def: Class AST
    :type class_def: ```ClassDef```

    :param infer_type: Whether to try inferring the typ (from the default)
    :type infer_type: ```bool```

    :param intermediate_repr: a dictionary of form
        {  "name": Optional[str],
           "type": Optional[str],
           "doc": Optional[str],
           "params": OrderedDict[str, {'typ': str, 'doc': Optional[str], 'default': Any}]
           "returns": Optional[OrderedDict[Literal['return_type'],
                                           {'typ': str, 'doc': Optional[str], 'default': Any}),)]] }
    :type intermediate_repr: ```dict```

    :param merge_inner_function: Name of inner function to merge. If None, merge nothing.
    :type merge_inner_function: ```Optional[str]```

    :returns: the (possibly updated) intermediate_repr, same shape as above
    :rtype: ```dict```
    """
    # Find the first matching inner FunctionDef in AST walk order.
    inner_def = None
    for node in ast.walk(class_def):
        if isinstance(node, FunctionDef) and node.name == merge_inner_function:
            inner_def = node
            break
    if inner_def is not None:
        # A method's first positional arg names its "function type"
        # (e.g. 'self' / 'cls'); no args means it is static.
        positional = inner_def.args.args
        function_type = "static" if not positional else positional[0].arg
        inner_ir = function(
            inner_def,
            function_name=merge_inner_function,
            function_type=function_type,
            infer_type=infer_type,
        )
        ir_merge(other=inner_ir, target=intermediate_repr)

    return intermediate_repr
def dump_single_value(path_and_file, value, append=False):
    """
    Open a file in binary mode, pickle a single value into it, close it.

    :param path_and_file: path to file
    :param value: value to dump
    :param append: True to open file in "ab" mode (append), False to open
        in "wb" mode (truncate/overwrite).  By default: False ("wb").
        (Doc fix: the previous docstring incorrectly said "rb".)
    :return: None
    """
    with open(path_and_file, mode='ab' if append else 'wb') as f:
        pickle.dump(value, f, protocol=pickle.HIGHEST_PROTOCOL)
def fnmatch_fnmatch():
    """Demonstrate fnmatch.fnmatch() against the files in the cwd."""
    # Bug fix: the pattern was "*.PY"; fnmatch is case-sensitive on POSIX,
    # so it matched no .py files there and contradicted the sample output
    # below.  Use the lowercase pattern the sample output documents.
    pattern = "*.py"
    print("Pattern: ", pattern)
    print()
    files = os.listdir(".")
    for name in files:
        print("Filename: {:<25} {}".format(name, fnmatch.fnmatch(name, pattern)))
    """
    Pattern:  *.py

    Filename: a                         False
    Filename: dir                       False
    Filename: fnmatch_lib.py            True
    Filename: global_lib.py             True
    Filename: os_lib.py                 True
    Filename: pathlib_lib.py            True
    Filename: testfile                  False
    Filename: test_file                 False
    Filename: test_for_chmod.txt        False
    Filename: test_for_rglobal          False
    Filename: unit_test.py              True
    """
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.