| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def show_nicks(handles):
""" return terminal sequence for /users result. """
from x84.bbs import getterminal
term = getterminal()
return u''.join((
time.strftime('%H:%M'), u' ',
term.blue('-'), u'!', term.blue('-'),
u' ', term.bold_cyan('%d' % (len(handles))), u' ',
u'user%s: ' % (u's' if len(handles) > 1 else u''),
u', '.join(handles) + u'\n',))
| 5,337,400
|
def cvCmp(*args):
"""cvCmp(CvArr src1, CvArr src2, CvArr dst, int cmp_op)"""
return _cv.cvCmp(*args)
| 5,337,401
|
def convert_to_dtype(data, dtype):
"""
A utility function converting xarray, pandas, or NumPy data to a given dtype.
Parameters
----------
data: xarray.Dataset, xarray.DataArray, pandas.Series, pandas.DataFrame,
or numpy.ndarray
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
"""
if dtype is None: # Don't convert the data type.
return data
return data.astype(dtype)
| 5,337,402
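A minimal usage sketch for convert_to_dtype above, assuming only NumPy; the same call pattern applies to xarray and pandas objects since they share the astype interface.

import numpy as np

arr = np.arange(4, dtype=np.float64)
assert convert_to_dtype(arr, np.float32).dtype == np.float32   # converted copy
assert convert_to_dtype(arr, None) is arr                      # dtype=None passes data through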
|
def process_files(data_path, output_path):
"""Returns a pipeline which rebalances data shards.
Args:
data_path: File(s) to read.
output_path: Path to which output CSVs are written, if necessary.
"""
def csv_pipeline(root):
_ = (
root
| beam.io.ReadFromText(data_path)
| beam.io.WriteToText(output_path,
num_shards=FLAGS.num_output_files))
def tfrecord_pipeline(root):
"""Pipeline instantiation function.
Args:
root: Source pipeline from which to extend.
"""
example_coder = beam.coders.ProtoCoder(tf.train.Example)
_ = (
root
| beam.io.ReadFromTFRecord(data_path, coder=example_coder)
| beam.io.WriteToTFRecord(output_path, file_name_suffix="tfrecord",
coder=example_coder,
num_shards=FLAGS.num_output_files))
pipeline = tfrecord_pipeline if FLAGS.filetype == "tfrecord" else csv_pipeline
return pipeline
| 5,337,403
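A hypothetical driver for process_files above, assuming Apache Beam is installed and that FLAGS.filetype and FLAGS.num_output_files are configured as the snippet expects; the file paths here are illustrative. The returned function is applied to a pipeline root.

import apache_beam as beam

pipeline_fn = process_files("input-*.csv", "rebalanced/part")
with beam.Pipeline() as root:
    pipeline_fn(root)   # attaches the read/write transforms; the pipeline runs on exit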
|
def delete_tasklog_cached(dc_id, user_id=None):
"""
Remove tasklog cache entry.
"""
if user_id:
key = _cache_log_key(user_id, dc_id)
else:
key = _cache_log_key(settings.TASK_LOG_STAFF_ID, dc_id)
return cache.delete(key)
| 5,337,404
|
def strip_headers(data):
""" Strips headers from data #depreciate"""
try:
return data['items']
except (TypeError, KeyError) as e:
print(e)
return data
| 5,337,405
|
def sns_plot(chart_type: str, df):
""" return seaborn plots """
fig, ax = plt.subplots()
if chart_type == "Scatter":
with st.echo():
sns.scatterplot(
data=df,
x="bill_depth_mm",
y="bill_length_mm",
hue="species",
)
plt.title("Bill Depth by Bill Length")
elif chart_type == "Histogram":
with st.echo():
sns.histplot(data=df, x="bill_depth_mm")
plt.title("Count of Bill Depth Observations")
elif chart_type == "Bar":
with st.echo():
sns.barplot(data=df, x="species", y="bill_depth_mm")
plt.title("Mean Bill Depth by Species")
elif chart_type == "Boxplot":
with st.echo():
sns.boxplot(data=df)
plt.title("Bill Depth Observations")
elif chart_type == "Line":
with st.echo():
sns.lineplot(data=df, x=df.index, y="bill_length_mm")
plt.title("Bill Length Over Time")
elif chart_type == "3D Scatter":
st.write("Seaborn doesn't do 3D ☹️. Here's 2D.")
sns.scatterplot(data=df, x="bill_depth_mm", y="bill_length_mm", hue="island")
plt.title("Just a 2D Scatterplot")
return fig
| 5,337,406
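A small Streamlit sketch showing how sns_plot above might be wired up, assuming the Palmer Penguins dataframe bundled with seaborn (its column names match the snippet) and a selectbox for the chart type.

import seaborn as sns
import streamlit as st

df = sns.load_dataset("penguins")
chart_type = st.selectbox("Chart type", ["Scatter", "Histogram", "Bar", "Boxplot", "Line", "3D Scatter"])
st.pyplot(sns_plot(chart_type, df))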
|
def decode_fields(source_str: bytes, resp_type):
    """ Lower-level decode of fields; no automatic guessing of the type is performed. """
field_decoding = FIELD_MAPPING[resp_type]
unpacked_fields = {}
for field_name, field_type, field_subtype in field_decoding:
search_term = f"{field_name}:".encode()
field_location = source_str.find(search_term)
assert field_location >= 0
# Attempt to extract the value
field_value_start = field_location + len(search_term)
if field_type is list:
# Handle as a list
field_value_end = source_str.find(b']', field_value_start)
assert field_value_end > field_value_start
list_str = source_str[field_value_start + 1:field_value_end].strip()
if len(list_str) == 0:
field_list = []
else:
if field_subtype is int:
list_base = 16 if b'x' in list_str else 10
field_list = [int(x,list_base) for x in list_str.split(b',')]
elif field_subtype is str:
field_list = [x.replace(b"'", b"").replace(b'"',b"").decode() for x in list_str.split(b',')]
unpacked_fields[field_name] = field_list
else:
# Handle as a single value
field_value_end = source_str.find(b',', field_value_start)
assert field_value_end > field_value_start
if field_type is not bool:
field_value = field_type(source_str[field_value_start:field_value_end])
else:
field_value = source_str[field_value_start:field_value_end] == b'1'
unpacked_fields[field_name] = field_value
return unpacked_fields
| 5,337,407
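An illustrative, hypothetical FIELD_MAPPING entry and payload for decode_fields above, showing the wire format the parser expects (scalar values terminated by ',' and lists wrapped in '[...]'). In the real module FIELD_MAPPING is a module-level constant; the keys and field names below are made up.

FIELD_MAPPING = {"status": [("count", int, None), ("ids", list, int), ("ok", bool, None)]}

payload = b"count:3,ids:[0x1,0x2],ok:1,"
print(decode_fields(payload, "status"))
# {'count': 3, 'ids': [1, 2], 'ok': True}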
|
def make_egg(a=-1.25, b=7):
"""
Return x, y points that resemble an egg.
Egg equation is:
r = cos(2θ) + a * cos(θ) + b
@param a: Number.
@param b: Number.
"""
theta = np.linspace(0, 2 * np.pi, 100)
r = np.cos(2 * theta) + a * np.cos(theta) + b
y = r * np.cos(theta)
x = r * np.sin(theta)
return np.array([x, y]).T.tolist()
| 5,337,408
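A quick plot of the output of make_egg above, a sketch assuming matplotlib is available; the function returns a list of [x, y] pairs.

import matplotlib.pyplot as plt

points = make_egg()          # 100 [x, y] pairs
xs, ys = zip(*points)
plt.plot(xs, ys)
plt.axis("equal")
plt.show()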
|
def test_styling_object_which_implements_str_proto():
"""
Test styling an object which implements the str protocol
"""
class Dummy(object):
def __str__(self):
return 'I am a dummy object'
colorful = core.Colorful(colormode=terminal.ANSI_8_COLORS)
assert str(colorful.black(Dummy())) == '\033[30mI am a dummy object\033[39m'
| 5,337,409
|
def determine_auto_approval(financial_aid, tier_program):
"""
Takes income and country code and returns a boolean if auto-approved. Logs an error if the country of
financial_aid does not exist in CountryIncomeThreshold.
Args:
financial_aid (FinancialAid): the financial aid object to determine auto-approval
tier_program (TierProgram): the TierProgram for the user's income level
Returns:
boolean: True if auto-approved, False if not
"""
try:
country_income_threshold = CountryIncomeThreshold.objects.get(country_code=financial_aid.country_of_income)
income_threshold = country_income_threshold.income_threshold
except CountryIncomeThreshold.DoesNotExist:
log.error(
"Country code %s does not exist in CountryIncomeThreshold for financial aid id %s",
financial_aid.country_of_income,
financial_aid.id
)
income_threshold = DEFAULT_INCOME_THRESHOLD
if tier_program.discount_amount == 0:
# There is no discount so no reason to go through the financial aid workflow
return True
elif income_threshold == 0:
        # The income threshold is zero, so auto-approve without checking the financial aid application
return True
else:
return financial_aid.income_usd > income_threshold
| 5,337,410
|
def crustal_model_files(alt = [200, 1000], anomaly = 'Global', lim = [0., 360., -90., 90.], binsize = 0.1):
    """
    Reads the .bin IDL files of the crustal magnetic field model (Langlais) for a range of altitudes and creates a function based on a linear interpolation.
    Parameters:
        alt: 2-element array, optional
            The array containing the altitude range. Default is [200, 1000] km.
        anomaly: string, optional
            The anomaly index, e.g., A1, A2, A6, etc. This string is used to find the directory where the model matrices are located. Default is 'Global'.
        lim: 4-element array, optional
            An array containing the limits for longitude and latitude data, in the form [lon_min, lon_max, lat_min, lat_max]. Default is the whole range of Mars.
        binsize: double, optional
            The size of the lon and lat bins (they must be the same size). Default is 0.1 degrees.
    Returns:
        A function and a matrix containing the data.
    """
longitude = np.linspace(lim[0], lim[1], int((lim[1] - lim[0]) / binsize + 1))
latitude = np.linspace(lim[2], lim[3], int((lim[3] - lim[2]) / binsize + 1))
altitude = np.linspace(alt[0], alt[1], int(alt[1] - alt[0] + 1))
br = np.empty((len(longitude), len(latitude), len(altitude)))
for i in range(len(altitude)):
h = int(i + alt[0])
data = sp.io.readsav('/home/oliveira/ccati_mexuser/LANGLAIS_Matrices/'+anomaly+'/LANGLAIS_BR_ALT_' + \
str(h) + '_RES_01.bin')
br[:, :, i] = data['zbins'].T
fn = rgi((longitude, latitude, altitude), br)
return fn, br
| 5,337,411
|
def write_gain_expressions_for_table():
"""Write expressions for google tables into file"""
seism_NRL = load_seism_NRL()
digi_NRL, _ = load_digi_NRL()
nrl = NRL()
for v in digi_NRL.values():
if v:
v[-1] = v[-1].format(sr=100)
# get sensitivities
digi_sens = {k: nrl.get_datalogger_response(v).instrument_sensitivity.value for k, v in digi_NRL.items() if v}
seism_sens = {k: nrl.get_sensor_response(v).instrument_sensitivity.value for k, v in seism_NRL.items()}
expr = ('expression for digi_NRL:\n=SWITCH(A2, {}, "")\n\n'
'expression for seism_NRL:\n=SWITCH(A2, {}, "")')
ins1 = ', '.join('"{}", {}'.format(k, v) for k, v in digi_sens.items())
ins2 = ', '.join('"{}", {}'.format(k, v) for k, v in seism_sens.items())
with open(OUT + 'expressions_for_table.txt', 'w') as f:
f.write(expr.format(ins1, ins2))
| 5,337,412
|
def f1(y_true, y_pred):
"""
    Function for computing the unweighted F1 score using tensors. The function
    handles only the binary case and computes the unweighted F1 score
    for the positive class only.
    Args:
        - y_true: keras tensor, ground truth labels
        - y_pred: keras tensor, labels estimated by the model
    Returns:
        - f1: float, unweighted F1 score for the positive class
"""
precision_v = precision(y_true, y_pred)
recall_v = recall(y_true, y_pred)
    numerator = 2 * (precision_v * recall_v)
    denominator = precision_v + recall_v + K.epsilon()
    f1 = numerator / denominator
return f1
| 5,337,413
|
def get_selector_score(key, selector, use_median, best_based_on_final):
"""
:param key: Thing to measure (e.g. Average Returns, Loss, etc.)
:param selector: Selector instance
:param use_median: Use the median? Else use the mean
:param best_based_on_final: Only look at the final value? Else use all
values.
:return: A single number that gives the score of `key` inside `selector`
"""
data = selector.extract()
if best_based_on_final:
values = [
exp.progress.get(key, np.array([np.nan]))[-1]
for exp in data
]
else:
values = np.concatenate([
exp.progress.get(key, np.array([np.nan]))
for exp in data
] or [[np.nan]])
if len(values) == 0 or not np.isfinite(values).all():
return np.nan
if use_median:
return np.nanpercentile(values, q=50, axis=0)
else:
return np.nanmean(values)
| 5,337,414
|
def str_to_timezone(tz):
"""
    Construct a timezone from a string; fall back to UTC if the string is empty.
"""
return pytz.timezone(tz) if tz else pytz.utc
| 5,337,415
|
def _get_previous_index_search_col(
m, col, nested_list, trans_function=None, transformation=False
):
"""Return previous index of a a key, from a sorted nested list where a key is being seached in the col number.Returns -1 if value is not found.
Args:
m (comparable): comparable being searched
col (int): Column number to be searched.
nested_list (list): Nested List with the values being searched. Ex [[0,1,2,2][0,1,2,2]] First inner list represents a row of attributes of an instance.
trans_function (func): Function to transform the comparison value of the column.
transformation (boolean): If true uses a tranformation in the column value before comparison of the values.
Returns:
int: Index of the value being searched.
"""
ix = _search_previous_col(m, col, nested_list, trans_function, transformation)
assert ix != -1, f"Previous keyword to {m} was not found."
return ix
| 5,337,416
|
def guest_import(hypervisor, host):
"""
Import a new guest
::
POST /:hypervisor/:host/guests
"""
response.content_type = "application/json"
manager = create_manager(hypervisor, host)
guest = manager.guest_import(
request.environ['wsgi.input'],
request.content_length
)
location = "/%s/%s/guests/%s" % (hypervisor, host, guest["id"])
response.set_header("Location", location)
manager.logout()
return json.dumps(guest)
| 5,337,417
|
def get_doc_count(
group_by: List[str] = ["year", "country"],
sort_by: List[metadata.SortOn] = [
metadata.SortOn(field="year", order=metadata.SortOrder.desc),
metadata.SortOn(field="count", order=metadata.SortOrder.desc)],
limit: int = 10):
"""This endpoint provides a generic interface to get the count of documents given an arbitrary set of `group_by` fields. The return value can be sorted based on the `sort_by` fields input. The number of returned groups is limited by the `limit` parameter.
"""
assert len(set(so.field
for so in sort_by).difference(group_by + ['count'])) == 0
group_id = {b: f"${b}" for b in group_by}
sort_by = SON(
[(so.field, -1 if so.order == metadata.SortOrder.desc else 1) for so in sort_by])
projection = {b: f"$_id.{b}" for b in group_by}
projection["count"] = "$count"
projection["_id"] = 0
    # Identify fields that need unwinding, if any
list_fields = set(["adm_region", "author", "country", "der_acronyms", "doc_type",
"geo_region", "major_doc_type", "topics_src", "wb_subtopic_src"])
unwind_fields = [{"$unwind": f"${b}"}
for b in list_fields.intersection(group_by)]
pipeline = []
if unwind_fields:
pipeline = unwind_fields
pipeline.extend([
{"$group": {"_id": group_id, "count": {"$sum": 1}}},
{"$project": projection},
{"$sort": sort_by},
{"$limit": limit},
])
agg = mongodb.get_docs_metadata_collection().aggregate(
pipeline
)
values = [{"rank": ix, **result} for ix, result in enumerate(agg, 1)]
return values
| 5,337,418
|
def get_author_list(text):
"""function to extract authors from some text that will also include
associations
example input:
`J. C. Jan†, F. Y. Lin, Y. L. Chu, C. Y. Kuo, C. C. Chang, J. C. Huang and C. S. Hwang,
National Synchrotron Radiation Research Center, Hsinchu, Taiwan, R.O.C`
or
`M.B. Behtouei, M. Migliorati, L. Palumbo, B. Spataro, L. Faillace`
assumptions:
- if you split by ', ' and the second character of a token is a '.' period
then its probably a valid token (an author) but this is not guaranteed
(see above example that ends in 'R.O.C')
- There can be multiple initials as evidenced above.
- Initials may not necessarily be split by a space.
    watch out for:
    - hyphenated names: 'B. Walasek-Hoehne'
    - hyphenated initials: 'E. J-M. Voutier' 'J.-L. Vay'
    - multiple surnames: 'M.J. de Loos' 'S.B. van der Geer' 'A. Martinez de la Ossa' 'N. Blaskovic Kraljevic' 'G. Guillermo Cantón' 'C. Boscolo Meneguolo'
    - surnames with apostrophes: "G. D'Alessandro"
    - extra stuff tacked on: 'S.X. Zheng [on leave]' 'G.R. Li [on leave]' (from the csv file)
    - one rare instance of non-period-separated initials: 'Ph. Richerot' (from the csv file)
my pattern of a name which should match vast majority of names while not matching vast majority of non-names:
single letter, followed by a period, potentially followed by a space but
not always, repeated n times, and ending in a word of more than one character
which may contain hyphens, apostrophes, repeated n times, and finally
finishing with a comma
word character followed by dot and potentially space, repeated n times
then
word character repeated n times
/(\\w\\.\\ ?)+(\\w+\\ ?)+/g (note this comment had to double up the escape backslashes)
(https://regexr.com/)
"""
newline_fixed_text = text
for newline_char in LINE_TERMINATOR_CHARS:
newline_fixed_text = newline_fixed_text.replace(newline_char, ', ')
potential_authors = newline_fixed_text.replace(NON_BREAKING_SPACE, ' ').replace(' and ', ', ').split(', ')
filtered_authors = list()
my_name_pattern = re.compile("(-?\\w\\.\\ ?)+([\\w]{2,}\\ ?)+")
# the allowance of an optional hyphen preceding an initial is to satisfy a
# common pattern observed with the papers coming out of asia.
for author in potential_authors:
if my_name_pattern.match(author): # match has an implied ^ at the start
# which is ok for our purposes.
filtered_authors.append(author)
return filtered_authors
| 5,337,419
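A usage sketch for get_author_list above, reusing the second example from its docstring; it assumes LINE_TERMINATOR_CHARS and NON_BREAKING_SPACE are defined at module level, as the function requires.

text = "M.B. Behtouei, M. Migliorati, L. Palumbo, B. Spataro, L. Faillace"
print(get_author_list(text))
# ['M.B. Behtouei', 'M. Migliorati', 'L. Palumbo', 'B. Spataro', 'L. Faillace']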
|
def start_transfer(protocol, chunk_size, ip, port, file, stop_and_wait):
""" Benchmark client """
client_class = cls_client[protocol]
client = client_class(ip=ip, port=port, stop_and_wait=stop_and_wait)
print('[CLIENT] Started client {}, chunk size {} for server {}:{} ... '.format(protocol, chunk_size, ip, port))
start_time = time.time()
chunk_size, sent_chunks_count, chunks_count, sent_size = client.send_file(file, chunk_size)
end_time = time.time()
delta_time = float(end_time) - start_time
print('Out: PROTOCOL, CHUNK_SIZE, SENT_CHUNKS, TOTAL_CHUNKS, SENT_BYTES, TIME')
print('Out: {}, {}, {}, {}, {}, {}\n'.format(
protocol,
chunk_size,
sent_chunks_count,
chunks_count,
sent_size,
delta_time,
))
| 5,337,420
|
def find_matching_format_function(word_with_formatting, format_functions):
""" Finds the formatter function from a list of formatter functions which transforms a word into itself.
Returns an identity function if none exists """
for formatter in format_functions:
formatted_word = formatter(word_with_formatting)
if word_with_formatting == formatted_word:
return formatter
return lambda word: word
| 5,337,421
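A short sketch of how find_matching_format_function above behaves with the built-in str case formatters; the inputs are illustrative.

formatters = [str.lower, str.title, str.upper]

fmt = find_matching_format_function("HELLO", formatters)
assert fmt("world") == "WORLD"          # str.upper maps "HELLO" to itself

fallback = find_matching_format_function("MiXeD", formatters)
assert fallback("MiXeD") == "MiXeD"     # identity function when nothing matches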
|
def mean(inlist: List[float]) -> float:
    """
    Returns the arithmetic mean of the values in the passed list.
    Assumes a '1D' list, but will function on the 1st dim of an array(!).
    Usage:   mean(inlist)
"""
sum = 0
for item in inlist:
sum = sum + item
return sum/float(len(inlist))
| 5,337,422
|
def save_model(model_file_name, model):
"""
save_model(model_file_name, model) -> None
Save a LIBLINEAR model to the file model_file_name.
"""
liblinear.save_model(model_file_name.encode(), model)
| 5,337,423
|
def print_files(files_from_disk, comparison_info):
"""Print both lists of files.
"""
print("Files in both database and on disk:\n")
for fn in list(files_from_disk.keys()):
if fn not in comparison_info['dbonly'] and fn not in comparison_info['diskonly']:
print(" %s" % (fn))
print("\n")
print(" Files only found in database:\n")
if len(comparison_info['dbonly']) == 0:
print("None\n\n")
else:
for fn in comparison_info['dbonly']:
print(" %s" % (fn))
print("\n")
print(" Files only found on disk:\n")
if len(comparison_info['diskonly']) == 0:
print("None\n\n")
else:
for fn in comparison_info['diskonly']:
print(" %s" % (fn))
print("\n")
| 5,337,424
|
def create_file_download_url(file_path: str) -> str:
"""
    Creates a Telegram URL for downloading a file.
- contains secret information (bot token)!
:param file_path: `file_path` property of `File` object.
"""
token = environ["TELEGRAM_API_BOT_TOKEN"]
return create_url(
"https://api.telegram.org/file",
f"bot{token}",
file_path
)
| 5,337,425
|
def event_post():
"""this sample is to post not only one event"""
event_post_request = EventPostRequest.builder().set_event_identifier('Error') \
.add_value("power", 124) \
.add_values(SampleHelper.EVENTS_VALUE) \
.build()
event_post_response = client.publish(event_post_request)
if event_post_response:
print('event_post_response: %s' % event_post_response.get_code())
| 5,337,426
|
def convert_to_csv(items):
"""
Args:
items: all arns in a region from the DynamoDB query as a list
returns:
        csv_text: body of the csv file to write out
"""
fieldnames = ["Package", "Package Version", "Status", "Expiry Date", "Arn"]
# sort by package, and then created date (oldest to newest)
sorted_items = sorted(items, key=lambda i: (i["pckg"].lower(), i["crtdDt"]))
with open("/tmp/packages.csv", "w", newline="") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for item in sorted_items:
# convert datetime to human readable
try:
if item["exDt"]:
item["exDt"] = datetime.utcfromtimestamp(item["exDt"]).isoformat()
except KeyError:
item["exDt"] = ""
csv_item = {
"Package": item["pckg"],
"Package Version": item["pckgVrsn"],
"Arn": item["arn"],
"Status": item["dplySts"],
"Expiry Date": item["exDt"],
}
writer.writerow(csv_item)
with open("/tmp/packages.csv", "r") as csvfile:
csv_text = csvfile.read()
return csv_text
| 5,337,427
|
def _check_resample_inputs(x, f, direction, shift):
"""Checks the inputs to _downsample() and _upsample()."""
if len(x.shape) != 3:
raise ValueError('Expected `x` to have rank 3, but is of size {}'.format(
x.shape))
if len(f.shape) != 1:
raise ValueError('Expected `f` to have rank 1, but is of size {}'.format(
f.shape))
if not (direction == 0 or direction == 1):
raise ValueError(
'Expected `direction` to be 0 or 1, but is {}'.format(direction))
if not (shift == 0 or shift == 1):
raise ValueError('Expected `shift` to be 0 or 1, but is {}'.format(shift))
| 5,337,428
|
async def test_todo_api(app, test_cli):
"""
testing todo api
"""
# GET
resp = await test_cli.get('/api/todo')
assert resp.status == 200
resp_json = await resp.json()
assert len(resp_json['todo_list']) == 0
# POST
resp = await test_cli.post(
'/api/todo',
data=json.dumps({
'name': 'new_todo',
}),
headers={'Content-Type': 'application/json'}
)
assert resp.status == 201
# GET
resp = await test_cli.get('/api/todo')
assert resp.status == 200
resp_json = await resp.json()
assert len(resp_json['todo_list']) == 1
assert resp_json['todo_list'][0]['name'] == 'new_todo'
# DELETE
resp = await test_cli.delete(
'/api/todo/1',
)
assert resp.status == 200
# GET
resp = await test_cli.get('/api/todo')
assert resp.status == 200
resp_json = await resp.json()
assert len(resp_json['todo_list']) == 0
| 5,337,429
|
def manhattan(train_X, val_X):
"""
:param train_X: one record from the training set
(type series or dataframe including target (survived))
:param val_X: one record from the validation set
series or dataframe include target (survived)
:return: the Manhattan distance between train_X and val_X
"""
diff = train_X - val_X
    # Remove survived column
    diff = diff.iloc[:, :-1]
    # Manhattan (L1) distance is the sum of absolute differences (no square root)
    dist = np.abs(diff).sum(axis=1)
return dist
| 5,337,430
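A tiny worked example for manhattan above (after the fix: sum of absolute differences, no square root), assuming single-row pandas DataFrames that share an index and end with the 'survived' column.

import pandas as pd

train_row = pd.DataFrame([[1.0, 2.0, 0]], columns=["a", "b", "survived"])
val_row = pd.DataFrame([[4.0, 6.0, 1]], columns=["a", "b", "survived"])
print(manhattan(train_row, val_row))   # |1-4| + |2-6| = 7.0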
|
def generate_input(start, stop, step):
"""
Create input arguments for the function to be timed.
"""
for n in range(start, stop, step):
l = [random.randint(0, n) for _ in range(n)]
yield (l, ), n
| 5,337,431
|
def author_idea_list(request, registrant_id):
"""
Returns author ideas
"""
registrant = get_object_or_404(Registrant, pk=registrant_id)
ideas = Idea.objects.filter(author=registrant)
serializer = IdeaSerializer(ideas, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
| 5,337,432
|
def compare_distilled(dir):
"""Compare all the distilled contents from the original pages with those from the mhtml archives
Args:
dir (str): directory containing all the extracted features
"""
files = [os.path.join(dir, f) for f in os.listdir(dir)]
mdfeatures = [f for f in files if os.path.isfile(f) and os.path.splitext(f)[1] == '.mdfeature']
err = 0
for mdfeature in mdfeatures:
dfeature = os.path.splitext(mdfeature)[0] + '.dfeature'
if not compare_innerText(dfeature, mdfeature):
err += 1
    print('%d/%d have different distilled content from mhtml' % (err, len(mdfeatures)))
| 5,337,433
|
def match(text: str, pattern: str) -> bool:
"""
Match a text against a given regular expression.
:param text: string to examine.
:param pattern: regular expression.
:returns: ``True`` if pattern matches the string.
"""
return re.match(pattern, text) is not None
| 5,337,434
|
def find_marker(log):
"""Function: find_marker
Description: Locates the file marker.
Arguments:
(input) log -> LogFile class instance
"""
if log.marker:
log.find_marker(update=True)
| 5,337,435
|
async def pp_(message: discord.Message, beatmap_url: str, *options):
""" Calculate and return the would be pp using `rosu-pp`.
Options are a parsed set of command-line arguments: /
`([acc]% | [num_100s]x100 [num_50s]x50) +[mods] [combo]x [misses]m hp[hp] ar[ar] od[od] cs[cs] [clock_rate]*`
**Additionally**, PCBOT includes a *find closest pp* feature. This works as an
argument in the options, formatted like `[pp_value]pp`
"""
try:
beatmap_info = api.parse_beatmap_url(beatmap_url)
assert beatmap_info.beatmap_id, "Please link to a specific difficulty."
assert beatmap_info.gamemode, "Please link to a specific mode."
params = {
"beatmap_id": beatmap_info.beatmap_id,
}
beatmap = (await api.beatmap_lookup(params=params, map_id=beatmap_info.beatmap_id,
mode=beatmap_info.gamemode.name))
assert not beatmap["convert"], "Converts are not supported by the PP calculator."
pp_stats = await pp.calculate_pp(beatmap_url, *options, mode=beatmap_info.gamemode,
ignore_osu_cache=not bool(beatmap["status"] == "ranked" or
beatmap["status"] == "approved"))
except ValueError as e:
await client.say(message, str(e))
return
options = list(options)
if isinstance(pp_stats, pp.ClosestPPStats):
# Remove any accuracy percentage from options as we're setting this manually, and remove unused options
for opt in options.copy():
if opt.endswith("%") or opt.endswith("pp") or opt.endswith("x300") or opt.endswith("x100") or opt.endswith(
"x50"):
options.remove(opt)
options.insert(0, f"{pp_stats.acc}%")
for opt in options.copy():
if opt.startswith("+"):
options.append(opt.upper())
options.remove(opt)
await client.say(message,
"*{artist} - {title}* **[{version}] {0}** {stars:.02f}\u2605 would be worth `{pp:,.02f}pp`."
.format(" ".join(options), artist=beatmap["beatmapset"]["artist"],
title=beatmap["beatmapset"]["title"], version=beatmap["version"], stars=pp_stats.stars,
pp=pp_stats.pp))
| 5,337,436
|
def _compute_hash_check(input_strings: tf.Tensor, field_size: int, seed: int,
dtype: tf.dtypes.DType) -> tf.Tensor:
"""Returns the hash_check for input_strings modulo field_size."""
hash_check_salt = _get_hash_check_salt(seed)
salted_input = tf.strings.join([hash_check_salt, input_strings])
hash_check = tf.strings.to_hash_bucket_fast(
salted_input, num_buckets=field_size)
hash_check = tf.reshape(hash_check, shape=[tf.size(hash_check), 1])
hash_check = tf.cast(hash_check, dtype=dtype)
return hash_check
| 5,337,437
|
def _trademark(request):
"""
    Access to the product database is available here, making a request to save/check the data
    for storage inside the database.
"""
# site data from scrap program
websitename = WebsiteClassName().getProducts( WebsiteClassName().getCategoryLinks() )
# access the data structure we need to save in the db
websitename_data = DataCompiler().getPythonData( websitename )
# Show the name of items inserted in DB
items_inserted = []
# counter for each item scrapped in total
items_counter = 0
with transaction.atomic():
for item in websitename_data:
try:
                # creates the data objects and places them in the appropriate tables/rows. The website id will be assigned in Step 1.
# See the Readme.md in algorithm_scrape in github repo this id will assign the products to the correct registered website.
# To see website id all see the docs in the repo
data_store = Product.objects.get_or_create( product_slug_url=item['product_slug_url'], website_id=int, defaults=item )
if data_store:
# Logging for Django purposes
logger.debug('Inserting %r into products', item )
items_inserted.append( item['product_name'] )
items_counter += 1
# Gives a count of how many items are in the database
data_count = Product.objects.filter( website_id=int ).count()
# saves the instance of all the products inside the database
data_store.save()
else:
# updates any new items of fields inside the database
data_store.update(**item)
except Exception:
# "Not inserted ==>", into database
logger.exception('Something went wrong inserting a new entry %r', item )
return HttpResponse('<h1>Products saved!</h1><br\>'
'<h2> %r Total Products Scrapped</h2><br\>'
'<h4> %r Products Currently in db</h4><br\>'
'<div><ul> <li>%s</li> </ul></div>' % (items_counter, data_count, items_inserted )
)
| 5,337,438
|
def calc_extinction(radius:float, mosaicity:float, model:str,
a:float, b:float, c:float, alpha:float, beta:float, gamma:float,
h:float, k:float, l:float,
f_sq:float, wavelength:float, flag_derivative_f_sq=False):
"""
    Isotropic extinction correction y:
    $$
    |F|^2_{\text{corrected}} = y \cdot |F|^2
    $$
    radius     primary extinction ???
    mosaicity  secondary extinction
    model = "gauss" or "lorentz"
    a, b, c, alpha, beta, gamma are unit cell parameters (in angstroms and radians)
    h, k, l are Miller indices
    f_sq is the square of the structure factor (in 10^-12 cm)
    wavelength is the neutron wavelength in angstroms
flag_derivative_radius
flag_derivative_mosaicity
flag_derivative_a
flag_derivative_b
flag_derivative_c
flag_derivative_alpha
flag_derivative_beta
flag_derivative_gamma
flag_derivative_f_sq
flag_derivative_wavelength
"""
r = float(radius)
g = float(mosaicity)
g_sq = numpy.square(g)
kk = 1.
c_a, c_b, c_g = numpy.cos(alpha), numpy.cos(beta), numpy.cos(gamma)
volume_unit_cell = calc_volume_uc_by_abc_cosines(a, b, c, c_a, c_b, c_g)
sthovl = calc_sthovl_by_hkl_abc_cosines(h, k, l, a, b, c, c_a, c_b, c_g)
yext, dder = calc_extinction_2(radius, mosaicity, model,
f_sq, volume_unit_cell, sthovl, wavelength)
return yext, dder
| 5,337,439
|
def package_software(pack_type, name_idx, version, debug):
"""
    Package and compress the software.
    @param pack_type: packaging type, 'update' for full or 'patch' for incremental
    @param name_idx: name index
    @param version: version
    @param debug: debug enable flag
@return:
"""
for item in pack_proc:
if item.option == name_idx:
item.proc(pack_type, item.name, version, debug)
return
print("不支持打包选项: ", name_idx)
print("使用 --help 查看帮助")
| 5,337,440
|
def smape(y_true: Yannotation, y_pred: Yannotation):
"""
    Calculate the symmetric mean absolute percentage error between `y_true` and `y_pred`.
Parameters
----------
y_true : array, `dataframe`, list or `tensor`
Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred : array, `dataframe`, list or `tensor`
The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns
-------
error : `tensor`
        Symmetric mean absolute percentage error values. shape = `[batch_size, d0, .. dN-1]`.
Examples
--------
>>> from autopycoin.losses import smape
>>> import tensorflow as tf
>>> y_true = [[0., 1.], [0., 0.]]
>>> y_pred = [[1., 1.], [1., 0.]]
>>> smape(y_true, y_pred).numpy()
array([99.999985, 99.999985], dtype=float32)
"""
if not isinstance(y_pred, tf.RaggedTensor):
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.cast(y_true, dtype=y_pred.dtype)
error = tf.abs(y_true - y_pred) / (
tf.maximum(tf.abs(y_true), epsilon()) + tf.abs(y_pred)
)
return 200.0 * tf.reduce_mean(error, axis=-1)
| 5,337,441
|
def acosh(rasters, extent_type="FirstOf", cellsize_type="FirstOf", astype=None):
"""
The ACosH operation
The arguments for this function are as follows:
:param rasters: array of rasters. If a scalar is needed for the operation, the scalar can be a double or string
:param extent_type: one of "FirstOf", "IntersectionOf", "UnionOf", "LastOf"
    :param cellsize_type: one of "FirstOf", "MinOf", "MaxOf", "MeanOf", "LastOf"
:param astype: output pixel type
:return: the output raster
"""
return local(rasters, 59, extent_type=extent_type, cellsize_type=cellsize_type, astype=astype)
| 5,337,442
|
def load_does(
filepath: PathType, defaults: Optional[Dict[str, bool]] = None
) -> Tuple[Any, Any]:
"""Load_does from file."""
does = {}
defaults = defaults or {"do_permutation": True, "settings": {}}
data = OmegaConf.load(filepath)
data = OmegaConf.to_container(data)
mask = data.pop("mask")
for doe_name, doe in data.items():
for k in defaults:
if k not in doe:
doe[k] = defaults[k]
does[doe_name] = doe
return does, mask
| 5,337,443
|
def causal_segment_mask(segment_ids: JTensor,
dtype: jnp.dtype = jnp.float32) -> JTensor:
"""Computes the masks which combines causal masking and segment masks.
Args:
segment_ids: a JTensor of shape [B, T], the segment that each token belongs
to.
dtype: data type of the input.
Returns:
A JTensor of shape [B, 1, T, T].
"""
# [B, 1, T, T]
segment_mask_t = segment_mask(segment_ids, dtype=dtype)
# [1, 1, T, T]
b, t = segment_ids.shape
causal_mask_t = causal_mask(jnp.zeros([b, t, 1], dtype=dtype))
return jnp.minimum(segment_mask_t, causal_mask_t)
| 5,337,444
|
def __docs__(self):
"""THIS is a libray used for creating all required filrs for making liabrary also for creating normal files"""
pass
| 5,337,445
|
def remove_outliers(peaks: np.ndarray, **kwargs):
"""
https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.LocalOutlierFactor.html#sklearn.neighbors.LocalOutlierFactor
https://scikit-learn.org/stable/modules/outlier_detection.html
    Parameters
    ----------
    peaks : np.ndarray
        Array of points to screen for outliers.
    kwargs
        Keyword arguments forwarded to sklearn.neighbors.LocalOutlierFactor.
    Returns
    -------
    A tuple of (inliers, outliers) arrays.
"""
clf = LocalOutlierFactor(**kwargs)
is_inlier = clf.fit_predict(peaks) # 1 inliers, -1 is outliers
mask = is_inlier == 1
return peaks[mask], peaks[np.invert(mask)]
| 5,337,446
|
def powerset(iterable):
""" Calcualtes the powerset, copied from https://docs.python.org/3/library/itertools.html#itertools-recipes """
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
| 5,337,447
|
def get_rotational_vector(skew_symmetric):
"""Get the rotational vector from a skew symmetric matrix.
Parameters
----------
skew_symmetric: numpy.ndarray
the skew symmetric matrix.
Returns
-------
rotational_vector:
the rotational vector.
"""
# make sure that the input is skew symmetric
if np.linalg.norm(skew_symmetric + skew_symmetric.T) > 1e-12:
raise ValueError("The input is not skew symmetric!")
rotational_vector = np.zeros((3, 1), dtype=float)
rotational_vector[0] = skew_symmetric[2, 1]
rotational_vector[1] = skew_symmetric[0, 2]
rotational_vector[2] = skew_symmetric[1, 0]
return rotational_vector
| 5,337,448
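A quick check of get_rotational_vector above on the skew-symmetric matrix built from the vector (1, 2, 3); a minimal sketch assuming NumPy.

import numpy as np

S = np.array([[ 0., -3.,  2.],
              [ 3.,  0., -1.],
              [-2.,  1.,  0.]])
print(get_rotational_vector(S).ravel())   # [1. 2. 3.]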
|
def diaperchange_lifetimes(changes):
"""
Create a graph showing how long diapers last (time between changes).
:param changes: a QuerySet of Diaper Change instances.
    :returns: a tuple of the graph's html and javascript.
"""
changes = changes.order_by("time")
durations = []
last_change = changes.first()
for change in changes[1:]:
duration = change.time - last_change.time
if duration.seconds > 0:
durations.append(duration)
last_change = change
trace = go.Box(
y=[round(d.seconds / 3600, 2) for d in durations],
name=_("Changes"),
jitter=0.3,
pointpos=-1.8,
boxpoints="all",
)
layout_args = utils.default_graph_layout_options()
layout_args["height"] = 800
layout_args["title"] = _("<b>Diaper Lifetimes</b>")
layout_args["yaxis"]["title"] = _("Time between changes (hours)")
layout_args["yaxis"]["zeroline"] = False
layout_args["yaxis"]["dtick"] = 1
fig = go.Figure({"data": [trace], "layout": go.Layout(**layout_args)})
output = plotly.plot(fig, output_type="div", include_plotlyjs=False)
return utils.split_graph_output(output)
| 5,337,449
|
def batch_norm_conv(x, n_out, phase_train, scope='bn'):
"""
Batch normalization on convolutional maps.
Args:
x: Tensor, 4D BHWD input maps
n_out: integer, depth of input maps
        phase_train: boolean tf.Variable, true indicates training phase
scope: string, variable scope
Return:
normed: batch-normalized maps
"""
with tf.variable_scope(scope):
beta = tf.Variable(tf.constant(0.0, shape=[n_out]),
name='beta', trainable=True)
gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),
name='gamma', trainable=True)
batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')
ema = tf.train.ExponentialMovingAverage(decay=0.5)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(phase_train,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
return normed
| 5,337,450
|
def strip_variants(address):
"""Return a copy of the given address with the variants (if any) stripped from the name.
:rtype: :class:`pants.build_graph.address.Address`
"""
address, _ = parse_variants(address)
return address
| 5,337,451
|
def list_rse_usage_history(rse, issuer, source=None, vo='def'):
"""
List RSE usage history information.
:param rse: The RSE name.
:param issuer: The issuer account.
:param source: The source of the usage information (srm, rucio).
:param vo: The VO to act on.
:returns: A list of historic RSE usage.
"""
rse_id = rse_module.get_rse_id(rse=rse, vo=vo)
for u in rse_module.list_rse_usage_history(rse_id=rse_id, source=source):
yield api_update_return_dict(u)
| 5,337,452
|
def assure_dir(outd):
"""Create directory 'outd' if it does not exist"""
if not os.path.exists(outd):
os.makedirs(outd)
| 5,337,453
|
def deleteIdentifiedStock(bot, update):
"""Deletes the user's selected stock.
If the user's selected stock is valid, proceed to delete it.
Returns:
Return MENU state with normal keyboard.
"""
if update.message.chat.username is None:
# User has no username
update.message.reply_text(
"It seems you do not have a Telegram Username.\nI'll need your username in order to function :( /start me up when you have one! (You can set your username in Settings.)")
else:
# User has username
text = update.message.text
message = bots.deleteUserStock(update.message.chat.username, text)
update.message.reply_text(message, parse_mode='HTML')
update.message.reply_text(
"What would you like to do next?", reply_markup=markup_one)
return MENU
| 5,337,454
|
def z_inc_down(grid):
"""Return True if z increases downwards in the coordinate reference system used by the grid geometry
:meta common:
"""
if grid.crs is None:
assert grid.crs_uuid is not None
grid.crs = rqc.Crs(grid.model, uuid = grid.crs_uuid)
return grid.crs.z_inc_down
| 5,337,455
|
def update_display_caption():
"""set the window title"""
pygame.display.set_caption("SoundRTS %s %s %s" % (VERSION, res.mods, res.soundpacks))
| 5,337,456
|
def load_coord_var(prob_data_type):
"""
Loads a coordinate variable from the source data and returns it.
:param prob_data_type:
:return:
"""
fpath = "{}/source_others/a1b_tas_jja_EAW_1961-1990.dat".format(BASEDIR)
with open(fpath, 'rb') as reader:
data = cPickle.load(reader)
key = prob_data_map[prob_data_type]
if key == 'prob':
return np.array((data[key] * 100), np.float)
else:
return np.array(data[key], np.int32)
| 5,337,457
|
def get_os():
"""Get the current operating system.
:returns: The OS platform (str).
"""
return platform.system()
| 5,337,458
|
async def test_pipe_operator():
"""Overload the ``__or__`` operator to make piping streams look cool."""
logging.debug('+++++++++++++++++++++>> BEGIN TEST_PIPE_OPERATOR')
# Spool some commands
read_file = reel.Spool(f'cat {__file__}')
remove_grep = reel.Spool(f'grep -v grep')
find_cat = reel.Spool('grep cat')
# One way to make a transport
transport = reel.Transport(read_file, remove_grep, find_cat)
# Another way
chain = [read_file, remove_grep, find_cat]
transport_chain = reel.Transport(chain)
# Or:
async with read_file | remove_grep | find_cat as out:
assert repr(transport) == repr(transport_chain) == repr(out)
lines = await out.readlines()
assert len(lines) == 2
for line in lines:
assert 'cat' in line
| 5,337,459
|
def get_config():
"""This function retrieves API keys, access tokens, and other key data from the config file."""
global LOG_NAME, TARGET, URL_NUMBER, WHERE, BOT
print("Building OAuth header...")
if 'XKCD_APPNAME' in os.environ: # Running on a cloud server
key = [os.environ.get('API_KEY', None),
os.environ.get('API_SECRET_KEY', None),
os.environ.get('ACCESS_TOKEN', None),
os.environ.get('ACCESS_TOKEN_SECRET', None)]
LOG_NAME = os.environ.get('LOG_NAME', None)
TARGET = os.environ.get('TARGET', None)
URL_NUMBER = int(os.environ.get('URL_NUMBER', None))
WHERE = int(os.environ.get('WHERE', None))
BOT = os.environ.get('BOT', None)
else: # Running locally
with open('config.yaml') as config_file:
CONFIG = yaml.safe_load(config_file)
key = [CONFIG['API Key'],
CONFIG['API Secret Key'],
CONFIG['Access Token'],
CONFIG['Access Token Secret']]
LOG_NAME = CONFIG['Target name in logs']
TARGET = CONFIG['Target account handle']
URL_NUMBER = int(CONFIG['Tweet URL location'])
WHERE = int(CONFIG['Target image location on site'])
BOT = CONFIG['Your account handle']
for i in key:
if i is None: # Verify keys were loaded
print("OAuth initiation failed: API key or access token not found")
del key
return 'crash' # Enter log protection mode
auth = OAuth1(key[0], key[1], key[2], key[3])
print('OAuth initiation successful!')
del key
return auth
| 5,337,460
|
def count_hits(space, positions, pi_plus_4_vecs_lab, pi_null_4_vecs_lab, r):
"""returns a list of hit counts for z values in space"""
return [count_double_hits(positions, pi_plus_4_vecs_lab, pi_null_4_vecs_lab, r=r, z_detector=z) for z in space]
| 5,337,461
|
def time_since(since, m_padding=2, s_padding=2):
"""Elapsed time since last record point."""
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return '{}m:{}s'.format(str(int(m)).zfill(m_padding),
str(int(s)).zfill(s_padding))
| 5,337,462
|
def run_cmd(cmd, encoding=DEFAULT_ENCODING):
"""
Run a command as a subprocess.
# Arguments
* `cmd` (list<str>): The command to run.
* `encoding` (str): The encoding to use for communicating with the
subprocess.
# Returns
A named tuple with the following fields:
    - returncode: The return code from the subprocess.
- stderr: The stderr output from the subprocess.
- stdout: The stdout output from the subprocess.
"""
p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return collections.namedtuple("CmdResult",
["returncode", "stderr", "stdout"])(
p.returncode,
p.stderr.decode(encoding).strip(),
p.stdout.decode(encoding).strip())
| 5,337,463
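A usage sketch for run_cmd above, assuming a POSIX environment where the echo binary exists and DEFAULT_ENCODING is defined at module level as the snippet expects.

result = run_cmd(["echo", "hello"])
assert result.returncode == 0
assert result.stdout == "hello"
assert result.stderr == ""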
|
def setconfig_list(ui, section, items):
"""
Alternative form of setting many configuration items with one call.
Here items are given as list of key,value pairs. Contrary to
setconfig_dict, this guarantees ordering.
>>> import mercurial.ui; ui = mercurial.ui.ui()
>>> setconfig_list(ui, "sect1",
... [('a', 7), ('bbb', 'xxx'), ('c', '-'), ('a', 8)])
>>> setconfig_list(ui, "sect2", [('v', 'vvv')])
>>> ui.config("sect1", 'a')
8
>>> ui.config("sect2", 'v')
'vvv'
    :param section: configuration section to fill
    :param items: list of (key, value) pairs to set
"""
for key, value in items:
ui.setconfig(section, key, value)
| 5,337,464
|
def record_check(record):
"""
record dict check
--- a dictionary is required as the input ---
"""
assert isinstance(
record, dict), 'record should be dict, while the input is {}'.format(type(record))
cnn_json_struct = JsonFormatSetting.CNN_JSON_STRUCTURE
record_struct = cnn_json_struct["record"][0]
return check_dict(record, record_struct)
| 5,337,465
|
def encrypt_chunk(chunk, password=None):
"""Encrypts the given chunk of data and returns the encrypted chunk.
If password is None then saq.ENCRYPTION_PASSWORD is used instead.
password must be a byte string 32 bytes in length."""
if password is None:
password = saq.ENCRYPTION_PASSWORD
assert isinstance(password, bytes)
assert len(password) == 32
iv = Crypto.Random.OSRNG.posix.new().read(AES.block_size)
encryptor = AES.new(password, AES.MODE_CBC, iv)
original_size = len(chunk)
if len(chunk) % 16 != 0:
chunk += b' ' * (16 - len(chunk) % 16)
result = struct.pack('<Q', original_size) + iv + encryptor.encrypt(chunk)
return result
| 5,337,466
|
def freeze_graph(checkpoints_path, output_graph):
    """
    :param checkpoints_path: path to the ckpt checkpoint files
    :param output_graph: path where the frozen pb model is saved
    :return:
    """
    with tf.Graph().as_default():
        image = tf.placeholder(shape=[None, 608, 608, 3], dtype=tf.float32, name='inputs')
        # Specify the output node names; they must be nodes that exist in the original model
        output_node_names = "reorg_layer/obj_probs,reorg_layer/class_probs,reorg_layer/bboxes_probs"
        # Build the graph structure from the model code
        Model = network.Network(is_train=False)
        logits = Model.build_network(image)
        output = Model.reorg_layer(logits, model_params['anchors'])
        # Alternatively, restore the structure from the .meta file
        #saver = tf.train.import_meta_graph(checkpoints_path + '.meta', clear_devices=True)
        # Get the default graph
        graph = tf.get_default_graph()
        # Return a serialized GraphDef representing the current graph
        input_graph_def = graph.as_graph_def()
        with tf.Session() as sess:
            saver = tf.train.Saver()
            # Restore the graph and load the variable values
            saver.restore(sess, checkpoints_path)
            # Persist the model by freezing the variable values into constants
            output_graph_def = graph_util.convert_variables_to_constants(
                sess=sess,
                input_graph_def=input_graph_def,
                output_node_names=output_node_names.split(","))
            # Remove training-only nodes, keeping just the inference graph
            output_graph_def = graph_util.remove_training_nodes(
                output_graph_def)
            # Save the model
            with tf.gfile.GFile(output_graph, "wb") as f:
                # Serialize and write the output
                f.write(output_graph_def.SerializeToString())
            # Report how many op nodes are in the final graph
            print("%d ops in the final graph." % len(output_graph_def.node))
            # for op in graph.get_operations():
            #     print(op.name, op.values())
| 5,337,467
|
def convert_to_pj_lop_plus(lops):
"""
Converts the list of PlayerStates to an LOP+
:param lops: The PlayerStates to be converted
:type lops: [PlayerState, ...]
:return: The LOP+
:rtype: PyJSON
"""
return [convert_to_pj_player_plus(ps) for ps in lops]
| 5,337,468
|
def euler_to_quaternion(roll: float = 0, pitch: float = 0, yaw: float = 0) -> Tuple[float, float, float, float]:
"""
Convert Euler to Quaternion
Args:
roll (float): roll angle in radian (x-axis)
pitch (float): pitch angle in radian (y-axis)
yaw (float): yaw angle in radian (z-axis)
Returns:
Tuple[float, float, float, float]: x, y, z, w
"""
# Abbreviations for the various angular functions
cy = math.cos(yaw * 0.5)
sy = math.sin(yaw * 0.5)
cp = math.cos(pitch * 0.5)
sp = math.sin(pitch * 0.5)
cr = math.cos(roll * 0.5)
sr = math.sin(roll * 0.5)
# Quaternion
w = cr * cp * cy + sr * sp * sy
x = sr * cp * cy - cr * sp * sy
y = cr * sp * cy + sr * cp * sy
z = cr * cp * sy - sr * sp * cy
return x, y, z, w
| 5,337,469
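A quick numerical check of euler_to_quaternion above: a 90° yaw about the z-axis should give roughly (0, 0, 0.7071, 0.7071) for (x, y, z, w).

import math

x, y, z, w = euler_to_quaternion(yaw=math.pi / 2)
print(round(x, 4), round(y, 4), round(z, 4), round(w, 4))   # 0.0 0.0 0.7071 0.7071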
|
def supervisorctl_url():
"""This parses supervisord.conf which contains the URL of supervisorctl."""
parsed_config = _get_parsed_configuration()
# For example 'http://localhost:9001'
control_url = _clean_config_value(parsed_config['supervisorctl']['serverurl'])
logging.debug("control_url=%s" % control_url)
return control_url
| 5,337,470
|
def create_generic_constant(type_spec, scalar_value):
"""Creates constant for a combination of federated, tuple and tensor types.
Args:
type_spec: Instance of `computation_types.Type` containing only federated,
tuple or tensor types for which we wish to construct a generic constant.
May also be something convertible to a `computation_types.Type` via
`computation_types.to_type`.
scalar_value: The scalar value we wish this constant to have.
Returns:
Instance of `computation_building_blocks.ComputationBuildingBlock`
representing `scalar_value` packed into `type_spec`.
Raises:
TypeError: If types don't match their specification in the args section.
Notice validation of consistency of `type_spec` with `scalar_value` is not
  the responsibility of this function.
"""
type_spec = computation_types.to_type(type_spec)
py_typecheck.check_type(type_spec, computation_types.Type)
inferred_scalar_value_type = type_utils.infer_type(scalar_value)
if (not isinstance(inferred_scalar_value_type, computation_types.TensorType)
or inferred_scalar_value_type.shape != tf.TensorShape(())):
raise TypeError('Must pass a scalar value to '
'`create_tensorflow_constant`; encountered a value '
'{}'.format(scalar_value))
if not type_utils.type_tree_contains_only(
type_spec,
(computation_types.FederatedType, computation_types.NamedTupleType,
computation_types.TensorType)):
raise TypeError
if type_utils.type_tree_contains_only(
type_spec,
(computation_types.NamedTupleType, computation_types.TensorType)):
return computation_constructing_utils.create_tensorflow_constant(
type_spec, scalar_value)
elif isinstance(type_spec, computation_types.FederatedType):
unplaced_zero = computation_constructing_utils.create_tensorflow_constant(
type_spec.member, scalar_value)
if type_spec.placement == placement_literals.CLIENTS:
placement_fn_type = computation_types.FunctionType(
type_spec.member,
computation_types.FederatedType(
type_spec.member, type_spec.placement, all_equal=True))
placement_function = computation_building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS.uri, placement_fn_type)
elif type_spec.placement == placement_literals.SERVER:
placement_fn_type = computation_types.FunctionType(
type_spec.member,
computation_types.FederatedType(
type_spec.member, type_spec.placement, all_equal=True))
placement_function = computation_building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_VALUE_AT_SERVER.uri, placement_fn_type)
return computation_building_blocks.Call(placement_function, unplaced_zero)
elif isinstance(type_spec, computation_types.NamedTupleType):
elements = []
for k in range(len(type_spec)):
elements.append(create_generic_constant(type_spec[k], scalar_value))
names = [name for name, _ in anonymous_tuple.to_elements(type_spec)]
packed_elements = computation_building_blocks.Tuple(elements)
named_tuple = computation_constructing_utils.create_named_tuple(
packed_elements, names)
return named_tuple
else:
raise ValueError(
'The type_spec {} has slipped through all our '
'generic constant cases, and failed to raise.'.format(type_spec))
| 5,337,471
|
def has_openid(request):
"""
    Given an HttpRequest, determine whether the OpenID on it is associated with the user,
    allowing the caller to know whether the OpenID is safe to depend on.
"""
from django_openid.models import UserOpenidAssociation
for association in UserOpenidAssociation.objects.filter(user=request.user):
if association.openid == unicode(request.openid):
return True
return False
| 5,337,472
|
def get_wordcloud():
"""
Generates the wordcloud and sends it to the front end as a png file.
:return: generated tag_cloud.png file
"""
update_tagcloud(path_to_save='storage/tmp', solr_service=solr)
return send_from_directory("storage/tmp", "tag_cloud.png", as_attachment=True)
| 5,337,473
|
def rtc_runner(rtc):
"""
:type rtc: pbcommand.models.ResolvedToolContract
:return:
"""
return gather_run_main(chunk_json=rtc.task.input_files[0],
chunk_key=Constants.CHUNK_KEY,
gathered_fn=rtc.task.output_files[0],
ln_name = Constants.DEFAULT_OUT_NAME,
gather_func=cat_txt_with_header)
| 5,337,474
|
def get_matching_axis(shape: Tuple, length: int) -> int:
"""
Infers the correct axis to use
:param shape: the shape of the input
:param length: the desired length of the axis
:return: the correct axis. If multiple axes match, then it returns the last
one.
"""
# noinspection PyUnresolvedReferences
axis_candidates = np.nonzero(np.array(shape) == length)[0]
if len(axis_candidates) == 0:
        raise ValueError('Unable to infer axis due to shape mismatch: '
'{} =/= {}.'.format(shape, length))
return axis_candidates[-1]
| 5,337,475
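Two illustrative calls to get_matching_axis above: one unambiguous shape, and one where several axes match and the last is returned.

assert get_matching_axis((3, 5, 7), 5) == 1    # only axis 1 has length 5
assert get_matching_axis((4, 4), 4) == 1       # ties resolve to the last matching axis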
|
def is_paths(maybe_paths, marker='*'):
"""
Does given object `maybe_paths` consist of path or path pattern strings?
"""
return ((is_path(maybe_paths) and marker in maybe_paths) or # Path str
(is_path_obj(maybe_paths) and marker in maybe_paths.as_posix()) or
(is_iterable(maybe_paths) and
all(is_path(p) or is_ioinfo(p) for p in maybe_paths)))
| 5,337,476
|
def output_numpy_or_asa(obj, data, *, output_type=None, labels=None):
"""This function returns a numpy ndarray or nelpy.AnalogSignalArray
Parameters
----------
obj : numpy.ndarray or a nelpy object
data : numpy.ndarray, with shape (n_samples, n_signals)
Data is either passed through as the np.ndarray
or used to form a nelpy object, depending on 'output_type'.
output_type : string, optional
Specifies the object that should be returned.
Default is a numpy np.ndarray
    labels : np.ndarray of string, optional
Labels that will be attached to the nelpy object, if
that is the desired output type. If the output type is
'numpy', the labels are ignored.
Returns
-------
Output object of the specified type. If a numpy array, it will
have shape (n_samples, n_signals)
"""
if data.size == 0:
logging.warning("Output data is empty")
if not isinstance(data, np.ndarray):
raise TypeError("data must be a numpy ndarray")
if output_type is not None:
if output_type != 'asa':
raise TypeError(("Invalid output type {} specified".
format(output_type)))
if output_type == 'asa':
try:
res = isinstance(obj, nel.RegularlySampledAnalogSignalArray)
if res is False:
raise TypeError("You specified output type {} but the input"
" object was not a nelpy object. Cannot form an"
" ASA around the input object".format(output_type))
# Transpose data since ASAs have shape (n_signals, n_samples)
out = nel.AnalogSignalArray(data.T,
abscissa_vals=obj.abscissa_vals,
fs=obj.fs,
support=obj.support,
labels=labels)
return out
except NameError:
raise ModuleNotFoundError("You must have nelpy installed for"
" output type {}".format(output_type))
return data
| 5,337,477
|
def embed_oar(features: Array, action: Array, reward: Array,
num_actions: int) -> Array:
"""Embed each of the (observation, action, reward) inputs & concatenate."""
chex.assert_rank([features, action, reward], [2, 1, 1])
action = jax.nn.one_hot(action, num_classes=num_actions) # [B, A]
reward = jnp.tanh(reward)
while reward.ndim < action.ndim:
reward = jnp.expand_dims(reward, axis=-1)
embedding = jnp.concatenate([features, action, reward], axis=-1) # [B, D+A+1]
return embedding
| 5,337,478
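A shape sketch for embed_oar above, assuming jax and chex are installed; with batch size 2, feature dim 4 and 5 actions, the embedding has width 4 + 5 + 1.

import jax.numpy as jnp

feats = jnp.ones((2, 4))                 # [B, D]
action = jnp.array([0, 3])               # [B]
reward = jnp.array([1.0, -0.5])          # [B]
out = embed_oar(feats, action, reward, num_actions=5)
print(out.shape)                         # (2, 10)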
|
async def kibana(es, params):
"""
Simulates Kibana msearch dashboard queries.
It expects the parameter hash to contain the following keys:
"body" - msearch request body representing the Kibana dashboard in the form of an array of dicts.
"params" - msearch request parameters.
"meta_data" - Dictionary containing meta data information to be carried through into metrics.
"""
request = params["body"]
request_params = params["params"]
meta_data = params["meta_data"]
if meta_data["debug"]:
logger.info("Request:\n=====\n{}\n=====".format(json.dumps(request)))
visualisations = int(len(request) / 2)
response = {}
for key in meta_data.keys():
response[key] = meta_data[key]
response["request_params"] = request_params
response["weight"] = 1
response["unit"] = "ops"
response["visualisation_count"] = visualisations
result = await es.msearch(body=request, params=request_params)
sum_hits = 0
max_took = 0
error_count = 0
error_details = set()
for r in result["responses"]:
if "error" in r:
error_count += 1
extract_error_details(error_details, r)
else:
hits = r.get("hits", {}).get("total", 0)
if isinstance(hits, dict):
sum_hits += hits["value"]
else:
sum_hits += hits
max_took = max(max_took, r["took"])
# use the request's took if possible but approximate it using the maximum of all responses
response["took"] = result.get("took", max_took)
response["hits"] = sum_hits
response["success"] = error_count == 0
response["error-count"] = error_count
if error_count > 0:
response["error-type"] = "kibana"
response["error-description"] = error_description(error_details)
if meta_data["debug"]:
for r in result["responses"]:
# clear hits otherwise we'll spam the log
if "hits" in r and "hits" in r["hits"]:
r["hits"]["hits"] = []
r["aggregations"] = {}
logger.info("Response (excluding specific hits):\n=====\n{}\n=====".format(json.dumps(result)))
return response
| 5,337,479
|
def aggregations_terms(query=None):
"""Get page for aggregations."""
if query is None:
# Default query
query = "state,config.instance_type"
# Remove all white spaces from the str
query = query.replace(" ", "")
data = {"query": query}
end_point = "aggregations/terms"
url = SOUNDWAVE_API + end_point
response = requests.post(url, json=data)
if response.status_code == 200 or response.status_code == 304:
json_data = json_loads_byteified(response.text)
return render_template(
"aggregations.html",
data=json.dumps(json_data),
query=query)
elif response.status_code == 404 or response.status_code == 400:
logger.warn("Data not found in soundwave elastic search store. API returned 404")
return render_template("404.html")
elif response.status_code == 500:
logger.warn("soundwave api returned 500 status code. Internal Server error")
return render_template("500.html")
| 5,337,480
|
def validate_base(model,
args,
loader,
loadername,
train=True):
"""
The validation function. Validates the ELBO + MIL, ELBO, and the accuracy
of the given [training, validation or test] loader.
Returns
-------
loss: list
Either the ELBO (from base VAE) or the accuracy rate (from the base MIL).
"""
# Model: validate
model.eval()
# Declare loss tracker
loss_val = 0.
# Initialize the number of points
N = 0
# Loop through the data
for data, label in tqdm(loader, desc=f' Validation[{loadername}]'):
# Convert the data to cuda if available
data = data.to(device=args.device).squeeze(0)
# Update the N
N += data.shape[0]
# If args.mode is 'base_att'
if args.model == 'base_att':
# Convert the label to cuda if available
label = label[0].to(device=args.device)
# Calculate the objective for the Attention MIL
# (name kept the same not to duplicate the code blocks)
elbo_u_sum = model.calculate_classification_error(data, label)[0]
# Otherwise
else:
# Calculate ELBO for unlabeled data
elbo_u_sum = model(data)
# Track elbo results together [sum]
loss_val += elbo_u_sum.item()
if args.test_mode:
break
# If the mode is base_att
if args.model == 'base_att':
# Divide the accuracy by the length of the loader
loss_val = loss_val / len(loader)
# Trace
print(f' [Valid {loadername}]\t accuracy: {loss_val: .2f}')
# If the loader is not the training loader
if not train:
# If the validation accuracy is higher than the previous one
if loss_val >= args.prev_val:
# Save the model
torch.save(model.state_dict(),
f'{args.MODELPATH}/{args.mode}_E{args.epoch}.pt')
# Update the accuracy value
args.prev_val = loss_val
# If the mode is base
elif args.model == 'base':
# Divide the loss by the number of points
loss_val = loss_val / N
# Trace
print(f' [Valid {loadername}]\t elbo: {loss_val: .2f}')
# If the loader is not the training loader
if not train:
# If the validation loss is lower than the previous one
if loss_val <= args.prev_val:
# Save the model
torch.save(model.state_dict(),
f'{args.MODELPATH}/{args.mode}_E{args.epoch}.pt')
# Update the accuracy value
args.prev_val = loss_val
# Return validation records
return loss_val
| 5,337,481
|
def parse_person(person):
"""
https://doc.rust-lang.org/cargo/reference/manifest.html#the-authors-field-optional
A "person" is an object with an optional "name" or "email" field.
A person can be in the form:
"author": "Isaac Z. Schlueter <i@izs.me>"
For example:
>>> p = parse_person('Barney Rubble <b@rubble.com>')
>>> assert p == ('Barney Rubble', 'b@rubble.com')
>>> p = parse_person('Barney Rubble')
>>> assert p == ('Barney Rubble', None)
>>> p = parse_person('<b@rubble.com>')
>>> assert p == (None, 'b@rubble.com')
"""
parsed = person_parser(person)
if not parsed:
name = None
parsed = person_parser_no_name(person)
else:
name = parsed.group('name')
email = parsed.group('email')
if name:
name = name.strip()
if email:
email = email.strip('<> ')
return name, email
| 5,337,482
|
def flag_dict_to_args(flag_map):
"""Convert a dict of values into process call parameters.
This method is used to convert a dictionary into a sequence of parameters
for a binary that parses arguments using this module.
Args:
flag_map: dict, a mapping where the keys are flag names (strings).
values are treated according to their type:
* If value is None, then only the name is emitted.
* If value is True, then only the name is emitted.
* If value is False, then only the name prepended with 'no' is emitted.
* If value is a string then --name=value is emitted.
* If value is a collection, this will emit --name=value1,value2,value3.
* Everything else is converted to a string and passed as such.
Yields:
sequence of string suitable for a subprocess execution.
"""
for key, value in six.iteritems(flag_map):
if value is None:
yield '--%s' % key
elif isinstance(value, bool):
if value:
yield '--%s' % key
else:
yield '--no%s' % key
elif isinstance(value, (bytes, type(u''))):
# We don't want strings to be handled like python collections.
yield '--%s=%s' % (key, value)
else:
# Now we attempt to deal with collections.
try:
yield '--%s=%s' % (key, ','.join(str(item) for item in value))
except TypeError:
# Default case.
yield '--%s=%s' % (key, value)
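# Hedged usage example: the dict below is illustrative only; each rule from the
# docstring above is exercised once.
example_flags = {'verbose': True, 'dry_run': False, 'marker': None,
                 'name': 'job-1', 'ids': [1, 2, 3]}
print(sorted(flag_dict_to_args(example_flags)))
# ['--ids=1,2,3', '--marker', '--name=job-1', '--nodry_run', '--verbose']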
| 5,337,483
|
def build_snpeff(ref_genome):
"""Setup the SnpEff database for ref_genome.
This function does the following:
* Sets up the directory structure for SnpEff-related files.
* Writes a possibly modified Genbank to the location that SnpEff
expects to find it. A few cleanups are necessary to avoid SnpEff
quirks.
* Creates the SnpEff config file for building the database/index.
* Builds the SnpEff database/index.
SnpEFF needs a config file for every reference genome, which lists a
single reference genome, its chromosomes, and the codon table that
each uses. For now we can assume that all our genomes will use bacterial
codons. Every reference genome in the config file should look similar to:
# Ecoli K12 MG1655
NC_000913.genome : Escherichia_coli
NC_000913.chromosomes : NC_000913
NC_000913.NC_000913.codonTable: Bacterial_and_Plant_Plastid
We have made a template that can do this with yaml rendering, in the
snpEFF tools directory. Given a ref_genome object, it generates a
snpEFF config file and builds a snpEFF database file for the genome,
and places it in the ref genome's data dir under ./snpeff.
"""
# if no genbank file for this ref genome, then do nothing
if not ref_genome.is_annotated():
print "Skipping SnpEff indexing: No genbank for reference genome %s" % (
ref_genome.uid)
return
# Get the path to the reference genbank, making sure it exists.
ref_genome_path = get_dataset_with_type(ref_genome,
type=Dataset.TYPE.REFERENCE_GENOME_GENBANK).get_absolute_location()
assert ref_genome_path is not None, "Reference Genbank missing."
# Create the snpeff directory structure.
ref_genome.ensure_snpeff_dir()
# Build a template data dictionary which will be passed to the django
# template renderer in order to generate the config file.
templ_data = {}
templ_data['snpeff_dir'] = ref_genome.get_snpeff_dir()
templ_data['uid'] = ref_genome.uid
templ_data['label'] = ref_genome.label
# The following block does 2 things:
# 1. Identifies all chromosomes in the Genbank.
# 2. Ensures that the contained SeqRecord name and ids match, which is
# required by SnpEff.
templ_data['chromosomes'] = []
new_genbank_seq_records = []
with open(ref_genome_path) as genbank_fh:
for seq_record in SeqIO.parse(genbank_fh, 'genbank'):
# Set the ACCESSION/LOCUS/VERSION to all be the same for this
# new modified genbank
seq_record.name = seq_record.id
new_genbank_seq_records.append(seq_record)
# Add this record as a chromosome to this ref genome
# TODO: Do we want to check seqrecords for sane/sanitized names?
templ_data['chromosomes'].append(seq_record.name)
templ_data['chrs_string'] = ','.join(templ_data['chromosomes'])
# Write the updated Genbank.
snpeff_genbank_path = ref_genome.get_snpeff_genbank_file_path()
SeqIO.write(new_genbank_seq_records, snpeff_genbank_path, 'genbank')
# Stop-gap fix to ensure line lengths in Genbank to appease SnpEff.
ensure_line_lengths(ref_genome.get_snpeff_genbank_file_path())
# Render SnpEff config template.
render_snpeff_config(templ_data, ref_genome.get_snpeff_config_path())
# Build snpEff database
build_snpeff_db(ref_genome.get_snpeff_config_path(), ref_genome.uid)
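# Hedged illustration (not taken from the project) of the data handed to
# render_snpeff_config and of a config rendered in the format quoted in the
# docstring above. Paths, uid and label are placeholders.
#
#   templ_data = {'snpeff_dir': '/data/ref/abc123/snpeff',
#                 'uid': 'abc123', 'label': 'Ecoli_K12',
#                 'chromosomes': ['NC_000913'], 'chrs_string': 'NC_000913'}
#
#   # Ecoli_K12
#   abc123.genome : Ecoli_K12
#   abc123.chromosomes : NC_000913
#   abc123.NC_000913.codonTable : Bacterial_and_Plant_Plastid
#
# The exact layout depends on the project's snpEFF config template.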
| 5,337,484
|
def _glasstone_surface_cf(y):
"""Correction factor provided by TEoNW for contact surface bursts (p. 335)."""
return np.interp(y, [1.0, 50.0, 100.0, 300.0, 700.0, 2000.0, 5000.0, 5000.0], [0.6666666666666666, 0.6666666666666666, 1.0, 1.25, 1.5, 2.0, 3.0, 3.0])
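# A few spot checks of the interpolation table (yield presumably in kilotons):
# values at or below 50 get the 2/3 factor, 100 maps to 1.0, intermediate
# yields are linearly interpolated, and anything past 5000 is clamped at 3.0.
#   _glasstone_surface_cf(200.0)    # -> 1.125
#   _glasstone_surface_cf(10000.0)  # -> 3.0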
| 5,337,485
|
def create_eval_fn(task_id, calculate_gradient=False):
"""Creates an evaluation function for a given task. Returns an evaluation
function that takes in a model, dataloader, and device, and evaluates the
model on the data from the dataloader. Returns a dictionary with mean
"loss" and "accuracy". If calculate_gradient is True, dictionary will also
contain gradients for the model wrt the loss on the data.
Args:
task_id: Task id corresponding to the data that will be evaluated.
calculate_gradient: Whether gradient should be calculated.
"""
def eval_fn(model, dataloader, device):
model.eval()
total_loss = 0
loss_fn = torch.nn.CrossEntropyLoss(reduction="sum").to(device=device)
num_correct = 0
model.zero_grad()
torch.set_grad_enabled(calculate_gradient)
for X, y in iter(dataloader):
X = X.to(device=device)
y = y.to(device=device)
output = model(X, task_id)
preds = torch.argmax(output, dim=1)
num_correct += (preds == y).sum().item()
loss = loss_fn(output, y) / len(dataloader.dataset)
if calculate_gradient:
loss.backward()
total_loss += loss.item()
accuracy = num_correct / len(dataloader.dataset)
metrics = {"loss": total_loss, "accuracy": accuracy}
if calculate_gradient:
gradients = flatten_gradients(model)
metrics["gradients"] = gradients
return metrics
return eval_fn
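# Hedged usage sketch: `model` and `test_loader` are the caller's own objects
# (the model must accept model(X, task_id) as in the loop above), and
# metrics["gradients"] comes from the project's flatten_gradients helper.
#
#   eval_fn = create_eval_fn(task_id=0, calculate_gradient=True)
#   metrics = eval_fn(model, test_loader, device=torch.device("cpu"))
#   print(metrics["loss"], metrics["accuracy"])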
| 5,337,486
|
def get_dashboard_oauth_client_id():
"""Gets the client ID used to authenticate with Identity-Aware Proxy
from the environment variable DASHBOARD_OAUTH_CLIENT_ID."""
return os.environ.get('DASHBOARD_OAUTH_CLIENT_ID')
| 5,337,487
|
def naming_style(f):
"""Decorator for name utility functions.
Wraps a name utility function in a function that takes one or more names,
splits them into a list of words, and passes the list to the utility function.
"""
def inner(name_or_names):
names = name_or_names if isinstance(name_or_names, list) else [name_or_names]
words = []
for name in names:
words.extend(split_name(name))
return f(words)
return inner
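# Hedged usage sketch: split_name is the project's own word splitter (not shown
# here). Assuming it breaks 'myHTTPServer' into ['my', 'HTTP', 'Server'], a
# snake_case formatter could be written as:
#
#   @naming_style
#   def snake_case(words):
#       return '_'.join(word.lower() for word in words)
#
#   snake_case('myHTTPServer')            # -> 'my_http_server'
#   snake_case(['myHTTPServer', 'port'])  # -> 'my_http_server_port'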
| 5,337,488
|
def parse_args(argv, app_name):
"""Parse command line arguments"""
parser = argparse.ArgumentParser(description=app_name)
parser.add_argument(
"-c",
"--config",
dest="config",
type=str,
default="config.yaml",
help="Set config.yaml file",
)
parser.add_argument(
"-s", "--service", dest="service", action="store_true", help="Run as service"
)
parser.add_argument(
"-d", "--debug", dest="debug", action="store_true", help="Turn on DEBUG logging"
)
parser.set_defaults(service=False)
parser.set_defaults(debug=False)
return parser.parse_args(argv)
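# Usage example with a synthetic argv list (no real command line involved):
example_args = parse_args(["-c", "prod.yaml", "--debug"], "demo-app")
print(example_args.config, example_args.service, example_args.debug)
# prod.yaml False True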
| 5,337,489
|
def has_newer_fw( current_fw, bundled_fw ):
"""
:param current_fw: current FW version of a device
:param bundled_fw: bundled FW version of the same device
:return: True if the bundled version is newer than the current one
"""
current_fw_digits = current_fw.split( '.' )
bundled_fw_digits = bundled_fw.split( '.' )
if len( current_fw_digits ) != len( bundled_fw_digits ):
log.e( "Either the devices FW (", current_fw, ") or the bundled FW(", bundled_fw, ") was of an invalid format")
sys.exit(1)
for curr, bundled in zip( current_fw_digits, bundled_fw_digits ):
if int(bundled) > int(curr):
return True
if int(bundled) < int(curr):
return False
return False
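# Usage examples; the mismatched-length error path (log.e + sys.exit) is not
# exercised here.
assert has_newer_fw('5.12.7.100', '5.12.8.50') is True    # third field is newer
assert has_newer_fw('5.12.8.50', '5.12.7.100') is False   # bundled is older
assert has_newer_fw('5.12.8.50', '5.12.8.50') is False    # identical versions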
| 5,337,490
|
def is_idaq(*args):
"""
is_idaq() -> bool
Returns True or False depending if IDAPython is hosted by IDAQ
"""
return _ida_kernwin.is_idaq(*args)
| 5,337,491
|
def print_grid(grid):
"""
Print the given grid in a readable form
:param grid: input grid/world to be printed
"""
print('[')
for row in grid:
print('\t', row, ',')
print(']')
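# Example: each row is printed on its own tab-indented line.
print_grid([[0, 0, 1], [1, 0, 0]])
# [
#        [0, 0, 1] ,
#        [1, 0, 0] ,
# ]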
| 5,337,492
|
def get_stats_yaml():
"""grab national stats yaml from scorecard repo"""
nat_dict = {}
try:
nat_yaml = requests.get(COLLEGE_CHOICE_NATIONAL_DATA_URL)
if nat_yaml.ok and nat_yaml.text:
nat_dict = yaml.safe_load(nat_yaml.text)
except AttributeError: # If response.text has no value
return nat_dict
except requests.exceptions.ConnectionError: # If requests can't connect
return nat_dict
else:
return nat_dict
| 5,337,493
|
def signal_interpolate(x_values, y_values, desired_length, method="quadratic"):
"""Interpolate a signal.
Interpolate (fills the values between data points) a signal using different methods.
Parameters
----------
x_values : list, array or Series
The samples corresponding to the values to be interpolated.
y_values : list, array or Series
The values to be interpolated.
desired_length : int
The amount of samples over which to interpolate the y_values.
method : str
Method of interpolation. Can be 'linear', 'nearest', 'zero', 'slinear',
'quadratic', 'cubic', 'previous' or 'next'. 'zero', 'slinear',
'quadratic' and 'cubic' refer to a spline interpolation of zeroth,
first, second or third order; 'previous' and 'next' simply return the
previous or next value of the point. An integer specifying the order
of the spline interpolator can also be passed (it is forwarded to scipy).
Returns
-------
array
Vector of interpolated samples.
Examples
--------
>>> import numpy as np
>>> import neurokit2 as nk
>>> import matplotlib.pyplot as plt
>>>
>>> x_values = np.linspace(start=0, stop=20, num=10)
>>> y_values = np.cos(x_values)
>>> zero = nk.signal_interpolate(x_values, y_values, desired_length=1000, method="zero")
>>> linear = nk.signal_interpolate(x_values, y_values, desired_length=1000, method="linear")
>>> quadratic = nk.signal_interpolate(x_values, y_values, desired_length=1000, method="quadratic")
>>> cubic = nk.signal_interpolate(x_values, y_values, desired_length=1000, method="cubic")
>>> nearest = nk.signal_interpolate(x_values, y_values, desired_length=1000, method="nearest")
>>>
>>> plt.plot(np.linspace(0, 1, num=len(zero)), zero, 'y',
...          np.linspace(0, 1, num=len(linear)), linear, 'r',
...          np.linspace(0, 1, num=len(quadratic)), quadratic, 'b',
...          np.linspace(0, 1, num=len(cubic)), cubic, 'g',
...          np.linspace(0, 1, num=len(nearest)), nearest, 'm',
...          np.linspace(0, 1, num=len(y_values)), y_values, 'ko')
>>>
>>> # The interpolated values are spread over an evenly spaced axis
>>> # covering the same range as x_values.
>>> new_x = np.linspace(x_values[0], x_values[-1], num=1000)
>>> plt.plot(new_x, quadratic, '-', x_values, y_values, 'o')
"""
# Sanity checks
if len(x_values) != len(y_values):
raise ValueError("NeuroKit error: signal_interpolate(): x_values and y_values "
"must be of the same length.")
if desired_length is None or len(x_values) == desired_length:
return y_values
# Create interpolation function
interpolation_function = scipy.interpolate.interp1d(x_values,
y_values,
kind=method,
bounds_error=False,
fill_value=([y_values[0]], [y_values[-1]]))
new_x = np.linspace(x_values[0], x_values[-1], desired_length)
interpolated = interpolation_function(new_x)
return interpolated
| 5,337,494
|
def check_module(feature):
"""
Checks if a module is available.
:param feature: The module to check for.
:returns: ``True`` if available, ``False`` otherwise.
:raises ValueError: If the module is not defined in this version of Pillow.
"""
if not (feature in modules):
raise ValueError(f"Unknown module {feature}")
module, ver = modules[feature]
try:
__import__(module)
return True
except ImportError:
return False
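# Hedged usage sketch: "webp" is one of the module names Pillow registers;
# the result depends on how the local Pillow build was compiled.
#   check_module("webp")      # -> True or False
#   check_module("no_such")   # raises ValueError("Unknown module no_such")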
| 5,337,495
|
def get_original(N: int = 64) -> np.ndarray:
"""radontea logo base image"""
x = np.linspace(-N / 2, N / 2, N, endpoint=False)
X = x.reshape(1, -1)
Y = x.reshape(-1, 1)
z = logo(X, Y, N)
return np.array((z) * 255, dtype=np.uint16)
| 5,337,496
|
def rebuild_ft_billing_for_day(service_id, day):
"""
Rebuild the data in ft_billing for the given service_id and date
"""
def rebuild_ft_data(process_day, service):
deleted_rows = delete_billing_data_for_service_for_day(process_day, service)
current_app.logger.info("deleted {} existing billing rows for {} on {}".format(deleted_rows, service, process_day))
transit_data = fetch_billing_data_for_day(process_day=process_day, service_id=service)
# transit_data = every row that should exist
for data in transit_data:
# upsert existing rows
update_fact_billing(data, process_day)
current_app.logger.info("added/updated {} billing rows for {} on {}".format(len(transit_data), service, process_day))
if service_id:
# confirm the service exists
dao_fetch_service_by_id(service_id)
rebuild_ft_data(day, service_id)
else:
services = get_service_ids_that_need_billing_populated(
get_local_timezone_midnight_in_utc(day),
get_local_timezone_midnight_in_utc(day + timedelta(days=1)),
)
for row in services:
rebuild_ft_data(day, row.service_id)
| 5,337,497
|
def get_sql_update_by_ids(table: str, columns: List[str], ids_length: int):
"""
获取添加数据的字符串
:param table:
:param columns:
:param ids_length:
:return:
"""
# 校验数据
if not table:
raise ParamError(f"table 参数错误:table={table}")
if not columns or not isinstance(columns, List):
raise ParamError(f"columns 参数错误:columns={columns}")
if not ids_length or not isinstance(ids_length, int):
raise ParamError(f"ids_length 参数错误:ids_length={ids_length}")
# 准备参数
kvs = [f"{columns[i]}=%s" for i in range(len(columns))]
kvs_str = ", ".join(kvs)
ids = ["%s" for _ in range(ids_length)]
ids_str = ", ".join(ids)
# 准备sql
s = f"update {table} set {kvs_str} where id in ({ids_str});"
return s
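# Usage example: the statement keeps DB-API "%s" placeholders, so column values
# and ids are supplied separately when the statement is executed.
example_sql = get_sql_update_by_ids("users", ["name", "age"], 3)
print(example_sql)
# update users set name=%s, age=%s where id in (%s, %s, %s);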
| 5,337,498
|
def extract_roi(
input_img,
masks_location,
mask_pattern,
cropped_input,
roi_list,
uncrop_output,
):
"""Extracts regions of interest defined by masks
This function extracts regions of interest from preprocessed nifti images.
The regions are defined using binary masks that must be located in the CAPS
at `masks/tpl-<template>`.
Args:
input_img: nifti format MRI image.
masks_location: path to the masks
mask_pattern: pattern to identify the masks
cropped_input: if the input is cropped or not (contains desc-Crop)
roi_list: list of the names of the regions that will be extracted.
uncrop_output: if True, the final region is not cropped.
Returns:
output_roi: list of (filename, tensor) pairs, one per region in roi_list.
The filename encodes the subject/session prefix, the ROI pattern and the input suffix.
"""
import os
import nibabel as nib
import numpy as np
import torch
image_array = nib.load(input_img).get_fdata(dtype="float32")
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
input_img_filename = os.path.basename(input_img)
sub_ses_prefix = "_".join(input_img_filename.split("_")[0:3:])
if not sub_ses_prefix.endswith("_T1w"):
sub_ses_prefix = "_".join(input_img_filename.split("_")[0:2:])
input_suffix = input_img_filename.split("_")[-1].split(".")[0]
output_roi = []
for index_roi, roi in enumerate(roi_list):
# read mask
mask_path, _ = find_mask_path(masks_location, roi, mask_pattern, cropped_input)
mask_np = nib.load(mask_path).get_fdata()
if len(mask_np.shape) == 3:
mask_np = mask_np[np.newaxis, :]
extracted_roi = image_tensor * mask_np
if not uncrop_output:
extracted_roi = extracted_roi[
np.ix_(
mask_np.any((1, 2, 3)),
mask_np.any((0, 2, 3)),
mask_np.any((0, 1, 3)),
mask_np.any((0, 1, 2)),
)
]
extracted_roi = extracted_roi.float()
# save into .pt format
output_pattern = compute_output_pattern(mask_path, not uncrop_output)
output_roi.append(
(
f"{sub_ses_prefix}_{output_pattern}_{input_suffix}.pt",
extracted_roi.clone(),
)
)
return output_roi
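# Self-contained illustration (synthetic arrays, not CAPS data) of the np.ix_
# crop used above: keeping only the indices where the mask is non-zero along
# each axis trims the masked image down to the mask's bounding box.
import numpy as np
demo_mask = np.zeros((1, 5, 5, 5))
demo_mask[0, 1:3, 2:4, 0:2] = 1
demo_img = np.random.rand(1, 5, 5, 5)
demo_roi = (demo_img * demo_mask)[np.ix_(demo_mask.any((1, 2, 3)),
                                         demo_mask.any((0, 2, 3)),
                                         demo_mask.any((0, 1, 3)),
                                         demo_mask.any((0, 1, 2)))]
assert demo_roi.shape == (1, 2, 2, 2)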
| 5,337,499
|