| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def test_Highest_Greedy_Score__Single_Placable_Disk(score, max_score):
"""Function highest_greedy_score: Single disk."""
max_score.value += 6
try:
set_up()
test_board_6_copy = Board.get_board_copy(test_board_6)
disks_to_drop = [visible_disk_value_2_B]
disks_to_drop_copy = list.copy(disks_to_drop)
highest_score, columns = \
Drop7.highest_greedy_score(test_board_6, disks_to_drop)
assert highest_score == 6
assert columns == (6,)
assert len(disks_to_drop) == 0
actual_score = Drop7.play(test_board_6_copy, disks_to_drop_copy, columns)
assert actual_score == highest_score
assert are_equal_boards(test_board_6,test_board_6_copy)
score.value += 6
except:
pass
| 5,338,400
|
def __loadConfig():
"""
Load an .ini based config file.
"""
global __CONFIG, __DEFAULT_CONFIG
if not os.path.exists(__USER_CONFIG_FILENAME):
# if the user has no config,
# copy the default one to the expected location
shutil.copy(__DEFAULT_CONFIG_FILENAME, __USER_CONFIG_FILENAME)
__CONFIG = configparser.ConfigParser()
__CONFIG.read(__USER_CONFIG_FILENAME, encoding='utf8')
# the default config is also always loaded to try
# and fall back to its values
# if sections or keys are missing from the user config
__DEFAULT_CONFIG = configparser.ConfigParser()
__DEFAULT_CONFIG.read(__DEFAULT_CONFIG_FILENAME, encoding='utf8')
| 5,338,401
|
def model_density_of_sky_fibers(margin=1.5):
"""Use desihub products to find required density of sky fibers for DESI.
Parameters
----------
margin : :class:`float`, optional, defaults to 1.5
Factor of extra sky positions to generate. So, for margin=10, 10x as
many sky positions as the default requirements will be generated.
Returns
-------
:class:`float`
The density of sky fibers to generate in per sq. deg.
"""
from desimodel.io import load_fiberpos, load_target_info
fracsky = load_target_info()["frac_sky"]
nfibers = len(load_fiberpos())
nskies = margin*fracsky*nfibers
return nskies
| 5,338,402
|
def edit_train_mp3(address_mp3,
time_first_slice,
time_last_slice,
num_repeats,
break_time,
address_folder,
name_new_mp3
):
"""
    Creates a looped training mp3 file.
    :param address_mp3: Path to the source mp3 file
    :param time_first_slice: Start time of the looped section
    :param time_last_slice: End time of the looped section
    :param num_repeats: Number of repeats
    :param break_time: Total time for the fade-out and fade-in transitions
    :param address_folder: Folder in which the file is stored
    :param name_new_mp3: Name for the new mp3 file
    """
song = AudioSegment.from_mp3(address_mp3)
    slice_segment = song[time_first_slice:time_last_slice]  # Segment that will be looped
    fade_slice_segment = slice_segment.fade_in(int(break_time/2)).fade_out(int(break_time/2))
    begin_song = song[:time_last_slice]  # Start of the track up to the point where the loop begins
    training_song = begin_song + (fade_slice_segment * num_repeats)  # Build the training track
    training_song.export(f'{address_folder}/{name_new_mp3}', format="mp3")
| 5,338,403
|
def op_finish_word_definition(c: AF_Continuation) -> None:
"""
WordDefinition(Op_name), OutputTypeSignature(TypeSignature), CodeCompile(Operation')
-> (empty)
"""
op_finish_word_compilation(c)
c.stack.pop()
| 5,338,404
|
def dice_counts(dice):
"""Make a dictionary of how many of each value are in the dice """
return {x: dice.count(x) for x in range(1, 7)}
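# Usage sketch (added illustration, not part of the original snippet):
# dice_counts([1, 2, 2, 5, 5]) -> {1: 1, 2: 2, 3: 0, 4: 0, 5: 2, 6: 0}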
| 5,338,405
|
def SynthesizeData(phase, total_gen):
""" Phase ranges from 0 to 24 with increments of 0.2. """
x_list = [phase]
y_list = []
while len(x_list) < total_gen or len(y_list) < total_gen:
x = x_list[-1]
y = sine_function(x=x, amp=amp, per=per, shift_h=shift_h, shift_v=shift_v)
x_list.append(y+x)
y_list.append(y)
x_list = x_list[:-1]
return x_list, y_list
| 5,338,406
|
def compare():
""" Eats two file names, returns a comparison of the two files.
Both files must be csv files containing
<a word>;<doc ID>;<pageNr>;<line ID>;<index of the word>
They may also contain lines with additional HTML code (if the
output format is html):
<h3>Document 1</h3>
"""
if request.method == 'GET':
return "html"
elif request.method == 'POST':
# Get the JSON payload of the request containing the two file names
payload = request.get_json()
if payload['format'] == "html":
# Read input data, i.e. both input files (CSV) from disk:
            dumping_path = Path(current_app.config["CACHE_PATH"])
filename1 = Path(dumping_path, payload['files'][0])
filename2 = Path(dumping_path, payload['files'][1])
o = openfile(filename1)
e = openfile(filename2)
balance_tokens(o, e)
data1 = prepare_for_diff(o)
data2 = prepare_for_diff(e)
# Use difflib to find the differences:
print("ANALYZER: searching for differences (with difflib) ...")
d = difflib.Differ()
delta = d.compare(data1, data2)
delta = [*delta] # convert generator to list
pairs = prepare_output(delta, o,e)
filtered = filter_false_positives(pairs)
html = export_to_html(filtered,
original_document=o[0]['document'],
censored_document=e[0]['document'])
dumping_path = Path(current_app.config["CACHE_PATH"])
timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
filename = f"differences,{o[0]['document']}_vs_{e[0]['document']},{timestamp}.html"
savename = Path(dumping_path, filename)
            try:
                with open(savename, "w", encoding="utf-8") as f:
                    f.write(html)
            except OSError:
                print(f"ERROR: Analyzer: Could not write file {savename}")
return html
elif payload['format'] == "raw_diff":
# Read input data, i.e. both input files (CSV) from disk:
dumping_path = Path(current_app.config["CACHE_PATH"])
filename1 = Path(dumping_path, payload['files'][0])
filename2 = Path(dumping_path, payload['files'][1])
o = openfile(filename1)
e = openfile(filename2)
balance_tokens(o, e)
data1 = prepare_for_diff(o)
data2 = prepare_for_diff(e)
# Use difflib to find the differences:
print("ANALYZER: searching for differences (with difflib) ...")
d = difflib.Differ()
delta = d.compare(data1, data2)
delta = [*delta] # convert generator to list
pairs = prepare_output(delta, o,e)
filtered = filter_false_positives(pairs)
output = serialize_diff_pairs(filtered)
output["original"]["docTitle"] = o[0]['document']
output["censored"]["docTitle"] = e[0]['document']
output["message"] = "Success! Use the censorship inspector to process the output."
print("ANALYZER: Done! Sending JSON to client.")
return jsonify(output)
elif payload['format'] == "TRACER":
""" The TRACER data is already formatted correctly in the TSV files.
The only thing we have to do here is to replace the "XX" place holders
at the beginning of every line with a two digit number representing
the no. of the document. """
dumping_path = Path(current_app.config["CACHE_PATH"])
output = []
docs = []
docnr = 10
for file in payload['files']:
infile = Path(dumping_path, file)
with open(infile, "r", encoding="utf-8") as f:
lines = f.readlines()
for idx, line in enumerate(lines):
output.append(f"{docnr}{line[2:]}")
if idx == 0: # get the document identifier of the first line
docs.append(line.split("\t")[-1].strip())
docnr += 1
timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
filename = f"tracer_{','.join([str(x) for x in docs])}_{timestamp}.txt"
savename = Path(dumping_path, filename)
print(f"ANALYZER: Trying to write {savename}")
            try:
                with open(savename, "w", encoding="utf-8") as f:
                    f.writelines(output)
                print("ANALYZER: Success!")
return jsonify(message = f'Success! You can download the exported file under /download/{savename}',
links = [{'href': f'/download/{savename}',
'rel': 'download',
'type': 'GET'}]), 200
except:
print(f"ERROR: Analyzer: Could not write file {savename}")
return jsonify(message = f"ERROR: Analyzer: Could not write file {savename}",
links = [{'href': "error",
'rel': 'download',
'type': 'GET'}]), 500
| 5,338,407
|
def draw_transform(dim_steps, filetype="png", dpi=150):
"""create image from variable transormation steps
Args:
dim_steps(OrderedDict): dimension -> steps
* each element contains steps for a dimension
* dimensions are all dimensions in source and target domain
* each step is (from_level, to_level, action, (weight_level, weight_var))
filetype(str): "png" or "svg"
dpi(int): resolution for png image
"""
dot_cmd = get_dot_cmd(filetype=filetype, dpi=dpi)
dot_components = get_components(dim_steps)
dot_str = get_dot_digraph_str(dot_components)
image_bytes = get_image_bytes(dot_cmd, dot_str)
return image_bytes
| 5,338,408
|
def get_ei_border_ratio_from_exon_id(exon_id, regid2nc_dic,
exid2eibrs_dic=None,
ratio_mode=1,
last_exon_dic=None,
last_exon_ratio=2.5,
min_reg_cov=5,
min_reg_mode=1):
"""
Ratio is average of ratios at both exon ends (if embedded in introns),
or if first / last exon, only one ratio.
Assign -1, if only exon, or if both exon and intron border region read
count below min_reg_cov.
min_reg_cov:
Minimum region read coverage. If both exon and intron border region
have < min_reg_cov, return ratio of -1.
regid2nc_dic:
Contains exon/intron/border region ID -> [norm_cov, coverage, reg_len]
exid2eibrs_dic:
Exon ID to all EIB ratios list mapping.
ratio_mode:
How to calculate the returned EIBR ratio.
        1: Return the exon-intron border ratio with the higher coverage.
        2: Average the two exon-intron border ratios of the exon,
           if both have coverage >= min_reg_cov.
    last_exon_dic:
        Last transcript exon ID -> polarity
        Used for prioritizing the inner exon-intron border for multi-exon
        transcript last exons. Only effective for ratio_mode 1.
    last_exon_ratio:
        If the outer last exon read count is more than last_exon_ratio times
        the inner one, prioritize the outer border again, i.e. select the
        outer ratio for EIB ratio calculation.
>>> regid2nc_dic = {"t1_e1_ebe2" : [0.5, 10, 20], "t1_e1_ebi2" : [0.2, 4, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e1", regid2nc_dic)
(2.5, 'first_exon')
>>> get_ei_border_ratio_from_exon_id("t2_e1", regid2nc_dic)
(-1, 'single_exon')
>>> regid2nc_dic = {"t1_e2_ebe1" : [0.5, 10, 20], "t1_e2_ebi1" : [0.25, 5, 20], "t1_e2_ebe2" : [1.0, 20, 20], "t1_e2_ebi2" : [0.25, 5, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e2", regid2nc_dic, ratio_mode=2)
(3.0, 'inner_exon')
>>> regid2nc_dic = {"t1_e2_ebe1" : [0.5, 10, 20], "t1_e2_ebi1" : [0.25, 5, 20], "t1_e2_ebe2" : [0.1, 2, 20], "t1_e2_ebi2" : [0.1, 2, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e2", regid2nc_dic, ratio_mode=2)
(2.0, 'inner_exon_ds_lc')
>>> regid2nc_dic = {"t1_e2_ebe1" : [0.1, 2, 20], "t1_e2_ebi1" : [0.1, 2, 20], "t1_e2_ebe2" : [0.5, 10, 20], "t1_e2_ebi2" : [0.25, 5, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e2", regid2nc_dic, ratio_mode=2)
(2.0, 'inner_exon_us_lc')
>>> regid2nc_dic = {"t1_e1_ebe2" : [0.5, 10, 20], "t1_e1_ebi2" : [0.0, 0, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e1", regid2nc_dic)
(10, 'first_exon')
>>> regid2nc_dic = {"t1_e1_ebe2" : [0.0, 0, 20], "t1_e1_ebi2" : [0.5, 10, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e1", regid2nc_dic)
(0.0, 'first_exon')
>>> regid2nc_dic = {"t1_e2_ebe1" : [0.5, 10, 20], "t1_e2_ebi1" : [0.25, 5, 20], "t1_e2_ebe2" : [1.0, 20, 20], "t1_e2_ebi2" : [0.25, 5, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e2", regid2nc_dic, ratio_mode=1)
(4.0, 'inner_exon')
"""
exb_id_e1 = exon_id + "_ebe1"
exb_id_i1 = exon_id + "_ebi1"
exb_id_e2 = exon_id + "_ebe2"
exb_id_i2 = exon_id + "_ebi2"
# For single-exon transcripts.
if exb_id_e1 not in regid2nc_dic and exb_id_e2 not in regid2nc_dic:
if exid2eibrs_dic is not None:
            assert exon_id not in exid2eibrs_dic, "exon ID %s already stored in exid2eibrs_dic" %(exon_id)
exid2eibrs_dic[exon_id] = [-1]
return -1, "single_exon"
# Last exon.
if exb_id_e1 in regid2nc_dic and exb_id_e2 not in regid2nc_dic:
assert exb_id_i1 in regid2nc_dic, "exb_id_e1 %s in regid2nc_dic, but not exb_id_i1 %s" %(exb_id_e1, exb_id_i1)
ratio1 = -1
sel_crit = "last_exon"
if regid2nc_dic[exb_id_e1][1] >= min_reg_cov or regid2nc_dic[exb_id_i1][1] >= min_reg_cov:
if regid2nc_dic[exb_id_i1][0]:
ratio1 = regid2nc_dic[exb_id_e1][0] / regid2nc_dic[exb_id_i1][0]
else:
ratio1 = regid2nc_dic[exb_id_e1][1]
if exid2eibrs_dic is not None:
            assert exon_id not in exid2eibrs_dic, "exon ID %s already stored in exid2eibrs_dic" %(exon_id)
exid2eibrs_dic[exon_id] = [ratio1]
return ratio1, sel_crit
# First exon.
if exb_id_e1 not in regid2nc_dic and exb_id_e2 in regid2nc_dic:
assert exb_id_i2 in regid2nc_dic, "exb_id_e2 %s in regid2nc_dic, but not exb_id_i2 %s" %(exb_id_e2, exb_id_i2)
ratio2 = -1
sel_crit = "first_exon"
if regid2nc_dic[exb_id_e2][1] >= min_reg_cov or regid2nc_dic[exb_id_i2][1] >= min_reg_cov:
if regid2nc_dic[exb_id_i2][0]:
ratio2 = regid2nc_dic[exb_id_e2][0] / regid2nc_dic[exb_id_i2][0]
else:
ratio2 = regid2nc_dic[exb_id_e2][1]
if exid2eibrs_dic is not None:
            assert exon_id not in exid2eibrs_dic, "exon ID %s already stored in exid2eibrs_dic" %(exon_id)
exid2eibrs_dic[exon_id] = [ratio2]
return ratio2, sel_crit
# In-between exons.
if exb_id_e1 in regid2nc_dic and exb_id_e2 in regid2nc_dic:
assert exb_id_i1 in regid2nc_dic, "exb_id_e1 %s in regid2nc_dic, but not exb_id_i1 %s" %(exb_id_e1, exb_id_i1)
assert exb_id_i2 in regid2nc_dic, "exb_id_e2 %s in regid2nc_dic, but not exb_id_i2 %s" %(exb_id_e2, exb_id_i2)
ratio1 = -1
ratio2 = -1
# if exon_id == "ENST00000366553.3_e2":
# print(exon_id)
# print("regid2nc_dic[exb_id_i1][1]:", regid2nc_dic[exb_id_i1][1])
# print("regid2nc_dic[exb_id_e1][1]:", regid2nc_dic[exb_id_e1][1])
# print("regid2nc_dic[exb_id_e2][1]:", regid2nc_dic[exb_id_e2][1])
# print("regid2nc_dic[exb_id_i2][1]:", regid2nc_dic[exb_id_i2][1])
# print("regid2nc_dic[exb_id_i1][0]:", regid2nc_dic[exb_id_i1][0])
# print("regid2nc_dic[exb_id_e1][0]:", regid2nc_dic[exb_id_e1][0])
# print("regid2nc_dic[exb_id_e2][0]:", regid2nc_dic[exb_id_e2][0])
# print("regid2nc_dic[exb_id_i2][0]:", regid2nc_dic[exb_id_i2][0])
sel_crit = "inner_exon"
if regid2nc_dic[exb_id_e1][1] >= min_reg_cov or regid2nc_dic[exb_id_i1][1] >= min_reg_cov:
if regid2nc_dic[exb_id_i1][0]:
ratio1 = regid2nc_dic[exb_id_e1][0] / regid2nc_dic[exb_id_i1][0]
else:
ratio1 = regid2nc_dic[exb_id_e1][1]
else:
sel_crit += "_us_lc"
if regid2nc_dic[exb_id_e2][1] >= min_reg_cov or regid2nc_dic[exb_id_i2][1] >= min_reg_cov:
if regid2nc_dic[exb_id_i2][0]:
ratio2 = regid2nc_dic[exb_id_e2][0] / regid2nc_dic[exb_id_i2][0]
else:
ratio2 = regid2nc_dic[exb_id_e2][1]
else:
sel_crit += "_ds_lc"
if exid2eibrs_dic is not None:
assert exon_id not in exid2eibrs_dic, "exon ID %s already stored in exid2eibrs_dic" %(exon_id)
exid2eibrs_dic[exon_id] = [ratio1, ratio2]
if ratio1 == -1 and ratio2 != -1:
avg_ratio = ratio2
elif ratio1 != -1 and ratio2 == -1:
avg_ratio = ratio1
elif ratio1 == -1 and ratio2 == -1:
avg_ratio = -1
else:
if ratio_mode == 1:
cov_b1 = regid2nc_dic[exb_id_i1][0] + regid2nc_dic[exb_id_e1][0]
cov_b2 = regid2nc_dic[exb_id_i2][0] + regid2nc_dic[exb_id_e2][0]
if cov_b1 > cov_b2:
avg_ratio = ratio1
else:
avg_ratio = ratio2
if last_exon_dic is not None:
if exon_id in last_exon_dic:
sel_crit = "last_exon"
exon_pol = last_exon_dic[exon_id]
# Define inner borders.
cov_inner = cov_b1
ratio_inner = ratio1
cov_outer = cov_b2
ratio_outer = ratio2
if exon_pol == "-":
cov_inner = cov_b2
ratio_inner = ratio2
cov_outer = cov_b1
ratio_outer = ratio1
if cov_inner*last_exon_ratio >= cov_outer:
avg_ratio = ratio_inner
sel_crit += "_inner"
else:
avg_ratio = ratio_outer
sel_crit += "_outer"
elif ratio_mode == 2:
avg_ratio = statistics.mean([ratio1, ratio2])
else:
assert False, "invalid ratio_mode (%i)" %(ratio_mode)
return avg_ratio, sel_crit
assert False, "invalid get_ei_border_ratio_from_exon_id()"
| 5,338,409
|
def test_sysparm_input_display_value(mocker, requests_mock):
"""Unit test
Given
- create_record_command function
- command args, including input_display_value
- command raw response
When
- mock the requests url destination.
Then
- run the create command using the Client
Validate that the sysparm_input_display_value parameter has the correct value
"""
client = Client(server_url='https://server_url.com/', sc_server_url='sc_server_url', username='username',
password='password', verify=False, fetch_time='fetch_time',
sysparm_query='sysparm_query', sysparm_limit=10, timestamp_field='opened_at',
ticket_type='incident', get_attachments=False, incident_name='description')
mocker.patch.object(demisto, 'args', return_value={'input_display_value': 'true',
'table_name': "alm_asset",
'fields': "asset_tag=P4325434;display_name=my_test_record"
}
)
requests_mock.post('https://server_url.com/table/alm_asset?sysparm_input_display_value=True', json={})
    # will raise a requests_mock.exceptions.NoMockAddress if the URL is not as given in the requests_mock
create_record_command(client, demisto.args())
assert requests_mock.request_history[0].method == 'POST'
mocker.patch.object(demisto, 'args', return_value={'input_display_value': 'false',
'table_name': "alm_asset",
'fields': "asset_tag=P4325434;display_name=my_test_record"
}
)
requests_mock.post('https://server_url.com/table/alm_asset?sysparm_input_display_value=False', json={})
    # will raise a requests_mock.exceptions.NoMockAddress if the URL is not as given in the requests_mock
create_record_command(client, demisto.args())
assert requests_mock.request_history[1].method == 'POST'
| 5,338,410
|
def events(*_events):
""" A class decorator. Adds auxiliary methods for callback based event
notification of multiple watchers.
"""
def add_events(cls):
# Maintain total event list of both inherited events and events added
# using nested decorations.
try:
all_events = cls.events
except AttributeError:
cls.events = _events
else:
cls.events = all_events + _events
for e in _events:
helpers = {}
exec("""
@lazy
def {event}_handlers(self):
return []
def {event}(self, *a, **kw):
for h in list(self.{handlers}):
h(*a, **kw)
def watch_{event}(self, cb):
self.{handlers}.append(cb)
def unwatch_{event}(self, cb):
self.{handlers}.remove(cb)
""".format(event = e, handlers = e + "_handlers"),
globals(), helpers
)
for n, h in helpers.items():
setattr(cls, n, h)
return cls
return add_events
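# Usage sketch (added illustration, not part of the original snippet). It assumes a
# `lazy` cached-property decorator is available in this module's globals, as the
# exec'd template requires:
#
# @events("started", "stopped")
# class Worker:
#     pass
#
# w = Worker()
# w.watch_started(lambda job: print("started:", job))
# w.started("job-1")  # invokes every registered callback with the given arguments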
| 5,338,411
|
def test_losers_advantage(client):
"""
user with the lose_count of 3 and others with that of 0 attempt to
apply a lottery
test loser is more likely to win
target_url: /lotteries/<id>/draw
"""
users_num = 12
idx = 1
win_count = {i: 0 for i in range(1, users_num + 1)} # user.id -> count
with client.application.app_context():
target_lottery = Lottery.query.get(idx)
index = target_lottery.index
users = User.query.order_by(User.id).all()[:users_num]
users[0].lose_count = 3
user0_id = users[0].id
add_db(users2application(users, target_lottery))
token = get_token(client, admin)
resp = draw(client, token, idx, index)
for winner_json in resp.get_json():
winner_id = winner_json['id']
win_count[winner_id] += 1
# display info when this test fails
print("final results of applications (1's lose_count == 3)")
print(win_count)
assert win_count[user0_id] > 0
| 5,338,412
|
def print_column_vertically(target_column_name, dataset):
"""
Prints each variable of a column to a new line in console.
:param target_column_name: (str) Header of the column to be printed
:param dataset: dataset to column is in
:returns: Strings printed to console
:example:
>>> long_data = [["date"], ['2017/03/30 12:20:57 AM UTC+0200'], ['2017/03/31 1:38:41 AM UTC+0200'], ['2017/04/01 12:00:27 AM UTC+0200']]
>>> print_column_vertically("date", long_data)
2017/03/30 12:20:57 AM UTC+0200
2017/03/31 1:38:41 AM UTC+0200
2017/04/01 12:00:27 AM UTC+0200
"""
#############################################################################################################
from preprocessor.legacy_functions.select_column import select_column
selected_column = select_column(target_column_name, dataset)
    for value in selected_column:
        print(value)
| 5,338,413
|
def _find_nearest(array, value):
"""Find the nearest numerical match to value in an array.
Args:
array (np.ndarray): An array of numbers to match with.
        value (float): Value for which the closest entry in array should be found.
Returns:
np.array: The entry in array that is closest to value.
"""
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return array[idx]
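# Usage sketch (added illustration, not part of the original snippet):
# _find_nearest(np.array([0.0, 0.5, 1.0]), 0.4) -> 0.5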
| 5,338,414
|
def download_document(url):
"""Downloads document using BeautifulSoup, extracts the subject and all
text stored in paragraph tags
"""
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
title = soup.find('title').get_text()
document = ' '.join([p.get_text() for p in soup.find_all('p')])
return document
| 5,338,415
|
def rz_gate(phi: float = 0):
"""Functional for the single-qubit Pauli-Z rotation-gate.
Parameters
----------
phi : float
Rotation angle (in radians)
Returns
-------
rz : (2, 2) np.ndarray
"""
arg = 1j * phi / 2
return np.array([[np.exp(-arg), 0], [0, np.exp(arg)]])
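# Usage sketch (added illustration, not part of the original snippet):
# np.allclose(rz_gate(0), np.eye(2)) -> True
# rz_gate(np.pi) -> diag(-1j, 1j), up to floating-point error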
| 5,338,416
|
def test_append_to_history(qtbot, historylog):
"""
Test the append_to_history method.
Test adding text to a history file. Also test the go_to_eof config
option for positioning the cursor.
"""
hl = historylog
hw = historylog.get_widget()
# Toggle to move to the end of the file after appending.
hw.set_conf('go_to_eof', True)
# Force cursor to the beginning of the file.
text1 = 'import re\n'
path1 = create_file('test_history.py', text1)
hw.add_history(path1)
hw.editors[0].set_cursor_position('sof')
hw.append_to_history(path1, 'foo = "bar"\n')
assert hw.editors[0].toPlainText() == text1 + 'foo = "bar"\n'
assert hw.tabwidget.currentIndex() == 0
# Cursor moved to end.
assert hw.editors[0].is_cursor_at_end()
assert not hw.editors[0].linenumberarea.isVisible()
# Toggle to not move cursor after appending.
hw.set_conf('go_to_eof', False)
# Force cursor to the beginning of the file.
hw.editors[0].set_cursor_position('sof')
hw.append_to_history(path1, 'a = r"[a-z]"\n')
assert hw.editors[0].toPlainText() == ('import re\n'
'foo = "bar"\n'
'a = r"[a-z]"\n')
# Cursor not at end.
assert not hw.editors[0].is_cursor_at_end()
| 5,338,417
|
def dict_to_kvp(dictionary: dict) -> List[tuple]:
"""
Converts a dictionary to a list of tuples where each tuple has the key and value
of each dictionary item
:param dictionary: Dictionary to convert
:return: List of Key-Value Pairs
"""
return [(k, v) for k, v in dictionary.items()]
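# Usage sketch (added illustration, not part of the original snippet);
# equivalent to list(dictionary.items()):
# dict_to_kvp({"a": 1, "b": 2}) -> [("a", 1), ("b", 2)]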
| 5,338,418
|
def convert_and_remove_punctuation(text):
"""
remove punctuation that are not allowed, e.g. / \
convert Chinese punctuation into English punctuation, e.g. from「 to "
"""
# removal
text = text.replace("\\", "")
text = text.replace("\\", "")
text = text.replace("[", "")
text = text.replace("]", "")
text = text.replace("【", "")
text = text.replace("】", "")
text = text.replace("{", "")
text = text.replace("}", "")
# conversion
text = text.replace(u"\u201C", "\"")
text = text.replace(u"\u201D", "\"")
text = text.replace(u"\u2018", "'")
text = text.replace(u"\u2019", "'")
text = text.replace("「", "\"")
text = text.replace("」", "\"")
text = text.replace("『", "\"")
text = text.replace("』", "\"")
text = text.replace("quot;", "\"")
return text
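# Usage sketch (added illustration, not part of the original snippet):
# convert_and_remove_punctuation("「好」[x]") -> '"好"x'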
| 5,338,419
|
def create_random_context(dialog,rng,minimum_context_length=2,max_context_length=20):
"""
    Samples a random context from a dialog. Contexts are uniformly sampled from the whole dialog.
    :param dialog: list of dialog turns
    :param rng: random number generator used for sampling
    :return: context string, index of the next utterance that follows the context
"""
# sample dialog context
#context_turns = rng.randint(minimum_context_length,len(dialog)-1)
max_len = min(max_context_length, len(dialog)) - 2
if max_len <= minimum_context_length:
context_turns = max_len
else:
context_turns = rng.randint(minimum_context_length,max_len)
# create string
return dialog_turns_to_string(dialog[:context_turns]),context_turns
| 5,338,420
|
def get_video_collection_items(**kwargs):
"""
Get the contents of video collections
"""
get(
url="/v2/videos/collections/" + kwargs["id"] + "/items",
params=kwargs,
json_data=None,
)
| 5,338,421
|
def dev_test_new_schema_version(dbname, sqldb_dpath, sqldb_fname,
version_current, version_next=None):
"""
hacky function to ensure that only developer sees the development schema
and only on test databases
"""
TESTING_NEW_SQL_VERSION = version_current != version_next
if TESTING_NEW_SQL_VERSION:
print('[sql] ATTEMPTING TO TEST NEW SQLDB VERSION')
devdb_list = ['PZ_MTEST', 'testdb1', 'testdb0', 'testdb2',
'testdb_dst2', 'emptydatabase']
testing_newschmea = ut.is_developer() and dbname in devdb_list
#testing_newschmea = False
#ut.is_developer() and ibs.get_dbname() in ['PZ_MTEST', 'testdb1']
if testing_newschmea:
# Set to true until the schema module is good then continue tests
# with this set to false
testing_force_fresh = True or ut.get_argflag('--force-fresh')
# Work on a fresh schema copy when developing
dev_sqldb_fname = ut.augpath(sqldb_fname, '_develop_schema')
sqldb_fpath = join(sqldb_dpath, sqldb_fname)
dev_sqldb_fpath = join(sqldb_dpath, dev_sqldb_fname)
ut.copy(sqldb_fpath, dev_sqldb_fpath, overwrite=testing_force_fresh)
# Set testing schema version
#ibs.db_version_expected = '1.3.6'
print('[sql] TESTING NEW SQLDB VERSION: %r' % (version_next,))
#print('[sql] ... pass --force-fresh to reload any changes')
return version_next, dev_sqldb_fname
else:
print('[ibs] NOT TESTING')
return version_current, sqldb_fname
| 5,338,422
|
def _get_distance_euclidian(row1: np.array, row2: np.array):
"""
    _get_distance_euclidian
    Returns the Euclidean distance between two rows
    (treated as numeric vectors), taking into account all columns of the given data.
"""
distance = 0.
for i, _ in enumerate(row1):
distance += (row1[i] - row2[i]) ** 2
return np.sqrt(distance)
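# Equivalent vectorized form (added note, not part of the original snippet):
# float(np.linalg.norm(np.asarray(row1, float) - np.asarray(row2, float)))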
| 5,338,423
|
def _get_ip_from_response(response):
"""
Filter ipv4 addresses from string.
Parameters
----------
response: str
String with ipv4 addresses.
Returns
-------
list: list with ip4 addresses.
"""
ip = re.findall(r'\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b', response)
return ip
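# Usage sketch (added illustration, not part of the original snippet):
# _get_ip_from_response("ping from 10.0.0.1 to 192.168.1.42 ok")
# -> ['10.0.0.1', '192.168.1.42']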
| 5,338,424
|
def create_processor(
options: options_pb2.ConvertorOptions,
theorem_database: Optional[proof_assistant_pb2.TheoremDatabase] = None,
tactics: Optional[List[deephol_pb2.Tactic]] = None) -> ProofLogToTFExample:
"""Factory function for ProofLogToTFExample."""
if theorem_database and options.theorem_database_path:
raise ValueError(
        'Both theorem database as well as a path to load it from file '
'provided. Only provide one.')
if not theorem_database:
theorem_database = io_util.load_theorem_database_from_file(
str(options.theorem_database_path))
if tactics and options.tactics_path:
raise ValueError('Both tactics as well as a path to load it from '
'provided. Only provide one.')
if not tactics:
tactics = io_util.load_tactics_from_file(str(options.tactics_path), None)
tactics_name_id_map = {tactic.name: tactic.id for tactic in tactics}
if options.replacements_hack:
    logging.warning('Replacements hack is enabled.')
tactics_name_id_map.update({
'GEN_TAC': 8,
'MESON_TAC': 11,
'CHOOSE_TAC': 34,
})
if options.format != options_pb2.ConvertorOptions.HOLPARAM:
raise ValueError('Unknown options_pb2.ConvertorOptions.TFExampleFormat.')
return ProofLogToTFExample(tactics_name_id_map, theorem_database, options)
| 5,338,425
|
def adsAddRoute(net_id, ip_address):
"""
:summary: Establish a new route in the AMS Router.
:param pyads.structs.SAmsNetId net_id: net id of routing endpoint
:param str ip_address: ip address of the routing endpoint
"""
add_route = _adsDLL.AdsAddRoute
add_route.restype = ctypes.c_long
# Convert ip address to bytes (PY3) and get pointer.
ip_address = ctypes.c_char_p(ip_address.encode('utf-8'))
error_code = add_route(net_id, ip_address)
if error_code:
raise ADSError(error_code)
| 5,338,426
|
def EscapeShellArgument(s):
"""Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
| 5,338,427
|
def secrets():
"""interact with secret packages and scripts only"""
if not os.path.exists(DOTSECRETS_PATH):
print(f"{DOTSECRETS_PATH} does not exist", file=sys.stderr)
sys.exit(1)
| 5,338,428
|
def login_invalid(request, error_type):
""" Displays the index with an error message. """
    # TODO - encode authentication error message in URI
try:
message = INVALID_LOGIN_MESSAGE[error_type]
except KeyError:
message = "Erreur inconnue"
context = {'form': LoginForm(), 'message': message }
return render(request, 'index.htm', context)
| 5,338,429
|
def lsh(B_BANDS, docIdList, sig):
""" Applies the LSH algorithm. This function first divides the signature matrix into bands and hashes each column onto buckets.
:param B_BANDS: Number of bands in signature matrix
:param docIdList: List of document ids
:param sig: signature matrix
:return: List of document to its hash along with the buckets
"""
numHash = number_of_hash
bands = getbestb(threshold,number_of_hash)
rows = numHash / bands
d = 1681
    # One dictionary per band; each dict holds the buckets for vectors hashed in that band
    # (a list comprehension is used so the bands do not all share a single dict object)
    buckets = [dict() for _ in range(bands)]
# Mapping from docid to h to find the buckets in which document with docid was hashed
docth = np.zeros((d, bands), dtype=int) # doc to hash
for i in range(bands):
for j in range(d):
low = int(i*rows) # First row in a band
high = min(int((i+1)*rows), numHash)# Last row in current band
l = []
for x in range(low, high):
l.append(sig[x, j]) # Append each row into l
h = int(hash(tuple(l))) % (d+1)
            try:
                buckets[i][h].append(j)  # If a bucket already exists for this hash value, append this document to it
            except KeyError:
                buckets[i][h] = [j]  # Otherwise start a new bucket (a list, so later appends work)
docth[j][i] = h
# print(docth)
return docth, buckets
| 5,338,430
|
def langevin_coefficients(
temperature,
dt,
friction,
masses):
"""
Compute coefficients for langevin dynamics
Parameters
----------
temperature: float
units of Kelvin
dt: float
units of picoseconds
friction: float
        collision frequency in inverse picoseconds (1/ps)
masses: array
mass of each atom in standard mass units
Returns
-------
tuple (ca, cb, cc)
ca is scalar, and cb and cc are n length arrays
that are used during langevin dynamics
"""
vscale = np.exp(-dt*friction)
if friction == 0:
fscale = dt
else:
fscale = (1-vscale)/friction
kT = BOLTZ * temperature
nscale = np.sqrt(kT*(1-vscale*vscale)) # noise scale
invMasses = 1.0/masses
sqrtInvMasses = np.sqrt(invMasses)
ca = vscale
cb = fscale*invMasses
cc = nscale*sqrtInvMasses
return ca, cb, cc
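# Usage sketch (added illustration, not part of the original snippet). It assumes
# BOLTZ is defined in this module in units consistent with the masses:
# ca, cb, cc = langevin_coefficients(300.0, 1.5e-3, 1.0, np.array([12.0, 1.0]))
# ca is a scalar; cb and cc each have one entry per atom.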
| 5,338,431
|
def waitpid_handle_exceptions(pid, deadline):
"""Wrapper around os.waitpid()/waitpid_with_timeout(), which waits until
either a child process exits or the deadline elapses, and retries if certain
exceptions occur.
Args:
pid: Process ID to wait for, or -1 to wait for any child process.
deadline: If non-zero, waiting stops when time.time() exceeds this value.
If zero, waiting stops when a child process exits.
Returns:
(pid, status): Same as for waitpid_with_timeout(). |pid| is non-zero if and
only if a child exited during the wait.
Raises:
Same as for os.waitpid(), except:
OSError with errno==EINTR causes the wait to be retried (this can happen,
for example, if this parent process receives SIGHUP).
OSError with errno==ECHILD means there are no child processes, and so
this function sleeps until |deadline|. If |deadline| is zero, this is an
error and the OSError exception is raised in this case.
"""
while True:
try:
if deadline == 0:
pid_result, status = os.waitpid(pid, 0)
else:
pid_result, status = waitpid_with_timeout(pid, deadline)
return (pid_result, status)
    except OSError as e:
if e.errno == errno.EINTR:
continue
elif e.errno == errno.ECHILD:
now = time.time()
if deadline == 0:
# No time-limit and no child processes. This is treated as an error
# (see docstring).
raise
elif deadline > now:
time.sleep(deadline - now)
return (0, 0)
else:
# Anything else is an unexpected error.
raise
| 5,338,432
|
def tedor_ideal(t_mix, a, dist, t2, j_cc, obs='C13', pulsed='N15', vr=14000, return_t=False):
"""
Makes a SpinEvolution input file from template file "tedor_ideal_template", calls SpinEvolution, parses the output,
and applies phenomenological scaling and exponential relaxation.
The tedor_ideal is a calculation for interpreting and ultimately fitting ZF-TEDOR build-up curves
Parameters
----------
a: float, scaling factor
dist: float, distance between 13C-15N
    t2: float, $T_2$ relaxation time
vr: float, MAS speed in HZ
j_cc: float, carbon carbon J coupling in Hz
return_t: bool, should the function return t=np.arange(0, n)*tr
t_mix: array of mixing experimental mixing times in ms
obs: string, the observed nucleus for the TEDOR experiment
pulsed: string, the nucleus with the REDOR pulses on it
Returns
-------
signal: array, len(t_mix)
or
time; signal: array, len(n); array, len(t_mix)
"""
# Build the simulation program from the template
sim_params = {'dist': dist, 'vr': vr / 1000, 'tr': 1 / vr, 'obs': obs, 'pulsed': pulsed}
with open('templates/tedor_ideal_template', 'r') as fid:
template = fid.read()
with open('templates/tedor_ideal_step', 'w') as fid:
fid.write(template.format(**sim_params))
cmd = ['/opt/spinev/spinev', 'templates/tedor_ideal_step']
# Run the simulation
subprocess.call(cmd)
# Parse the results
output_file = 'templates/tedor_ideal_step_re.dat'
results = np.loadtxt(output_file)
time = results[:, 0]
signal = results[:, 1]
# Apply phenomenological corrections
signal = a * signal * (np.cos(np.pi * (j_cc * 1000 / 2))**2) * np.exp(-time / t2)
time_points = []
signal_points = []
for i in t_mix:
ind = (np.where((np.trunc(time * 100) / 100) == i)[0][0])
time_points.append(time[ind])
signal_points.append(signal[ind])
if return_t:
return time_points, signal_points
else:
return signal_points
| 5,338,433
|
def preprocess(image, image_size):
"""
Preprocess
    pre-processes the image by Gaussian blur, perspective transform,
    and resizing to a square that respects the aspect ratio
    :param image: image of the display, as read with OpenCV
    :param image_size: target size of the square output image
:return out_image: output image after preprocessing
"""
    # blur
blurred = cv2.GaussianBlur(image, (5, 5), 1)
# perspective transformation
out_img = myPerspectiveTransformation(blurred)
# resize it
out_img = resizeSquareRespectAcpectRatio(
out_img,
image_size,
cv2.INTER_AREA
)
return out_img
| 5,338,434
|
def download(distributor: Distributor, max_try:int = 4) -> list[TrainInformation]|None:
"""Download train information from distributor.
If response status code was 500-599, this function retries up to max_try times.
Parameters
----------
distributor : Distributor
        Distributor of the information source.
max_try : int, optional
If response status code was 500-599, it retries up to this value.(default = 4)
Returns
-------
list[TrainInformation]|None
List of train information which is downloaded from web, or None if consumerKey is unset.
Raises
------
InvalidParameterError
HTTP status code was 400.
InvalidConsumerKeyError
HTTP status code was 401.
Forbidden
HTTP status code was 403.
NotFound
HTTP status code was 404.
OdptServerError
HTTP status code was 500-599.
UnknownHTTPError
HTTP status code was unexpected.
"""
if not distributor.is_valid():
return None
query = {}
query["acl:consumerKey"] = distributor.consumer_key
json_dict:list[TrainInformation_jsondict] = []
for try_count in range(max_try):
try:
with urllib.request.urlopen("%s?%s" % (distributor.URL, urllib.parse.urlencode(query))) as f:
json_dict = json.load(f)
break
except HTTPError as e:
match e.code:
case 400:
raise InvalidParameterError(e)
case 401:
raise InvalidConsumerKeyError(e)
case 403:
raise Forbidden(e)
case 404:
raise NotFound(e, distributor.value)
case code if 500 <= code < 600:
if try_count == max_try-1:
raise OdptServerError(e)
else:
time.sleep(1+try_count)
continue
case _:
raise UnknownHTTPError(e)
except Exception as e:
if try_count == max_try-1:
raise
else:
time.sleep(1+try_count)
continue
return TrainInformation.from_list(json_dict)
| 5,338,435
|
def add_payloads(prev_layer, input_spikes):
"""Get payloads from previous layer."""
# Get only payloads of those pre-synaptic neurons that spiked
payloads = tf.where(tf.equal(input_spikes, 0.),
tf.zeros_like(input_spikes), prev_layer.payloads)
print("Using spikes with payloads from layer {}".format(prev_layer.name))
return input_spikes + payloads
| 5,338,436
|
def read_csv_to_data(path: str, delimiter: str = ",", headers: list = []):
"""A zero-dependancy helper method to read a csv file
Given the path to a csv file, read data row-wise. This data may be later converted to a dict of lists if needed (column-wise).
Args:
path (str): Path to csv file
delimiter (str, optional): Delimiter to split the rows by. Defaults to ','
headers: (list, optional): Given header list for a csv file. Defaults to an empty list, which results in the first row being used as a header.
Returns:
A list of dictionary values (list of rows) representing the file being read
"""
data = []
with open(path, "r") as f:
header = headers
if len(headers) == 0:
            header = f.readline().split(delimiter)
for line in f:
entry = {}
            for i, value in enumerate(line.split(delimiter)):
entry[header[i].strip()] = value.strip()
data.append(entry)
return data
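# Usage sketch (added illustration, not part of the original snippet), for a file
# "points.csv" (hypothetical name) whose first two lines are "x,y" and "1,2":
# read_csv_to_data("points.csv") -> [{'x': '1', 'y': '2'}]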
| 5,338,437
|
def main_menu(update, context):
"""Handling the main menu
:param update: Update of the sent message
:param context: Context of the sent message
:return: Status for main menu
"""
keyboard = [['Eintragen'],
['Analyse']]
update.message.reply_text(
'Was möchtest du machen?',
reply_markup=ReplyKeyboardMarkup(keyboard)
)
return MAIN
| 5,338,438
|
def build_model(inputs, num_classes, is_training, hparams):
"""Constructs the vision model being trained/evaled.
Args:
inputs: input features/images being fed to the image model build built.
num_classes: number of output classes being predicted.
is_training: is the model training or not.
hparams: additional hyperparameters associated with the image model.
Returns:
The logits of the image model.
"""
scopes = setup_arg_scopes(is_training)
if len(scopes) != 1:
    raise ValueError('Nested scopes deprecated in py3.')
with scopes[0]:
if hparams.model_name == 'pyramid_net':
logits = build_shake_drop_model(inputs, num_classes, is_training)
elif hparams.model_name == 'wrn':
logits = build_wrn_model(inputs, num_classes, hparams.wrn_size)
elif hparams.model_name == 'shake_shake':
logits = build_shake_shake_model(inputs, num_classes, hparams,
is_training)
elif hparams.model_name == 'resnet':
logits = build_resnet_model(inputs, num_classes, hparams,
is_training)
else:
raise ValueError("Unknown model name.")
return logits
| 5,338,439
|
def get_test_app_for_status_code_testing(schedule=False):
"""
:return: Flask Test Application with the right settings
"""
import flask_monitoringdashboard
app = Flask(__name__)
@app.route('/return-a-simple-string')
def return_a_simple_string():
return 'Hello, world'
@app.route('/return-a-tuple')
def return_a_tuple():
return 'Hello, world', 404
@app.route('/ridiculous-return-value')
def return_ridiculous_return_value():
return 'hello', 'ridiculous'
@app.route('/return-jsonify-default-status-code')
def return_jsonify_default_status_code():
return jsonify({
'apples': 'banana'
})
@app.route('/return-jsonify-with-custom-status-code')
def return_jsonify_with_custom_status_code():
response = jsonify({
'cheese': 'pears'
})
response.status_code = 401
return response
@app.route('/unhandled-exception')
def unhandled_exception():
potatoes = 1000
bananas = 0
return potatoes / bananas
app.config['SECRET_KEY'] = flask_monitoringdashboard.config.security_token
app.testing = True
flask_monitoringdashboard.user_app = app
app.config['WTF_CSRF_ENABLED'] = False
app.config['WTF_CSRF_METHODS'] = []
flask_monitoringdashboard.config.get_group_by = lambda: '12345'
flask_monitoringdashboard.bind(app=app, schedule=schedule)
TEST_CACHE = {'main': EndpointInfo()}
flask_monitoringdashboard.core.cache.memory_cache = TEST_CACHE
return app
| 5,338,440
|
def current_user():
"""Returns the value of the USER environment variable"""
return os.environ['USER']
| 5,338,441
|
def run_multiple_cases(x, y, z, door_height, door_width, t_amb,
HoC, time_ramp, hrr_ramp, num, door, wall,
simulation_time, dt_data):
"""
Generate multiple CFAST input files and calls other functions
"""
resulting_temps = np.array([])
for i in range(len(door_width)):
casename = gen_input(x, y, z, door_height[i], door_width[i],
t_amb[i], HoC, time_ramp, hrr_ramp, num, door,
wall, simulation_time, dt_data)
run_cfast(casename)
temps, outfile = read_cfast(casename)
outfile.close()
hgl = temps[:,1]
resulting_temps = np.append(hgl[-1], resulting_temps)
return(resulting_temps)
| 5,338,442
|
def get_all():
"""
    Fetches all tuples of the Estudiantes relation.
    :returns: All tuples of the relation.
:rtype: list
"""
try:
conn = helpers.get_connection()
cur = conn.cursor()
cur.execute(ESTUDIANTE_QUERY_ALL)
result = cur.fetchall()
        # Commit the changes and release resources
conn.commit()
cur.close()
conn.close()
return result
    except Exception:
        raise
| 5,338,443
|
def _create_seqs_name_dict(in_file):
"""read fasta file and populate dict"""
global name_data
name = ""
with open(in_file) as in_handle:
for line in in_handle:
if not line.startswith(">"):
name_data.update({line.strip(): name})
else:
name = line.strip().replace(">", "")
| 5,338,444
|
def load_fgong(filename, fmt='ivers', return_comment=False,
return_object=True, G=None):
"""Given an FGONG file, returns NumPy arrays ``glob`` and ``var`` that
correspond to the scalar and point-wise variables, as specified
in the `FGONG format`_.
.. _FGONG format: https://www.astro.up.pt/corot/ntools/docs/CoRoT_ESTA_Files.pdf
Also returns the first four lines of the file as a `comment`, if
desired.
The version number ``ivers`` is used to infer the format of floats
if ``fmt='ivers'``.
If ``return_object`` is ``True``, instead returns an :py:class:`FGONG`
object. This is the default behaviour as of v0.0.12. The old
behaviour will be dropped completely from v0.1.0.
Parameters
----------
filename: str
Name of the FGONG file to read.
fmt: str, optional
Format string for floats in `glob` and `var`. If ``'ivers'``,
        uses ``%16.9E`` if the file's ``ivers < 1000`` or ``%26.18E3`` if
``ivers >= 1000``. If ``'auto'``, tries to guess the size of each
float. (default: 'ivers')
return_comment: bool, optional
If ``True``, return the first four lines of the FGONG file.
These are comments that are not used in any calculations.
Returns
-------
glob: NumPy array
The scalar (or global) variables for the stellar model
var: NumPy array
The point-wise variables for the stellar model. i.e. things
that vary through the star like temperature, density, etc.
comment: list of strs, optional
The first four lines of the FGONG file. These are comments
that are not used in any calculations. Only returned if
``return_comment=True``.
"""
with tomso_open(filename, 'rb') as f:
comment = [f.readline().decode('utf-8').strip() for i in range(4)]
nn, iconst, ivar, ivers = [int(i) for i in f.readline().decode('utf-8').split()]
# lines = f.readlines()
lines = [line.decode('utf-8').lower().replace('d', 'e')
for line in f.readlines()]
tmp = []
if fmt == 'ivers':
if ivers < 1000:
N = 16
else:
N = 27
# try to guess the length of each float in the data
elif fmt == 'auto':
N = len(lines[0])//5
else:
N = len(fmt % -1.111)
for line in lines:
for i in range(len(line)//N):
s = line[i*N:i*N+N]
# print(s)
if s[-9:] == '-Infinity':
s = '-Inf'
elif s[-9:] == ' Infinity':
s = 'Inf'
elif s.lower().endswith('nan'):
s = 'nan'
elif 'd' in s.lower():
s = s.lower().replace('d','e')
tmp.append(float(s))
glob = np.array(tmp[:iconst])
var = np.array(tmp[iconst:]).reshape((-1, ivar))
if return_object:
return FGONG(glob, var, ivers=ivers, G=G,
description=comment)
else:
warnings.warn("From tomso 0.1.0+, `fgong.load_fgong` will only "
"return an `FGONG` object: use `return_object=True` "
"to mimic future behaviour",
FutureWarning)
if return_comment:
return glob, var, comment
else:
return glob, var
| 5,338,445
|
def hla_saturation_flags(drizzled_image, flt_list, catalog_name, catalog_data, proc_type, param_dict, plate_scale,
column_titles, diagnostic_mode):
"""Identifies and flags saturated sources.
Parameters
----------
drizzled_image : string
drizzled filter product image filename
flt_list : list
list of calibrated images that were drizzle-combined to produce image specified by input parameter
'drizzled_image'
catalog_name : string
drizzled filter product catalog filename to process
catalog_data : astropy.Table object
drizzled filter product catalog data to process
proc_type : string
sourcelist generation type.
param_dict : dictionary
Dictionary of instrument/detector - specific drizzle, source finding and photometric parameters
plate_scale : float
plate scale, in arcseconds/pixel
column_titles : dictionary
Relevant column titles
diagnostic_mode : bool
write intermediate files?
Returns
-------
phot_table_rows : astropy.Table object
drizzled filter product catalog data with updated flag values
"""
image_split = drizzled_image.split('/')[-1]
channel = drizzled_image.split("_")[4].upper()
    if channel == 'IR': # TODO: Test an IR case just to make sure that IR shouldn't be skipped
return catalog_data
# -------------------------------------------------------------------
# STEP THROUGH EACH APPLICABLE FLT IMAGE, DETERMINE THE COORDINATES
# FOR ALL SATURATION FLAGGED PIXELS, AND TRANSFORM THESE COORDINATES
# INTO THE DRIZZLED IMAGE REFERENCE FRAME.
# -------------------------------------------------------------------
num_flts_in_main_driz = len(flt_list)
flt_list.sort()
log.info(' ')
log.info("Current Working Directory: {}".format(os.getcwd()))
log.info(' ')
log.info('LIST OF FLTS IN {}: {}'.format(drizzled_image.split('/')[-1], flt_list))
log.info(' ')
log.info('NUMBER OF FLTS IN {}: {}'.format(drizzled_image.split('/')[-1], num_flts_in_main_driz))
log.info(' ')
# ----------------------------------------------------
# EXTRACT DQ DATA FROM FLT IMAGE AND CREATE A LIST
# OF "ALL" PIXEL COORDINATES WITH A FLAG VALUE OF 256
# ----------------------------------------------------
if ((channel.lower() != 'wfpc2') and (channel.lower() != 'pc')):
if channel.lower() in ['wfc', 'uvis']:
image_ext_list = ["[sci,1]", "[sci,2]"]
if channel.lower() in ['sbc', 'hrc']:
image_ext_list = ["[sci,1]"]
dq_sat_bit = 256
if channel.lower() == 'wfpc2':
image_ext_list = ["[sci,1]", "[sci,2]", "[sci,3]", "[sci,4]"]
dq_sat_bit = 8
if channel.lower() == 'pc':
image_ext_list = ["[sci,1]"]
dq_sat_bit = 8
# build list of arrays
drz_sat_xy_coords_list = []
for flt_cnt, flt_image in enumerate(flt_list):
for ext_cnt, image_ext in enumerate(image_ext_list):
ext_part = image_ext.split(',')[1].split(']')[0]
try:
if ((channel.lower() != 'wfpc2') and (channel.lower() != 'pc')):
flt_data = fits.getdata(flt_image, 'DQ', int(ext_part))
if ((channel.lower() == 'wfpc2') or (channel.lower() == 'pc')):
flt_data = fits.getdata(flt_image.replace("_c0m", "_c1m"), 'SCI', int(ext_part))
except KeyError:
log.info(' ')
log.info('WARNING: There is only one set of file extensions in {}'.format(flt_image))
log.info(' ')
continue
# TODO: Should we also look for pixels flagged with DQ value 2048 (A to D saturation) for ACS data?
# ----------------------------------------------------
# DETERMINE IF ANY OF THE PIXELS LOCATED IN THE GRID
# HAVE A BIT VALUE OF 256, I.E. FULL WELL SATURATION.
# ----------------------------------------------------
# NOTE: NUMPY ARRAYS REPORT Y COORD VALUES FIRST AND
# X COORD VALUES SECOND AS FOLLOWS:
#
# --> numpy.shape(flt_data)
# (2051, 4096)
#
# WHERE 2051 IS THE NUMBER OF PIXELS IN THE Y
# DIRECTION, AND 4096 IS THE NUMBER OF PIXELS
# IN THE X DIRECTION.
# ----------------------------------------------------
bit_flt_data = dq_sat_bit & flt_data
complete_sat_coords = numpy.where(bit_flt_data == dq_sat_bit)
if len(complete_sat_coords[0]) == 0:
continue
# -------------------------------------------------
# RESTRUCTURE THE LIST OF X AND Y COORDINATES FROM
# THE FLT FILE THAT HAVE BEEN FLAGGED AS SATURATED
# -------------------------------------------------
nsat = len(complete_sat_coords[0])
x_y_array = numpy.empty((nsat, 2), dtype=int)
x_y_array[:, 0] = complete_sat_coords[1]
x_y_array[:, 1] = complete_sat_coords[0]
# ---------------------------------------------------
# WRITE FLT COORDS TO A FILE FOR DIAGNOSTIC PURPOSES
# ---------------------------------------------------
if diagnostic_mode:
flt_xy_coord_out = flt_image.split('/')[-1].split('.')[0] + '_sci' + str(ext_cnt + 1) + '.txt'
outfile = open(flt_xy_coord_out, 'w')
for flt_xy_coord in x_y_array:
x = flt_xy_coord[0]
y = flt_xy_coord[1]
outfile.write(str(x) + ' ' + str(y) + '\n')
outfile.close()
# ----------------------------------------------------
# CONVERT SATURATION FLAGGED X AND Y COORDINATES FROM
# THE FLT IMAGE INTO RA AND DEC
# ----------------------------------------------------
flt_ra_dec_coords = xytord(x_y_array, flt_image, image_ext)
# -------------------------------------------------
# CONVERT RA & DEC VALUES FROM FLT REFERENCE FRAME
# TO THAT OF THE DRIZZLED IMAGE REFERENCE FRAME
# -------------------------------------------------
drz_sat_xy_coords_list.append(rdtoxy(flt_ra_dec_coords, drizzled_image, "[sci,1]"))
log.info(' ')
log.info('FLT IMAGE = {}'.format(flt_image.split('/')[-1]))
log.info('IMAGE EXT = {}'.format(image_ext))
log.info(' ')
# ----------------------------------------------------------------
# IF NO SATURATION FLAGS EXIST IN ANY OF THE FLT FILES, THEN SKIP
# ----------------------------------------------------------------
if len(drz_sat_xy_coords_list) == 0:
log.info(' ')
log.info('*******************************************************************************************')
log.info('NO SATURATION FLAGGED PIXELS EXIST IN ANY OF THE FLT FILES FOR:')
log.info(' --> {}'.format(drizzled_image.split('/')[-1]))
log.info('*******************************************************************************************')
log.info(' ')
return catalog_data
# ------------------------------
# now concatenate all the arrays
# ------------------------------
full_sat_list = numpy.concatenate(drz_sat_xy_coords_list)
# --------------------------------------------
# WRITE RA & DEC FLT CONVERTED X & Y DRIZZLED
# IMAGE COORDINATES TO A TEXT FILE
# --------------------------------------------
if diagnostic_mode:
drz_coord_file = drizzled_image.split('/')[-1].split('.')[0] + '_ALL_FLT_SAT_FLAG_PIX.txt'
drz_coord_out = open(drz_coord_file, 'w')
for coord in full_sat_list:
drz_coord_out.write(str(coord[0]) + ' ' + str(coord[1]) + '\n')
drz_coord_out.close()
# ----------------------------------------------------
# GET SOURCELIST X AND Y VALUES
# ----------------------------------------------------
all_detections = catalog_data
nrows = len(all_detections)
full_coord_list = numpy.empty((nrows, 2), dtype=float)
for row_count, detection in enumerate(all_detections):
full_coord_list[row_count, 0] = float(detection[column_titles["x_coltitle"]])
full_coord_list[row_count, 1] = float(detection[column_titles["y_coltitle"]])
"""
# This option to determine saturation from the drizzled image alone should complement
# the computation based on the DQ array, since the IR (and MAMA?) detectors will not
# have saturated sources that 'bleed' or 'top out'...
#
# Extract Ap2 radius from parameter dict
#
ap2 = param_dict['catalog generation']['aperture_2']
#
# Convert source positions into slices
#
apers = CircularAperture(full_coord_list, ap2)
#
# Determine whether any source (slice) has more than 3 pixels
# within 10% of the max value in the source slice.
# If True, flag as saturated.
#
drz_img = fits.getdata(drizzled_image, ext=1)
img_sat = numpy.zeros(len(full_coord_list), dtype=bool)
for n,aper in enumerate(apers):
if (drz_img[aper.bbox.slices] > drz_img[aper.bbox.slices].max() * 0.9).sum() > 3:
img_sat[n] = True
del drz_img
"""
# ----------------------------------------------------
# CREATE SUB-GROUPS OF SATURATION-FLAGGED COORDINATES
# ----------------------------------------------------
proc_time1 = time.ctime()
log.info(' ')
log.info('PROC_TIME_1: {}'.format(proc_time1))
log.info(' ')
# ----------------------------------
# Convert aperture radius to pixels
# ----------------------------------
ap2 = param_dict['catalog generation']['aperture_2']
radius = round((ap2/plate_scale) + 0.5) * 2.
log.info(' ')
log.info('THE RADIAL DISTANCE BEING USED IS {} PIXELS'.format(str(radius)))
log.info(' ')
# do the cross-match using xymatch
log.info('Matching {} saturated pixels with {} catalog sources'.format(len(full_sat_list), len(full_coord_list)))
psat, pfull = xymatch(full_sat_list, full_coord_list, radius, multiple=True, verbose=False)
    log.info('Found {} cross-matches (including duplicates)'.format(len(psat)))
saturation_flag = numpy.zeros(len(full_coord_list), dtype=bool)
saturation_flag[pfull] = True
proc_time2 = time.ctime()
log.info(' ')
log.info('PROC_TIME_2: {}'.format(proc_time2))
log.info(' ')
# ------------------------------------------------------------------
    # REMOVE DUPLICATE DETECTIONS FROM THE LIST, "group", CREATED FROM
# MATCHING SATURATION FLAGGED FLT PIXELS TO FINAL SOURCE DETECTIONS
# ------------------------------------------------------------------
nsaturated = saturation_flag.sum()
if nsaturated == 0:
log.info(' ')
log.info('**************************************************************************************')
log.info('NOTE: NO SATURATED SOURCES WERE FOUND FOR: {}'.format(image_split))
log.info('**************************************************************************************')
log.info(' ')
return catalog_data
else:
log.info(' ')
log.info('FLAGGED {} SOURCES'.format(nsaturated))
log.info(' ')
if diagnostic_mode:
sat_coord_file = drizzled_image.split('/')[-1].split('.')[0] + '_INTERMEDIATE.txt'
sat_coord_out = open(sat_coord_file, 'w')
for sat_coord in full_coord_list[saturation_flag, :]:
sat_coord_out.write(str(sat_coord[0]) + ' ' + str(sat_coord[1]) + '\n')
sat_coord_out.close()
# --------------------------------------------------------------------------
# WRITE SAT FLAGS TO OUTPUT PHOT TABLE BASED ON flag_src_central_pixel_list
# --------------------------------------------------------------------------
phot_table = catalog_name
phot_table_root = phot_table.split('.')[0]
phot_table_rows = catalog_data
for i, table_row in enumerate(phot_table_rows):
if saturation_flag[i]:
table_row["Flags"] = int(table_row["Flags"]) | 4
phot_table_rows = flag4and8_hunter_killer(phot_table_rows, column_titles)
if diagnostic_mode:
phot_table_temp = phot_table_root + '_SATFILT.txt'
phot_table_rows.write(phot_table_temp, delimiter=",", format='ascii')
return phot_table_rows
| 5,338,446
|
def dskb02(handle, dladsc):
"""
Return bookkeeping data from a DSK type 2 segment.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskb02_c.html
:param handle: DSK file handle
:type handle: int
:param dladsc: DLA descriptor
:type dladsc: spiceypy.utils.support_types.SpiceDLADescr
:return: bookkeeping data from a DSK type 2 segment
:rtype: tuple
"""
handle = ctypes.c_int(handle)
nv = ctypes.c_int(0)
np = ctypes.c_int(0)
nvxtot = ctypes.c_int(0)
vtxbds = stypes.emptyDoubleMatrix(3, 2)
voxsiz = ctypes.c_double(0.0)
voxori = stypes.emptyDoubleVector(3)
vgrext = stypes.emptyIntVector(3)
cgscal = ctypes.c_int(0)
vtxnpl = ctypes.c_int(0)
voxnpt = ctypes.c_int(0)
voxnpl = ctypes.c_int(0)
libspice.dskb02_c(handle, dladsc, ctypes.byref(nv), ctypes.byref(np), ctypes.byref(nvxtot), vtxbds, ctypes.byref(voxsiz), voxori, vgrext, ctypes.byref(cgscal), ctypes.byref(vtxnpl), ctypes.byref(voxnpt), ctypes.byref(voxnpl))
return nv.value, np.value, nvxtot.value, stypes.cMatrixToNumpy(vtxbds), voxsiz.value, stypes.cVectorToPython(voxori), stypes.cVectorToPython(vgrext), cgscal.value, vtxnpl.value, voxnpt.value, voxnpl.value
| 5,338,447
|
def mt_sec(package, db):
"""
Multithreaded function for security check of packages
:param package: package name
:param db: vuln db
    :return: tuple of (selected package, vulnerability report dict, error message or None)
"""
all_rep = {}
all_rep[package] = {}
error_message = None
try:
_, status, rep = control_vulnerability(package, db)
if status:
all_rep = {**all_rep, **rep}
package_name = package.split('==')[0]
versions = get_available_package_versions(package_name)
secure_packages = []
for v in versions:
_, status, rep = control_vulnerability("==".join([package_name, v]), db)
if not status:
secure_packages.append(_)
# else:
# all_rep = {**all_rep, **rep}
if not secure_packages:
error_message = f"!!! IMPORTANT !!! No alternative secure package versions found for package {package}."
print_critical(error_message)
return package, all_rep, error_message
else:
for pkg in secure_packages:
if version.parse(pkg.split("==")[1]) > version.parse(package.split('==')[1]):
error_message = f"Package: {package} is vulnerable replacing with package: {pkg}. Available " \
f"secure versions are : {secure_packages} "
print_warning("WARNING : " + error_message)
return pkg, all_rep, error_message
error_message = f'Package: {package} is vulnerable replacing with latest secure package: ' \
f'{secure_packages[-1]}. Available secure versions are : {secure_packages} '
print_warning(error_message)
return secure_packages[-1], all_rep, error_message
else:
return _, all_rep, error_message
except Exception as e:
error_message = str(e)
return package, all_rep, error_message
| 5,338,448
|
def train_node2vec(graph, dim, p, q):
"""Obtains node embeddings using Node2vec."""
emb = n2v.Node2Vec(
graph=graph,
dimensions=dim,
workers=mp.cpu_count(),
p=p,
q=q,
quiet=True,
).fit()
emb = {
node_id: emb.wv[str(node_id)]
for node_id in sorted(graph.nodes())
}
return emb
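# Illustrative usage (not part of the original source), assuming the module imports
# node2vec as `n2v` and multiprocessing as `mp`, and that networkx is installed:
import networkx as nx
toy_graph = nx.karate_club_graph()
embeddings = train_node2vec(toy_graph, dim=16, p=1.0, q=0.5)
# `embeddings` maps each node id to a 16-dimensional numpy vector.
print(len(embeddings), embeddings[0].shape)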
| 5,338,449
|
def approx_match_dictionary():
"""Maps abbreviations to the part of the expanded form that is common beween all forms of the word"""
k=["%","bls","gr","hv","hæstv","kl","klst","km","kr","málsl",\
"málsgr","mgr","millj","nr","tölul","umr","þm","þskj","þús"]
v=['prósent','blaðsíð',\
'grein','háttvirt',\
'hæstvirt','klukkan',\
'klukkustund','kílómetr',\
'krón','málslið',\
'málsgrein','málsgrein',\
'milljón','númer','tölulið',\
'umræð','þingm',\
'þingskj','þúsund']
    return dict(zip(k, v))
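# Illustrative usage (not part of the original source): the returned stem matches
# every inflected form of the expansion, e.g. "kr" -> "krón" (krónur, krónum, ...).
abbrev_stems = approx_match_dictionary()
assert abbrev_stems["kr"] == "krón"
assert abbrev_stems["þm"] == "þingm"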
| 5,338,450
|
def matrix2array(M):
"""
1xN matrix to array.
In other words:
[[1,2,3]] => [1,2,3]
"""
if isspmatrix(M):
M = M.todense()
return np.squeeze(np.asarray(M))
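# Illustrative usage (not part of the original source), assuming `isspmatrix` comes
# from scipy.sparse as in the check above; works for dense and sparse 1xN matrices.
from scipy.sparse import csr_matrix
print(matrix2array(np.matrix([[1, 2, 3]])))   # -> [1 2 3]
print(matrix2array(csr_matrix([[1, 2, 3]])))  # -> [1 2 3]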
| 5,338,451
|
def test_ap_wps_ie_fragmentation(dev, apdev):
"""WPS AP using fragmented WPS IE"""
ssid = "test-wps-ie-fragmentation"
params = { "ssid": ssid, "eap_server": "1", "wps_state": "2",
"wpa_passphrase": "12345678", "wpa": "2",
"wpa_key_mgmt": "WPA-PSK", "rsn_pairwise": "CCMP",
"device_name": "1234567890abcdef1234567890abcdef",
"manufacturer": "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
"model_name": "1234567890abcdef1234567890abcdef",
"model_number": "1234567890abcdef1234567890abcdef",
"serial_number": "1234567890abcdef1234567890abcdef" }
hapd = hostapd.add_ap(apdev[0], params)
hapd.request("WPS_PBC")
dev[0].scan_for_bss(apdev[0]['bssid'], freq="2412")
dev[0].request("WPS_PBC " + apdev[0]['bssid'])
dev[0].wait_connected(timeout=30)
bss = dev[0].get_bss(apdev[0]['bssid'])
if "wps_device_name" not in bss or bss['wps_device_name'] != "1234567890abcdef1234567890abcdef":
logger.info("Device Name not received correctly")
logger.info(bss)
# This can fail if Probe Response frame is missed and Beacon frame was
# used to fill in the BSS entry. This can happen, e.g., during heavy
# load every now and then and is not really an error, so try to
        # work around it by running another scan.
dev[0].scan(freq="2412", only_new=True)
bss = dev[0].get_bss(apdev[0]['bssid'])
if not bss or "wps_device_name" not in bss or bss['wps_device_name'] != "1234567890abcdef1234567890abcdef":
logger.info(bss)
raise Exception("Device Name not received correctly")
if len(re.findall("dd..0050f204", bss['ie'])) != 2:
raise Exception("Unexpected number of WPS IEs")
| 5,338,452
|
def main(unused_argv):
"""Convert to Examples and write the result to TFRecords."""
convert_to(FLAGS.name, FLAGS.rt60, FLAGS.inputs, FLAGS.labels,
FLAGS.output_dir, FLAGS.apply_cmvn, FLAGS.test)
| 5,338,453
|
async def emote(ctx: slash.Context, choice: emote_opt):
"""Send a premade message."""
# By default, this sends a message and shows
# the command invocation in a reply-like UI
await ctx.respond(choice, allowed_mentions=discord.AllowedMentions.none())
| 5,338,454
|
def expected(data):
"""Computes the expected agreement, Pr(e), between annotators."""
total = float(np.sum(data))
annotators = range(len(data.shape))
percentages = ((data.sum(axis=i) / total) for i in annotators)
percent_expected = np.dot(*percentages)
return percent_expected
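# Illustrative usage (not part of the original source): chance agreement Pr(e) for
# Cohen's kappa from a 2x2 confusion matrix between two annotators.
confusion = np.array([[20, 5],
                      [10, 15]])
# column marginals (0.6, 0.4) dotted with row marginals (0.5, 0.5) -> 0.5
print(expected(confusion))  # -> 0.5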
| 5,338,455
|
def maximization_step(num_words, stanzas, schemes, probs):
"""
Update latent variables t_table, rprobs
"""
t_table = numpy.zeros((num_words, num_words + 1))
rprobs = numpy.ones(schemes.num_schemes)
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
myprob = probs[i, scheme_index]
rprobs[scheme_index] += myprob
scheme = schemes.scheme_list[scheme_index]
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for j, word_index in enumerate(rhymelist):
t_table[word_index, -1] += myprob
for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:
t_table[word_index, word_index2] += myprob
# Normalize t_table
t_table_sums = numpy.sum(t_table, axis=0)
for i, t_table_sum in enumerate(t_table_sums.tolist()):
if t_table_sum != 0:
t_table[:, i] /= t_table_sum
# Normalize rprobs
totrprob = numpy.sum(rprobs)
rprobs /= totrprob
return t_table, rprobs
| 5,338,456
|
def account_export_mydata_content(account_id=None):
"""
Export ServiceLinks
:param account_id:
:return: List of dicts
"""
if account_id is None:
raise AttributeError("Provide account_id as parameter")
# Get table names
logger.info("ServiceLinkRecord")
db_entry_object = ServiceLinkRecord()
slr_table_name = db_entry_object.table_name
logger.info("ServiceLinkRecord table name: " + str(slr_table_name))
logger.info("ConsentRecord")
db_entry_object = ConsentRecord()
cr_table_name = db_entry_object.table_name
logger.info("ConsentRecord table name: " + str(cr_table_name))
# Get DB cursor
try:
cursor = get_db_cursor()
except Exception as exp:
logger.error('Could not get database cursor: ' + repr(exp))
raise
logger.info("Get SLR IDs")
db_entry_list = []
cursor, slr_id_list = get_slr_ids(cursor=cursor, account_id=account_id, table_name=slr_table_name)
for slr_id in slr_id_list:
logger.info("Getting SLR with slr_id: " + str(slr_id))
slr_dict = account_get_slr(account_id=account_id, slr_id=slr_id)
#
logger.info("Getting status records for SLR")
slsr_dict = account_get_slsrs(account_id=account_id, slr_id=slr_id)
logger.info("Appending status record to SLR")
slr_dict['status_records'] = slsr_dict
#
logger.info("Get CR IDs")
cr_dict_list = []
cursor, cr_id_list = get_cr_ids(slr_id=slr_id, table_name=cr_table_name, cursor=cursor)
for cr_id in cr_id_list:
logger.info("Getting CR with cr_id: " + str(cr_id))
cr_dict = account_get_cr(cr_id=cr_id, account_id=account_id)
logger.info("Getting status records for CR")
csr_dict = account_get_csrs(account_id=account_id, consent_id=cr_id)
logger.info("Appending status record to CR")
cr_dict['status_records'] = csr_dict
logger.info("Appending CR to CR list")
cr_dict_list.append(cr_dict)
#
slr_dict['consent_records'] = cr_dict_list
#
logger.info("Appending SLR to main list")
db_entry_list.append(slr_dict)
logger.info("SLR added to main list: " + json.dumps(slr_dict))
return db_entry_list
| 5,338,457
|
def indicator_entity(indicator_types: List[str] = None) -> type:
"""Return custom model for Indicator Entity."""
class CustomIndicatorEntity(IndicatorEntity):
"""Indicator Entity Field (Model) Type"""
@validator('type', allow_reuse=True)
def is_empty(cls, value: str, field: 'ModelField') -> str:
"""Validate that the value is a non-empty string."""
if isinstance(value, str) and value.replace(' ', '') == '':
raise InvalidEmptyValue(field_name=field.name)
return value
@validator('type', allow_reuse=True)
def is_type(cls, value: str, field: 'ModelField') -> str:
"""Validate that the entity is of a specific Indicator type."""
if value.lower() not in [i.lower() for i in indicator_types]:
raise InvalidEntityType(
field_name=field.name, entity_type=str(indicator_types), value=value
)
return value
return CustomIndicatorEntity
| 5,338,458
|
def z_to_t(z_values, dof):
"""
Convert z-statistics to t-statistics.
An inversion of the t_to_z implementation of [1]_ from Vanessa Sochat's
TtoZ package [2]_.
Parameters
----------
z_values : array_like
Z-statistics
dof : int
Degrees of freedom
Returns
-------
t_values : array_like
T-statistics
References
----------
.. [1] Hughett, P. (2007). Accurate Computation of the F-to-z and t-to-z
Transforms for Large Arguments. Journal of Statistical Software,
23(1), 1-5.
.. [2] Sochat, V. (2015, October 21). TtoZ Original Release. Zenodo.
http://doi.org/10.5281/zenodo.32508
"""
# Select just the nonzero voxels
nonzero = z_values[z_values != 0]
# We will store our results here
t_values_nonzero = np.zeros(len(nonzero))
# Select values less than or == 0, and greater than zero
c = np.zeros(len(nonzero))
k1 = nonzero <= c
k2 = nonzero > c
# Subset the data into two sets
z1 = nonzero[k1]
z2 = nonzero[k2]
# Calculate p values for <=0
p_values_z1 = stats.norm.cdf(z1)
t_values_z1 = stats.t.ppf(p_values_z1, df=dof)
# Calculate p values for > 0
p_values_z2 = stats.norm.cdf(-z2)
t_values_z2 = -stats.t.ppf(p_values_z2, df=dof)
t_values_nonzero[k1] = t_values_z1
t_values_nonzero[k2] = t_values_z2
t_values = np.zeros(z_values.shape)
t_values[z_values != 0] = t_values_nonzero
return t_values
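# Illustrative round-trip check (not part of the original source): mapping the
# t-statistics back through the t CDF and normal quantile recovers the z-values.
z = np.array([0.0, -1.5, 2.0, 3.2])
t = z_to_t(z, dof=30)
z_back = stats.norm.ppf(stats.t.cdf(t, df=30))
print(np.allclose(z_back, z, atol=1e-4))  # -> True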
| 5,338,459
|
def getInputShape(model):
"""
Gets the shape when there is a single input.
Return:
        Numeric dimensions, omitting dimensions that have no value (e.g. the
        batch size).
"""
s = []
for dim in model.input.shape:
if dim.value:
s.append(dim.value)
return tuple(s)
| 5,338,460
|
def get_latest_file(file_paths, only_return_one_match=True):
"""
Returns the latest created file from a list of file paths
:param file_paths: list(str)
:param only_return_one_match: bool
:return: list(str) or str
"""
last_time = 0
times = dict()
for file_path in file_paths:
mtime = os.stat(file_path).st_mtime
if mtime not in times:
times[mtime] = list()
times[mtime].append(file_path)
if mtime > last_time:
last_time = mtime
if not times:
return
    # index by the most recent modification time, not by the last file iterated
    if only_return_one_match:
        return times[last_time][0]
    else:
        return times[last_time]
| 5,338,461
|
def get_node_depths(tree):
"""
Get the node depths of the decision tree
>>> d = DecisionTreeClassifier()
>>> d.fit([[1,2,3],[4,5,6],[7,8,9]], [1,2,3])
>>> get_node_depths(d.tree_)
array([0, 1, 1, 2, 2])
"""
def get_node_depths_(current_node, current_depth, l, r, depths):
depths += [current_depth]
if l[current_node] != -1 and r[current_node] != -1:
get_node_depths_(l[current_node], current_depth + 1, l, r, depths)
get_node_depths_(r[current_node], current_depth + 1, l, r, depths)
depths = []
get_node_depths_(0, 0, tree.children_left, tree.children_right, depths)
return np.array(depths)
| 5,338,462
|
def ihfft(a: numpy.ndarray, n: None, axis: int):
"""
usage.dask: 3
"""
...
| 5,338,463
|
def add_metrics(engine, met):
""" add provided metrics to database """
Session = sessionmaker()
Session.configure(bind=engine)
session = Session()
session.add(met)
session.commit()
session.expunge_all()
session.close()
| 5,338,464
|
def homo_tuple_typed_attrs(draw, defaults=None, legacy_types_only=False, kw_only=None):
"""
    Generate a tuple of an attribute and a strategy that yields homogeneous
    tuples for that attribute. The tuples contain strings.
"""
default = attr.NOTHING
val_strat = tuples(text(), text(), text())
if defaults is True or (defaults is None and draw(booleans())):
default = draw(val_strat)
return (
attr.ib(
type=draw(
sampled_from(
[tuple[str, ...], tuple, Tuple, Tuple[str, ...]]
if not legacy_types_only
else [tuple, Tuple, Tuple[str, ...]]
)
),
default=default,
kw_only=draw(booleans()) if kw_only is None else kw_only,
),
val_strat,
)
| 5,338,465
|
def RunSuite(config, files, extra_flags, errors):
"""Run a collection of benchmarks."""
global ERRORS, CONCURRENCY
Banner('running %d tests' % (len(files)))
pool = multiprocessing.Pool(processes=CONCURRENCY)
# create a list of run arguments to map over
argslist = [(num, len(files), config, test, extra_flags)
for num, test in enumerate(files)]
# let the process pool handle the test assignments, order doesn't matter
pool.map(RunTest, argslist)
while not ERRORS.empty():
phase, test = ERRORS.get()
errors[phase].append(test)
| 5,338,466
|
def test_get_project_info(client):
"""Test get info on the project"""
response = client.get("/api/projects/project-id/info")
json_data = response.get_json()
assert json_data["authors"] == "asreview team"
assert json_data["dataset_path"] == "Hall_2012.csv"
| 5,338,467
|
def flatland_env_factory(
evaluation: bool = False,
env_config: Dict[str, Any] = {},
preprocessor: Callable[
[Any], Union[np.ndarray, Tuple[np.ndarray], Dict[str, np.ndarray]]
] = None,
include_agent_info: bool = False,
random_seed: Optional[int] = None,
) -> FlatlandEnvWrapper:
"""Loads a flatand environment and wraps it using the flatland wrapper"""
del evaluation # since it has same behaviour for both train and eval
env = create_rail_env_with_tree_obs(**env_config)
wrapped_env = FlatlandEnvWrapper(env, preprocessor, include_agent_info)
if random_seed and hasattr(wrapped_env, "seed"):
wrapped_env.seed(random_seed)
return wrapped_env
| 5,338,468
|
def notify_about_update(user, event_type="UPDATED"):
"""Notify all organisation about changes of the user."""
for org in user.organisations.where(
Organisation.webhook_enabled | Organisation.email_notifications_enabled
):
if org.webhook_enabled and org.webhook_url:
invoke_webhook_handler.queue(
org.id,
user.orcid,
user.created_at or user.updated_at,
user.updated_at or user.created_at,
event_type=event_type,
)
if org.email_notifications_enabled:
url = app.config["ORCID_BASE_URL"] + user.orcid
send_email(
f"""<p>User {user.name} (<a href="{url}" target="_blank">{user.orcid}</a>)
{"profile was updated" if event_type == "UPDATED" else "has linked their account"} at
{(user.updated_at or user.created_at).isoformat(timespec="minutes", sep=' ')}.</p>""",
recipient=org.notification_email
or (org.tech_contact.name, org.tech_contact.email),
cc_email=(org.tech_contact.name, org.tech_contact.email)
if org.notification_email
else None,
subject=f"ORCID Profile Update ({user.orcid})",
org=org,
)
| 5,338,469
|
def industry(code, market="cn"):
"""获取某个行业的股票列表。目前支持的行业列表具体可以查询以下网址:
https://www.ricequant.com/api/research/chn#research-API-industry
:param code: 行业代码,如 A01, 或者 industry_code.A01
:param market: 地区代码, 如'cn' (Default value = "cn")
:returns: 行业全部股票列表
"""
if not isinstance(code, six.string_types):
code = code.code
else:
code = to_industry_code(code)
return [
v.order_book_id
for v in _all_instruments_list(market)
if v.type == "CS" and v.industry_code == code
]
| 5,338,470
|
def kewley_agn_oi(log_oi_ha):
"""Seyfert/LINER classification line for log([OI]/Ha)."""
return 1.18 * log_oi_ha + 1.30
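# Illustrative usage (not part of the original source): the Kewley Seyfert/LINER
# division in the [OIII]/Hb vs [OI]/Ha diagram; Seyferts sit above this line.
log_oi_ha = -1.0
print(round(kewley_agn_oi(log_oi_ha), 2))  # -> 0.12, the log([OIII]/Hb) boundary here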
| 5,338,471
|
def XGMMLReader(graph_file):
"""
    Parse an XGMML graph file and return the resulting graph object.
    Arguments:
    - `graph_file`: path or file-like object containing the XGMML document
"""
parser = XGMMLParserHelper()
parser.parseFile(graph_file)
return parser.graph()
| 5,338,472
|
def test_dispatch_request(client, method):
"""Dispatch request to the `Injector` subclass attributes."""
response = getattr(client, method)("/test_dispatch_request/1/test/")
assert response.status_code == 200
assert response.content == b"<h1>OK</h1>"
| 5,338,473
|
def pt_encode(index):
"""pt: Toggle light."""
return MessageEncode(f"09pt{index_to_housecode(index)}00", None)
| 5,338,474
|
def normalize_inputspace(
x,
vmax=1,
vmin=0,
mean=PYTORCH_IMAGENET_MEAN,
std=PYTORCH_IMAGENET_STD,
each=True,
img_format="CHW",
):
"""
Args:
x: numpy.ndarray
format is CHW or BCHW
each: bool
if x has dimension B
then apply each input x.
Returns:
normalized x: numpy.ndarray
"""
if len(x.shape) == 3:
return normalize3d_inputspace(x, vmax, vmin, mean, std, img_format=img_format)
elif len(x.shape) == 4:
if each:
return np.array(
[
normalize_inputspace(
_x, vmax, vmin, mean, std, img_format=img_format
)
for _x in x
]
)
else:
# TODO:
raise ValueError(each)
| 5,338,475
|
def test_authentiaction_the_token_in_the_header_contains_spaces(jwt_token):
"""
    Authentication must be rejected when the token in the Authorization header contains spaces.
"""
client = APIClient()
client.credentials(HTTP_AUTHORIZATION="Bearer " + jwt_token["VALID"] + " 12")
response = client.post("/api/current_user_jwt/")
assert (
response.data["detail"]
== "Invalid token header. Token string should not contain spaces."
)
assert response.status_code == 401
| 5,338,476
|
def get_load_average() -> Tuple[float, float, float]:
"""Get load average"""
return os.getloadavg()
| 5,338,477
|
def test_local_orchestrator(fileutils):
"""Test launching orchestrator locally"""
global first_dir
exp_name = "test-orc-launch-local"
exp = Experiment(exp_name, launcher="local")
test_dir = fileutils.make_test_dir(exp_name)
first_dir = test_dir
orc = Orchestrator(port=6780)
orc.set_path(test_dir)
exp.start(orc)
statuses = exp.get_status(orc)
    assert all(stat != status.STATUS_FAILED for stat in statuses)
# simulate user shutting down main thread
exp._control._jobs.actively_monitoring = False
exp._control._launcher.task_manager.actively_monitoring = False
| 5,338,478
|
def test_insert_table(graph_cases):
"""
:type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
"""
graph = graph_cases('1t')
assert len(graph.get_tables()) == 1
| 5,338,479
|
def start_file(filename):
"""
Generalized os.startfile for all platforms supported by Qt
This function is simply wrapping QDesktopServices.openUrl
    Returns True if successful, otherwise returns False.
"""
from qtpy.QtCore import QUrl
from qtpy.QtGui import QDesktopServices
# We need to use setUrl instead of setPath because this is the only
# cross-platform way to open external files. setPath fails completely on
# Mac and doesn't open non-ascii files on Linux.
# Fixes spyder-ide/spyder#740.
url = QUrl()
url.setUrl(filename)
return QDesktopServices.openUrl(url)
| 5,338,480
|
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`authorize_project_context` and
:py:func:`authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]) and not is_user_context(args[0]):
raise exception.NotAuthorized()
return f(*args, **kwargs)
return wrapper
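# Illustrative usage (not part of the original source): a hypothetical DB accessor
# guarded by the decorator; any admin or user context passes, otherwise
# exception.NotAuthorized is raised before the body runs.
@require_context
def get_volume(context, volume_id):
    """Hypothetical accessor requiring any authenticated context."""
    return {"id": volume_id}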
| 5,338,481
|
def local_launcher(commands):
"""Launch all of the scripts in commands on the local machine serially. If GPU is available it is gonna use it.
Taken from : https://github.com/facebookresearch/DomainBed/
Args:
commands (List): List of list of string that consists of a python script call
"""
for cmd in commands:
subprocess.call(cmd, shell=True)
| 5,338,482
|
def failsafe_hull(coords):
"""
    Wrapper around ConvexHull that returns None if a hull cannot be computed for the given points (e.g. all collinear, or too few points).
"""
coords = np.array(coords)
if coords.shape[0] > 3:
try:
return ConvexHull(coords)
except QhullError as e:
if 'hull precision error' not in str(e) and 'input is less than 3-dimensional' not in str(e):
raise e
return None
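# Illustrative usage (not part of the original source): a proper 2D point set yields
# a hull, while collinear points should trip Qhull's precision error and return None.
print(failsafe_hull([(0, 0), (1, 0), (1, 1), (0, 1)]).volume)  # -> 1.0 (unit-square area)
print(failsafe_hull([(0, 0), (1, 1), (2, 2), (3, 3)]))         # -> None (degenerate input)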
| 5,338,483
|
def CheckAttribs(a, b, attrs, assertEquals):
"""Checks that the objects a and b have the same values for the attributes
given in attrs. These checks are done using the given assert function.
Args:
a: The first object.
b: The second object.
attrs: The list of attribute names (strings).
assertEquals: The assertEquals method from unittest.TestCase.
"""
# For Stop objects (and maybe others in the future) Validate converts some
# attributes from string to native type
a.Validate()
b.Validate()
for k in attrs:
assertEquals(getattr(a, k), getattr(b, k))
| 5,338,484
|
def list_closed_poll_sessions(request_ctx, **request_kwargs):
"""
Lists all closed poll sessions available to the current user.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:return: List closed poll sessions
:rtype: requests.Response (with void data)
"""
path = '/v1/poll_sessions/closed'
url = request_ctx.base_api_url + path.format()
response = client.get(request_ctx, url, **request_kwargs)
return response
| 5,338,485
|
def _wrap_apdu(command: bytes) -> List[bytes]:
"""Return a list of packet to be sent to the device"""
packets = []
header = struct.pack(">H", len(command))
command = header + command
chunks = [command[i : i + _PacketData.FREE] for i in range(0, len(command), _PacketData.FREE)]
# Create a packet for each command chunk
for packet_id in range(len(chunks)):
header = struct.pack(">HBH", _CHANNEL_ID, _CmdTag.APDU, packet_id)
packet = header + chunks[packet_id]
        # bytes.ljust returns a new object, so the padded packet must be reassigned
        packet = packet.ljust(_PacketData.SIZE, bytes([0x0]))
packets.append(packet)
return packets
| 5,338,486
|
def build_dtree(bins):
"""
Build the directory tree out of what's under `user/`. The `dtree` is a
dict of:
string name -> 2-list [inumber, element]
, where element could be:
- Raw bytes for regular file
- A `dict` for directory, which recurses on
"""
def next_inumber():
"""
Allocate the next available inumber.
"""
global curr_inumber
inumber = curr_inumber
curr_inumber += 1
return inumber
for b in bins:
bpath = Path(b)
if not bpath.is_file():
print("Error: user binary '{}' is not a regular file".format(b))
exit(1)
parts = PurePath(b).parts
parents = parts[1:-1]
binary = parts[-1]
if parts[0] != "user":
print("Error: user binray '{}' is not under 'user/'".format(b))
exit(1)
if not binary.endswith(".bin"):
print("Error: user binray '{}' does not end with '.bin'".format(b))
exit(1)
binary = binary[:-4]
curr_dir = dtree
for d in parents:
if d not in curr_dir:
curr_dir[d] = [next_inumber(), dict()]
curr_dir = curr_dir[d][1]
with bpath.open(mode='br') as bfile:
curr_dir[binary] = [next_inumber(), bytearray(bfile.read())]
| 5,338,487
|
def noisify_image(image_to_noisify, secret, indices_used, original_distribution, printf=None, process_count=None):
"""Encode all pixels not containing secret with noise with its distribution matching source image.
Notes:
This method modifies passed image object - encoding is done in-place.
Args:
        image_to_noisify: PIL.Image object to noisify.
secret: secret as str or list of bytes/char values.
indices_used: set containing indexes of pixels encoded with secret
original_distribution: distribution of zeroes and ones in luma LSB in source image.
printf: optional ConsolePrinter object to use for printing encoding status.
process_count: optional number of worker processes to use.
"""
secret_dist = calculate_bit_distribution(secret)
leftover_dist = (original_distribution[0] - secret_dist[0], original_distribution[1] - secret_dist[1])
pixel_count = image_to_noisify.size[0] * image_to_noisify.size[1]
zero_chance = float(leftover_dist[0])/float(leftover_dist[0]+leftover_dist[1])
noise_bits = (0 if random.random() < zero_chance else 1 for _ in xrange(pixel_count-len(indices_used)))
indices_to_noise = (n for n in xrange(pixel_count) if n not in indices_used)
if process_count == 0:
encode_bits(image_to_noisify, indices_to_noise, noise_bits, printf=printf,
secret_bit_len=pixel_count-len(indices_used), skip_assert=True)
else:
encode_bits_mp(image_to_noisify, indices_to_noise, noise_bits, printf=printf,
secret_bit_len=pixel_count - len(indices_used), process_count=process_count, skip_assert=True)
| 5,338,488
|
def dataframe_to_rows(df, index=True, header=True):
"""
Convert a Pandas dataframe into something suitable for passing into a worksheet.
If index is True then the index will be included, starting one row below the header.
If header is True then column headers will be included starting one column to the right.
Formatting should be done by client code.
"""
import numpy
from pandas import Timestamp
blocks = df._data.blocks
ncols = sum(b.shape[0] for b in blocks)
data = [None] * ncols
for b in blocks:
values = b.values
if b.dtype.type == numpy.datetime64:
values = numpy.array([Timestamp(v) for v in values.ravel()])
values = values.reshape(b.shape)
result = values.tolist()
for col_loc, col in zip(b.mgr_locs, result):
data[col_loc] = col
if header:
if df.columns.nlevels > 1:
rows = expand_index(df.columns, header)
else:
rows = [list(df.columns.values)]
for row in rows:
n = []
for v in row:
if isinstance(v, numpy.datetime64):
v = Timestamp(v)
n.append(v)
row = n
if index:
row = [None]*df.index.nlevels + row
yield row
if index:
yield df.index.names
expanded = ([v] for v in df.index)
if df.index.nlevels > 1:
expanded = expand_index(df.index)
for idx, v in enumerate(expanded):
row = [data[j][idx] for j in range(ncols)]
if index:
row = v + row
yield row
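# Illustrative usage (not part of the original source), assuming a pandas version in
# which DataFrame._data (used above) is still available, plus openpyxl for the sheet:
import pandas
from openpyxl import Workbook
df = pandas.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
ws = Workbook().active
for row in dataframe_to_rows(df, index=True, header=True):
    ws.append(row)
# The worksheet now holds the header row, the index-name row, and one row per record.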
| 5,338,489
|
def _compose_image(digit, background):
"""Difference-blend a digit and a random patch from a background image."""
w, h, _ = background.shape
dw, dh, _ = digit.shape
x = np.random.randint(0, w - dw)
y = np.random.randint(0, h - dh)
bg = background[x:x+dw, y:y+dh]
return np.abs(bg - digit).astype(np.uint8)
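# Illustrative usage (not part of the original source): difference-blend a synthetic
# 28x28 "digit" against a random patch of a larger background.
rng = np.random.RandomState(0)
digit = rng.randint(0, 255, size=(28, 28, 3)).astype(np.uint8)
background = rng.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
blended = _compose_image(digit, background)
print(blended.shape, blended.dtype)  # -> (28, 28, 3) uint8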
| 5,338,490
|
def permissions_vsr(func):
"""
:param func:
:return:
"""
def func_wrapper(name):
return "<p>{0}</p>".format(func(name))
return func_wrapper
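# Illustrative usage (not part of the original source): the decorator wraps the
# return value of a single-argument view function in a paragraph tag.
@permissions_vsr
def greet(name):
    return "Hello, " + name
print(greet("world"))  # -> <p>Hello, world</p>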
| 5,338,491
|
def upgrade(profile, validator, writeProfileToFileFunc):
""" Upgrade a profile in memory and validate it
If it is safe to do so, as defined by shouldWriteProfileToFile, the profile is written out.
"""
# when profile is none or empty we can still validate. It should at least have a version set.
_ensureVersionProperty(profile)
startSchemaVersion = int(profile[SCHEMA_VERSION_KEY])
log.debug("Current config schema version: {0}, latest: {1}".format(startSchemaVersion, latestSchemaVersion))
for fromVersion in range(startSchemaVersion, latestSchemaVersion):
_doConfigUpgrade(profile, fromVersion)
_doValidation(deepcopy(profile), validator) # copy the profile, since validating mutates the object
try:
# write out the configuration once the upgrade has been validated. This means that if NVDA crashes for some
# other reason the file does not need to be upgraded again.
if writeProfileToFileFunc:
writeProfileToFileFunc(profile.filename, profile)
except Exception as e:
log.warning("Error saving configuration; probably read only file system")
log.debugWarning("", exc_info=True)
pass
| 5,338,492
|
def tri_interpolate_zcoords(points: np.ndarray, triangles: np.ndarray, mesh_points: np.ndarray,
is_mesh_edge: np.ndarray, num_search_tris: int=10):
"""
Interpolate z-coordinates to a set of 2D points using 3D point coordinates and a triangular mesh.
If point is along a mesh boundary, the boundary values are used instead.
Returned values are:
z: The interpolated z-values
"""
# Get triangle centroid coordinates and create KD-tree.
tri_coords = points[triangles,:]
tri_coords2D = points[triangles,0:2]
tri_centroids = np.mean(tri_coords2D, axis=1)
tri_tree = scipy.spatial.cKDTree(tri_centroids)
# Loop over points.
coords2d = mesh_points[:,0:2]
num_mesh_points = coords2d.shape[0]
z = np.zeros(num_mesh_points, dtype=np.float64)
for point_num in range(num_mesh_points):
if not(is_mesh_edge[point_num]):
z[point_num] = project_2d_coords(tri_coords, coords2d[point_num,:], tri_tree, num_search_tris=num_search_tris)
return z
| 5,338,493
|
def getServiceTypes(**kwargs) -> List:
"""List types of services.
Returns:
List of distinct service types.
"""
services = getServices.__wrapped__()
types = [s['type'] for s in services]
uniq_types = [dict(t) for t in {tuple(sorted(d.items())) for d in types}]
return uniq_types
| 5,338,494
|
def train_model(train_data, test_data, model, model_name, optimizer, loss='mse', scale_factor=1000., batch_size=128, max_epochs=200, early_stop=True, plot_history=True):
""" Code to train a given model and save out to the designated path as given by 'model_name'
Parameters
----------
train_data : 2-tuple
(train_x, train_y) where train_x is the images and train_y is the Gaussian dot annotation images in the train split.
test_data : 2-tuple
(test_x, test_y) where test_x is the images and test_y is the Gaussian dot annotation images in the test split.
model : a Keras model
a defined Keras model
optimizer : Keras optimizer object
the gradient descent optimizer e.g. Adam, SGD instance used to optimizer the model. We used Adam() with default settings.
loss : string
one of 'mse' (mean squared error) or 'mae' (mean absolute error)
scale_factor : None or float
multiplicative factor to apply to annotation images to increase the gradient in the backpropagation
batch_size : int
number of images to batch together for training
max_epochs : int
the maximum number of epochs to train for if early_stop is enabled else this is the number of epochs of training.
early_stop : bool
if True, monitors the minimum of the test loss. If loss does not continue to decrease for a set duration, stop the training and return the model with the best test loss.
    plot_history : bool
if True, plots the training and test loss over the training period on the same axes for visualisation.
Returns
-------
None : void
This function will simply save the model to the location given by model_name.
"""
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint
import pylab as plt
train_x, train_y = train_data
test_x, test_y = test_data
if scale_factor is not None:
train_y = train_y * float(scale_factor)
test_y = test_y * float(scale_factor)
# compile the model with chosen optimizer.
model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
if early_stop:
""" Set some early stopping parameters """
early_stop = EarlyStopping(monitor='val_loss',
min_delta=0.001,
patience=15,
mode='min',
verbose=1)
checkpoint = ModelCheckpoint(model_name,
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='min',
period=1)
history = model.fit(train_x, train_y,
batch_size=batch_size,
                            epochs=max_epochs,
validation_data=(test_x, test_y), shuffle=True,
callbacks = [early_stop, checkpoint])
else:
history = model.fit(train_x, train_y,
batch_size=batch_size,
                            epochs=max_epochs,
validation_data=(test_x, test_y), shuffle=True)
model.save(model_name) # save the whole model state.
if plot_history:
plt.figure()
plt.plot(history.history['loss'], 'r', label='train loss')
plt.plot(history.history['val_loss'], 'g', label='test loss')
plt.legend()
plt.show()
return []
| 5,338,495
|
def get_pipeline_storage_es_client(session, *, index_date):
"""
Returns an Elasticsearch client for the pipeline-storage cluster.
"""
secret_prefix = f"elasticsearch/pipeline_storage_{index_date}"
host = get_secret_string(session, secret_id=f"{secret_prefix}/public_host")
port = get_secret_string(session, secret_id=f"{secret_prefix}/port")
protocol = get_secret_string(session, secret_id=f"{secret_prefix}/protocol")
username = get_secret_string(
session, secret_id=f"{secret_prefix}/read_only/es_username"
)
password = get_secret_string(
session, secret_id=f"{secret_prefix}/read_only/es_password"
)
return Elasticsearch(f"{protocol}://{username}:{password}@{host}:{port}")
| 5,338,496
|
def cqcc_resample(s, fs_orig, fs_new, axis=0):
"""implement the resample operation of CQCC
Parameters
----------
s : ``np.ndarray``
the input spectrogram.
fs_orig : ``int``
origin sample rate
fs_new : ``int``
new sample rate
axis : ``int``
the resample axis
Returns
-------
spec_res : ``np.ndarray``
spectrogram after resample
"""
if int(fs_orig) != int(fs_new):
s = resampy.resample(s, sr_orig=fs_orig, sr_new=fs_new,
axis=axis)
return s
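# Illustrative usage (not part of the original source): downsample a spectrogram from
# 16 kHz to 8 kHz along the frame axis (axis=0, matching the default above).
import numpy as np
spec = np.random.rand(400, 96)                                     # 400 frames x 96 bins
spec_8k = cqcc_resample(spec, fs_orig=16000, fs_new=8000, axis=0)
print(spec.shape, "->", spec_8k.shape)                             # roughly half the frames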
| 5,338,497
|
def weights_init():
"""
Gaussian init.
"""
def init_fun(m):
classname = m.__class__.__name__
if (classname.find("Conv") == 0 or classname.find("Linear") == 0) and hasattr(m, "weight"):
nn.init.normal_(m.weight, 0.0, 0.02)
if hasattr(m, "bias") and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
return init_fun
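# Illustrative usage (not part of the original source): apply the initializer to every
# Conv*/Linear layer of a small torch.nn model via Module.apply.
net = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3), nn.ReLU(), nn.Linear(8, 2))
net.apply(weights_init())
# Conv2d and Linear weights are now drawn from N(0, 0.02); their biases are zeroed.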
| 5,338,498
|
def dozier_2d(dem, number_of_sectors, distance):
"""
:param dem:
:param number_of_sectors:
:param distance:
:return:
"""
pass
| 5,338,499
|