content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def read_train_valid(filename):
    """Read a training or validation file.

    :param filename: path to the train/validation TSV file; must contain
        a ``review`` (text) column and a ``sentiment`` (label) column
    :return: tuple ``(texts, labels)`` where ``texts`` is a list of
        strings and ``labels`` is a list of ints, e.g.
        ``['happy', 'not really happy'], [1, 0]``
    """
    # ``error_bad_lines`` was removed in pandas 2.0; ``on_bad_lines='skip'``
    # is the supported replacement for silently dropping malformed rows.
    frame = pd.read_table(filename, sep='\t', on_bad_lines='skip')
    return frame['review'].tolist(), [int(s) for s in frame['sentiment'].tolist()]
def check_for_recommendation_result_report(context):
    """Dispatch result checks depending on whether the response payload
    is a recommendation report or a regular result."""
    payload = context.response.json()
    if "recommendation" not in payload:
        look_for_other_attributes(context)
        check_vulnerability_in_result(context)
    else:
        check_recommendation_in_result(context)
def handle_skinnyski_pdf(race_info):
    """Fetch a skinnyski result PDF and serialize its results.

    :param race_info: race metadata (RaceInfo)
    :return: void
    """
    content = get_skinnyski_content(race_info)
    if not content:
        # Nothing to parse: the PDF could not be fetched.
        print("Warning: skipping a pdf which was unable to be accessed")
        return
    UnstructuredPDFRaceResults(race_info, content).serialize()
def test_stateless_token_authentication_no_header(rf):
    """StatelessTokenAuthentication must yield None when the request
    carries no auth header."""
    req = rf.get("api/v0/notification_settings")
    auth = StatelessTokenAuthentication()
    assert auth.authenticate(req) is None
def weighted_regularization_matrix_from(
    regularization_weights: np.ndarray,
    pixel_neighbors: np.ndarray,
    pixel_neighbors_sizes: np.ndarray,
) -> np.ndarray:
    """
    Build the regularization matrix for the weighted (adaptive)
    regularization scheme from the pixel-neighbor lists.

    Parameters
    ----------
    regularization_weights
        Per-pixel regularization weight, governing how much smoothing is
        applied to each individual pixel.
    pixel_neighbors
        For every pixel, the indices of all its neighbors on the Voronoi
        grid (entries of -1 correspond to no neighbor).
    pixel_neighbors_sizes
        The number of neighbors of every pixel on the Voronoi grid.

    Returns
    -------
    np.ndarray
        The (pixels, pixels) regularization matrix, where every source
        pixel carries its own effective regularization coefficient.
    """
    total_pixels = len(regularization_weights)
    matrix = np.zeros(shape=(total_pixels, total_pixels))
    weights_sq = regularization_weights ** 2.0

    for pixel in range(total_pixels):
        # Tiny diagonal offset keeps the matrix numerically invertible.
        matrix[pixel, pixel] += 1e-8
        for k in range(pixel_neighbors_sizes[pixel]):
            neighbor = pixel_neighbors[pixel, k]
            w = weights_sq[neighbor]
            # Symmetric graph-Laplacian style update for this pixel pair.
            matrix[pixel, pixel] += w
            matrix[neighbor, neighbor] += w
            matrix[pixel, neighbor] -= w
            matrix[neighbor, pixel] -= w

    return matrix
def generateConstantNoneReferenceCode(to_name, expression, emit, context):
    """Emit C code assigning the constant 'Py_None' to *to_name*."""
    # Neither expression nor context knowledge is needed for a constant,
    # pylint: disable=unused-argument
    code = "%s = Py_None;" % to_name
    emit(code)
def test_mo_ei_watanabe():
    """Compare the Lotz-formula EI cross section for H-like molybdenum
    against Watanabe's experimental value; the relative deviation must
    stay inside the error bars specified by Lotz."""
    element = csd.get_element_data('Mo')
    electron_energy = 64400
    lotz_value = csd.ei_lotz_cs(element, 41, electron_energy)
    relative_error = abs(lotz_value - 3.13E-23) / 3.13E-23
    print('Error Watanabe Mo', relative_error)
    assert relative_error < 0.3
def _send_email(self, to, subject, html_body):
    # Send *html_body* as an HTML email to *to* via SMTP (STARTTLS on 587).
    # On SMTP failure the exception is logged and the task is retried via
    # self.retry() — presumably a celery task; confirm the task base class.
    # We intentionally commented out this code - we used it to prevent emails in development from going to non-Staffjoy emails.
    """
    if current_app.config.get("ENV") != "prod":
        allowed_domains = ["@staffjoy.com", "@7bridg.es"]
        ok = False
        for d in allowed_domains:
            if to[-len(d):].lower() == d:
                ok = True
        if not ok:
            current_app.logger.info(
                "Intercepted email to %s and prevented sending due to environment rules."
                % to)
            return
    """
    # Never email blacklisted recipients or demo accounts.
    if to in current_app.config.get("EMAIL_BLACKLIST") or (
            to.startswith("demo+") and to.endswith("@7bridg.es")):
        current_app.logger.debug(
            "Not sending email to %s becuase it is blacklisted" % to)
        return
    current_app.logger.info("Sending an email to %s - subject '%s' - body %s" %
                            (to, subject, html_body))
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = email_utils.formataddr(("Staffjoy", current_app.config.get("FROM_EMAIL")))
    msg['To'] = to
    # Record the MIME types of both parts - text/plain and text/html.
    part2 = MIMEText(html_body, 'html')
    # Attach parts into message container.
    # According to RFC 2046, the last part of a multipart message, in this case
    # the HTML message, is best and preferred.
    msg.attach(part2)
    try:
        # Port 587: plain SMTP upgraded to TLS via STARTTLS below.
        server = smtplib.SMTP(current_app.config.get("SMTP_HOST"), 587)
        server.ehlo()
        server.starttls()
        #stmplib docs recommend calling ehlo() before & after starttls()
        server.ehlo()
        server.login(current_app.config.get("SMTP_USER"), current_app.config.get("SMTP_PASSWORD"))
        server.sendmail(current_app.config.get("FROM_EMAIL"), to, msg.as_string())
        server.close()
    except Exception as e:
        current_app.logger.exception(
            'An smtp error to email %s occurred: %s - %s' %
            (to, e.__class__, e))
        # Re-queue the task so transient SMTP errors are retried.
        raise self.retry(exc=e)
def child_at_time(
    self,
    search_time,
    shallow_search=False,
):
    """Return the child that overlaps with time search_time.

    search_time is in the space of self.

    If shallow_search is false, will recurse into compositions.

    Returns None when no child overlaps search_time.
    """
    range_map = self.range_of_all_children()

    # find the first item whose end_time_exclusive is after the
    # search_time (children before it end too early to overlap)
    first_inside_range = _bisect_left(
        seq=self,
        tgt=search_time,
        key_func=lambda child: range_map[child].end_time_exclusive(),
    )

    # find the last item whose start_time is before the
    # search_time, starting the search from first_inside_range
    last_in_range = _bisect_right(
        seq=self,
        tgt=search_time,
        key_func=lambda child: range_map[child].start_time,
        lower_search_bound=first_inside_range,
    )

    # limit the search to children who are in the search_range
    possible_matches = self[first_inside_range:last_in_range]

    # take the first candidate that actually overlaps search_time
    result = None
    for thing in possible_matches:
        if range_map[thing].overlaps(search_time):
            result = thing
            break

    # if the search cannot or should not continue
    if (
        result is None
        or shallow_search
        or not hasattr(result, "child_at_time")
    ):
        return result

    # before you recurse, you have to transform the time into the
    # space of the child
    child_search_time = self.transformed_time(search_time, result)

    return result.child_at_time(child_search_time, shallow_search)
def weight_variable_truncated_normal(input_dim, output_dim, name=""):
    """Build a weight variable initialised from a truncated normal
    distribution; draws more than 2 stddev away from the mean are
    redrawn."""
    init_values = tf.truncated_normal([input_dim, output_dim], stddev=0.5)
    return tf.Variable(init_values, name=name)
def download_image(id_, url, icon_size=200, dst_path="./movie/"):
    """Download an image from a URL.

    Parameters
    ----------
    id_ : int
        Request ID, used as the sub-directory name for the saved file.
    url : str
        URL of the image; a Twitter-style ``_normal.jpg`` suffix is
        rewritten to the requested ``_<size>x<size>.jpg`` variant.
    icon_size : int
        Image size (one of 24, 48, 73, 200, 400, 512), by default 200.
    dst_path : str, optional
        Base directory for the download, by default "./movie/".
        The file is written to ``<dst_path><id_>/icon.png``; the
        ``<dst_path><id_>`` directory must already exist.
    """
    url = url.replace("_normal.jpg", "_"+str(icon_size)+"x"+str(icon_size)+".jpg")
    data = urllib.request.urlopen(url).read()
    with open(dst_path+str(id_)+"/icon.png", mode="wb") as f:
        f.write(data)
def get_dummy_vm_create_spec(client_factory, name, data_store_name):
    """Build the minimal ("dummy") VirtualMachineConfigSpec used to create
    a placeholder VM on the given datastore."""
    file_info = client_factory.create('ns0:VirtualMachineFileInfo')
    file_info.vmPathName = "[" + data_store_name + "]"

    tools = client_factory.create('ns0:ToolsConfigInfo')
    for hook in ('afterPowerOn', 'afterResume', 'beforeGuestStandby',
                 'beforeGuestShutdown', 'beforeGuestReboot'):
        setattr(tools, hook, True)

    controller_key = -101
    spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    spec.name = name
    spec.guestId = "otherGuest"
    spec.files = file_info
    spec.tools = tools
    spec.numCPUs = 1
    spec.memoryMB = 4
    # One controller plus one 1 GiB virtual disk attached to it.
    spec.deviceChange = [
        create_controller_spec(client_factory, controller_key),
        create_virtual_disk_spec(client_factory, 1024, controller_key),
    ]
    return spec
def _kill_jupyter_processes():
    """Ensure all Jupyter processes are killed.

    Windows-only: relies on ``taskkill /T`` so each process tree (kernel
    children included) is terminated with its parent.
    """
    global _all_jupyter_processes
    while _all_jupyter_processes:
        proc = _all_jupyter_processes[0]
        # poll() returning non-None means the process already exited;
        # drop it from the head of the list and continue.
        if proc.poll() is not None:
            _all_jupyter_processes = _all_jupyter_processes[1:]
            continue
        _log.info("Killing Jupyter process %s" % proc.pid)
        # NOTE(review): wShowWindow only takes effect when dwFlags includes
        # STARTF_USESHOWWINDOW — confirm the console window is actually hidden.
        si = subprocess.STARTUPINFO(wShowWindow=subprocess.SW_HIDE)
        # /F force, /T kill the whole tree rooted at PID.
        subprocess.check_call(['taskkill', '/F', '/T', '/PID', str(proc.pid)],
                              startupinfo=si,
                              shell=True)
def test_comparison_ops_eq_t():
    """Check the equal-to operator for a truthy result."""
    # Returned source is a template: the harness substitutes {dest} and
    # checks the program stores a truthy value there.
    return """
    fn main() {
        {dest} = 1 == 1;
    }
    """
def binary_accuracy(*, logits, labels):
    """Fraction of correct predictions of a binary classifier, from logits."""
    predictions = jax.nn.sigmoid(logits) > 0.5
    return jnp.mean(labels == predictions)
def create_app(app_name=None, blueprints=None, config=None):
    """
    Diffy application factory.

    :param app_name: optional Flask application name (defaults to this module)
    :param blueprints: optional extra blueprints, appended to the defaults
    :param config: configuration passed through to configure_app
    :return: the configured Flask application
    """
    if blueprints:
        all_blueprints = blueprints + DEFAULT_BLUEPRINTS
    else:
        all_blueprints = DEFAULT_BLUEPRINTS

    app = Flask(app_name or __name__)
    configure_app(app, config)
    configure_blueprints(app, all_blueprints)
    configure_extensions(app)
    configure_logging(app)

    # Only pretty-print the config when debug logging is actually enabled.
    if app.logger.isEnabledFor(DEBUG):
        p_config = pformat(app.config)
        app.logger.debug(f"Current Configuration: {p_config}")
    return app
def extract_tag(inventory, url):
    """
    Extract data from a sphinx inventory of a C++ project documented
    with Breathe.

    The inventory maps the keys

    - cpp:class (class names)
    - cpp:function (functions or class methods)
    - cpp:type (type names)

    to dictionaries of {element name: tuple}, where index 2 of each tuple
    holds the absolute documentation URL.

    Parameters
    ----------
    inventory : dict
        sphinx inventory
    url : url of the documentation

    Returns
    -------
    dictionary with keys class, class_methods, func, type, where class
    methods are grouped under their owning class.
    """
    def relative_url(entry):
        # Strip the documentation base URL, keeping the relative path.
        return entry[2].replace(url, '')

    classes = {name: relative_url(entry)
               for name, entry in inventory.get('cpp:class', {}).items()}
    class_methods = {name: {} for name in classes}
    functions = {}

    for full_name, entry in inventory.get('cpp:function', {}).items():
        # A function whose name contains "<Class>::" is a method of that class.
        for class_name in class_methods:
            prefix = class_name + '::'
            if prefix in full_name:
                class_methods[class_name][full_name.replace(prefix, '')] = relative_url(entry)
                break
        else:
            functions[full_name] = relative_url(entry)

    types = {name: relative_url(entry)
             for name, entry in inventory.get('cpp:type', {}).items()}

    return {'class': classes,
            'class_methods': class_methods,
            'func': functions,
            'type': types
            }
def strip_parens(s):
    """Repeatedly strip a leading '(' paired with a trailing ')' from *s*."""
    # Iterative equivalent of the recursive peel: stop as soon as the
    # string no longer both starts with '(' and ends with ')'.
    while s and s[0] == "(" and s[-1] == ")":
        s = s[1:-1]
    return s
def custom_eval(node, value_map=None):
    """
    Safely evaluate a parsed AST expression instead of calling ``eval``.

    :param node: an ``ast`` node (Call, Num, Str, BinOp, UnaryOp, Compare
        or Name) to evaluate.
    :param value_map: optional mapping of variable names to values,
        consulted when an ``ast.Name`` node is evaluated.
    :raises NotImplementedError: for calls to functions not in FUNCTIONS_MAP.
    :raises ValueError: if a name is referenced but value_map is None.
    :raises KeyError: if a referenced name is missing from value_map.
    :raises ArithmeticError: for any unsupported node type.
    """
    if isinstance(node, ast.Call):
        # BUG FIX: propagate value_map into the argument evaluation —
        # previously names used inside function arguments always raised.
        values = [custom_eval(v, value_map=value_map) for v in node.args]
        func_name = node.func.id
        if func_name in {"AVG", "IF"}:
            # These functions take their arguments unpacked.
            return FUNCTIONS_MAP[func_name](*values)
        elif func_name in FUNCTIONS_MAP:
            return FUNCTIONS_MAP[func_name](values)
        else:
            raise NotImplementedError(func_name)
    elif isinstance(node, ast.Num):
        return node.n
    elif isinstance(node, ast.Str):
        return node.s
    elif isinstance(node, ast.BinOp):
        return OPERATORS[type(node.op)](
            custom_eval(node.left, value_map=value_map),
            custom_eval(node.right, value_map=value_map),
        )
    elif isinstance(node, ast.UnaryOp):
        return OPERATORS[type(node.op)](custom_eval(node.operand, value_map=value_map))
    elif isinstance(node, ast.Compare):
        # Only single comparisons (a < b) are supported, not chains (a < b < c).
        return OPERATORS[type(node.ops[0])](
            custom_eval(node.left, value_map=value_map),
            custom_eval(node.comparators[0], value_map=value_map),
        )
    elif isinstance(node, ast.Name):
        name = node.id
        if value_map is None:
            raise ValueError("value_map must not be None")
        if name not in value_map:
            # Include the offending name to ease debugging.
            raise KeyError(name)
        return value_map[name]
    else:
        raise ArithmeticError()
def validate_dict(input, validate):
    r"""
    Return True or False depending on whether *input* passes the regexp
    validation described by *validate*.

    Validate format:
        {
            keyname: {
                substrname: "^\w{5,10}$",
                subintname: "^[0-9]+$"
            }
        }

    Validates that keyname exists, and that it contains a substrname
    that is 5-10 word characters, and that it contains subintname which
    is only integers.

    :raises ValueError: if either argument is not a dict.

    (Ported from Python 2: ``raise ValueError, "..."`` and the ``unicode``
    type are gone; the old type check also had a precedence bug —
    ``not a == x and b == y`` only fired when *validate* was a dict while
    *input* was not.)
    """
    if not (type(input) == dict and type(validate) == dict):
        raise ValueError("Values to validate_dict must be dicts.")
    # Create a local copy to work our magic on (keys are deleted as
    # they are validated).
    input = dict(input)
    for key in validate.keys():
        if not input.get(key):
            # Key didn't exist (or maps to a falsy value).
            return False
        # In Python 3 all text is `str`; the old code also allowed `unicode`.
        if not type(input[key]) == type(validate[key]) and not type(input[key]) == str:
            # The types of the values didn't match.
            return False
        elif type(input[key]) == dict:
            if not validate_dict(input[key], validate[key]):
                # The sub-validate didn't pass.
                return False
            del input[key]
        elif type(input[key]) == str:
            if not validate_str(input[key], validate[key]):
                # The sub-validate didn't pass.
                return False
            del input[key]
        elif type(input[key]) in (int, float):
            # Numbers need no regexp validation.
            del input[key]
        else:
            # Unknown value type: cannot be validated.
            return False
    # Valid only if the validation spec consumed every input key.
    return input == {}
def run(app=None, host='0.0.0.0', port=8080, cam=None):
    """ Start the HTTP server and block until it stops.

    app: application-extension object; must provide an exec() method
    host: IP address to listen on
    port: port to listen on
    cam: camera object for the mjpeg server; must provide start(), stop()
         and capture() methods
    """
    global _application
    global _server
    _application = app
    _server = HTTPServer((host, port), Handler)
    print(time.asctime(), 'Server start - {0}:{1}'.format(host, port))
    try:
        _server.serve_forever()
    # ValueError presumably signals a remote shutdown request from the
    # handler — confirm against Handler's implementation.
    except ValueError:
        print('remote shutdown')
    except KeyboardInterrupt:
        print('KeyboardInterrupt')
    finally:
        # Always release the listening socket, whatever stopped the loop.
        if _server is not None and _server.socket is not None:
            _server.socket.close()
            print(time.asctime(), 'Server stop - {0}:{1}'.format(host, port))
def build_windows_and_pods_from_events(backpressure_events, window_width_in_hours=1) -> (list, list):
    """
    Generate barchart-friendly time windows with counts of backpressuring durations within each window.

    :param backpressure_events: a list of BackpressureEvents to be broken up into time windows
    :param window_width_in_hours: how wide each time window should be in hours
    :return: a tuple of (list of BackpressureWindow, sorted list of pod names)
    """
    # Guard: with no events there is no first timestamp to anchor the
    # windows on (the original code raised IndexError here).
    if not backpressure_events:
        return [], []

    # The logic below is highly dependent on events being sorted by start
    # timestamp, oldest to newest.
    sorted_events = sorted(backpressure_events, key=lambda e: e.start)

    # Anchor the first window at the top of the hour of the earliest event.
    interval = sorted_events[0].start.replace(minute=0, second=0, microsecond=0)
    width = timedelta(hours=window_width_in_hours)
    next_interval = interval + width

    all_pods = set()
    windows = [BackpressureWindow(interval)]
    for event in sorted_events:
        all_pods.add(event.pod)
        # Open as many new (possibly empty) windows as needed until this
        # event's start falls inside the current one.
        while event.start >= next_interval:
            interval = next_interval
            windows.append(BackpressureWindow(interval))
            next_interval = next_interval + width
        windows[-1].add_event(event)

    return windows, sorted(all_pods)
def package_search(api_url, org_id=None, params=None, start_index=0, rows=100, logger=None, out=None):
    """
    Run the CKAN ``package_search`` API query, optionally filtered by
    organization, iterating ``rows`` at a time from ``start_index``.

    Performs package_search by owner_org, e.g.:
    https://data.ioos.us/api/3/action/package_search?q=owner_org:...

    :param api_url: base CKAN API URL (e.g. "https://data.ioos.us/api/3")
    :param org_id: optional organization id; becomes ``q=owner_org:<id>``
    :param params: optional list of extra query terms
    :param start_index: result offset to start from
    :param rows: page size
    :param logger: optional logger for request diagnostics
    :param out: unused here; kept for backward-compatible interface
    :return: the decoded JSON response (dict)
    """
    action = "package_search"

    # Build the query payload; the org filter and extra params are joined
    # with '+' so CKAN treats them as conjunctive search terms.
    if org_id is not None:
        if params is not None:
            query = "owner_org:{id}+{params}".format(id=org_id, params="+".join(params))
        else:
            query = "owner_org:{id}".format(id=org_id)
        payload = {'q': query, 'start': start_index, 'rows': rows}
    elif params is not None:
        payload = {'q': " ".join(params), 'start': start_index, 'rows': rows}
    else:
        payload = {'start': start_index, 'rows': rows}

    url = "/".join([api_url, "action", action])
    if logger:
        logger.info("Executing {action}. URL: {url}. Parameters {params}".format(
            action=action, url=url, params=payload))

    # POST with a JSON body (equivalent GET/`data=` variants were removed
    # as dead debug code, along with the stray print() diagnostics).
    r = requests.post(url=url, headers={'content-type': 'application/json'}, json=payload)
    return r.json()
def get_reduction(n_components=None, seed=4242) -> Iterable[
        Tuple[str, "sklearn.base.TransformerMixin", int]]:
    """
    Yield the benchmark dimensionality-reduction algorithms as
    (name, transformer, n_components) tuples.
    """
    import sklearn
    # FA varimax rotation requires sklearn version >= 0.24.
    assert tuple(map(int, sklearn.__version__.split('.'))) >= (0, 24)
    from sklearn.decomposition import PCA, FactorAnalysis
    from sklearn.feature_selection import SelectKBest, f_classif
    from sklearn.preprocessing import FunctionTransformer

    for count in np.ravel(n_components):
        if count is None:
            # Identity transform: no reduction at all.
            yield 'none', FunctionTransformer(), count
            continue
        yield 'kbest', SelectKBest(f_classif, k=count), count
        yield 'pca', PCA(n_components=count, random_state=seed), count
        yield 'fa', FactorAnalysis(n_components=count, rotation='varimax',
                                   random_state=seed), count
def write_inline_statistics(inline_file_path, inline_statistics, compilation):
    """Write inline statistics to a CSV file.

    Each output row is ``[compilation[i]] + inline_statistics[i]``.

    :param inline_file_path: destination CSV file path
    :param inline_statistics: per-compilation lists of statistic values
    :param compilation: row labels, aligned index-wise with inline_statistics
    """
    # Context manager ensures the handle is flushed and closed — the
    # original leaked the file object returned by open().
    with open(inline_file_path, "w", newline="") as handle:
        csv_writer = csv.writer(handle)
        for label, stats in zip(compilation, inline_statistics):
            csv_writer.writerow([label] + stats)
def voigt_fit(prefix, x, slice, c, vary):
    """
    Fit a Voigt profile to a spectral slice.

    The center may be held fixed or floated; every other parameter floats.

    Parameters:
        prefix: prefix for lmfit to distinguish variables during multiple fits
        x: x values to use in fit
        slice: slice to be fit
        c: center of the Voigt, obtained from the slice's maximum value
        vary: Boolean, whether the center is floated (default True upstream)

    Returns:
        out: the lmfit fit result
    """
    voigt = VoigtModel(prefix=prefix)
    params = voigt.guess(slice, x=x)
    params[str(prefix) + 'center'].set(c, vary=vary)
    return voigt.fit(slice, params, x=x)
def add_tags(obj, tags):
    """
    Stamp string attributes onto a Maya object.

    :param obj: Maya object to add string attributes to.
    :param tags: dict{'Region': 'Arm', 'Side': 'R', 'Type': 'IK'}
    """
    for name in tags.keys():
        # Create the attribute first when the node doesn't carry it yet,
        # then set the value either way.
        if not obj.hasAttr(name):
            obj.addAttr(name, type='string', keyable=False)
        obj.setAttr(name, tags[name])
def turn_coordinates_into_list_of_distances(list_of_coordinates: List[tuple]):
    """
    Compute cumulative distances along a list of coordinates.

    Uses 'great_circle' for measuring since it is much faster (but less
    precise) than 'geodesic'.

    Parameters
    ----------
    list_of_coordinates : List[tuple]
        A list containing tuples with coordinates

    Returns
    -------
    list_of_distances : List[float]
        Cumulative distance in kilometers between subsequent coordinates;
        values are added up, so the list is increasing, starting at 0.
    """
    cumulative = []
    previous = None
    for current in list_of_coordinates:
        if not previous:
            cumulative.append(0.)
        else:
            # Indices are swapped before the call — presumably the tuples
            # are stored (lon, lat) while great_circle expects (lat, lon);
            # confirm the upstream coordinate ordering.
            leg = distance.great_circle([previous[1], previous[0]],
                                        [current[1], current[0]])
            cumulative.append(round(cumulative[-1] + leg.km, 4))
        previous = current
    return cumulative
def getPileupDatasetSizes(datasets, phedexUrl):
    """
    Given a list of datasets, find all their blocks with replicas
    available, i.e., blocks that have valid files to be processed,
    and calculate the total dataset size

    :param datasets: list of dataset names
    :param phedexUrl: a string with the PhEDEx URL
    :return: a dictionary of datasets and their respective sizes
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    sizeByDset = {}
    if not datasets:
        return sizeByDset

    # One blockreplicas query per dataset, fetched concurrently.
    urls = ['%s/blockreplicas?dataset=%s' % (phedexUrl, dset) for dset in datasets]
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # The dataset name is the value of the last '=' in the query URL.
        dataset = row['url'].split('=')[-1]
        if row['data'] is None:
            print("Failure in getPileupDatasetSizes for dataset %s. Error: %s %s" % (dataset,
                                                                                     row.get('code'),
                                                                                     row.get('error')))
            # None marks "size unknown" for this dataset.
            sizeByDset.setdefault(dataset, None)
            continue
        rows = json.loads(row['data'])
        sizeByDset.setdefault(dataset, 0)  # flat dict in the format of blockName: blockSize
        try:
            # Sum the byte size of every block replica of this dataset.
            for item in rows['phedex']['block']:
                sizeByDset[dataset] += item['bytes']
        except Exception as exc:
            # Malformed/unexpected payload: degrade to "size unknown".
            print("Failure in getPileupDatasetSizes for dataset %s. Error: %s" % (dataset, str(exc)))
            sizeByDset[dataset] = None
    return sizeByDset
def convertpo(inputpofile, outputpotfile, template, reverse=False):
    """Read inputpofile, remove the header, merge translations from the
    template, and write the result to outputpotfile.

    :param inputpofile: source PO file (file object/path accepted by po.pofile)
    :param outputpotfile: writable file object receiving the output
    :param template: template PO file used to look up matching units
    :param reverse: if True, swap source/target direction first
    :return: 1 (legacy success flag)
    """
    inputpo = po.pofile(inputpofile)
    templatepo = po.pofile(template)
    if reverse:
        swapdir(inputpo)
    templatepo.makeindex()
    # Drop the header unit; the output should carry no header.
    header = inputpo.header()
    if header:
        inputpo.units = inputpo.units[1:]

    # BUG FIX: the original deleted obsolete units from the very list it
    # was enumerating, which silently skipped the unit following each
    # deletion. Collect the surviving units instead.
    kept_units = []
    for unit in inputpo.units:
        # Prefer a location-indexed match whose source agrees; otherwise
        # fall back to a plain source lookup.
        for location in unit.getlocations():
            templateunit = templatepo.locationindex.get(location, None)
            if templateunit and templateunit.source == unit.source:
                break
        else:
            templateunit = templatepo.findunit(unit.source)
        unit.othercomments = []
        if unit.target and not unit.isfuzzy():
            unit.source = unit.target
        elif not reverse:
            if inputpo.filename:
                unit.addnote("No translation found in %s" % inputpo.filename, origin="programmer")
            else:
                unit.addnote("No translation found in the supplied source language", origin="programmer")
        unit.target = ""
        unit.markfuzzy(False)
        if templateunit:
            unit.addnote(templateunit.getnotes(origin="translator"))
            unit.markfuzzy(templateunit.isfuzzy())
            unit.target = templateunit.target
        if not unit.isobsolete():
            kept_units.append(unit)
    inputpo.units = kept_units

    outputpotfile.write(str(inputpo))
    return 1
def _test_get_atoms(atom_factory, atoms):
"""
Test :meth:`.AtomFactory.get_atoms`.
Parameters
----------
atom_factory : :class:`.AtomFactory`
The atom factory to test.
atoms : :class:`tuple` of :class:`.Atom`
The atoms which should be created.
Returns
-------
None : :class:`NoneType`
"""
for atom1, atom2 in it.zip_longest(
atom_factory.get_atoms(),
atoms,
):
assert atom1.get_atomic_number() == atom2.get_atomic_number()
assert atom1.get_charge() == atom2.get_charge()
assert atom1.get_max_valence() == atom2.get_max_valence() | 28,830 |
def do_fk5(l, b, jde):
    """Correct ecliptic coordinates to the FK5 reference frame.

    Applies the correction (cf. Meeus, Astronomical Algorithms):
    delta_lon = -0".09033 + 0".03916 (cos L' + sin L') tan b
    delta_lat = +0".03916 (cos L' - sin L')

    Parameters
    ----------
    l : float
        longitude (radians, presumably — confirm caller convention)
    b : float
        latitude (radians, presumably — confirm caller convention)
    jde : float
        Julian Day of the ephemeris

    Returns
    -------
    tuple
        tuple(l,b) with the FK5 corrections applied
    """
    # Julian centuries since J2000.0.
    T = (jde - JD_J2000) / CENTURY
    # L' = L - 1.397 deg * T - 0.00031 deg * T^2
    lda = l - deg2rad(1.397)*T - deg2rad(0.00031)*T*T
    # Corrections are given in arcseconds, hence the /3600 conversions.
    delta_lon = -deg2rad(0.09033/3600) + deg2rad(0.03916/3600)*(cos(lda)+sin(lda))*tan(b)
    delta_lat = deg2rad(0.03916/3600)*(np.cos(lda)- np.sin(lda))
    l += delta_lon
    b += delta_lat
    return l,b
def test_construct_many_to_one_kwargs():
    """
    GIVEN artifacts for a many to one relationship with kwargs
    WHEN construct is called with the artifacts
    THEN a many to one relationship with kwargs is returned.
    """
    relationship_artifacts = artifacts_types.ManyToOneRelationshipPropertyArtifacts(
        type=types.PropertyType.RELATIONSHIP,
        schema={},
        required=False,
        description=None,
        sub_type=types.RelationshipType.MANY_TO_ONE,
        parent="Parent",
        backref_property=None,
        kwargs={"order_by": "id"},
        write_only=False,
        foreign_key_property="prop_1",
        foreign_key="foreign.key",
        nullable=False,
    )

    constructed = relationship.construct(artifacts=relationship_artifacts)

    # The kwargs must be forwarded onto the constructed relationship.
    assert constructed.order_by == "id"
def command_norm_yaml(ns):
    """Run the norm_yaml command: normalize every YAML file selected by *ns*."""
    for path in ns_to_paths(ns, parent_dir=common.YAML_DIR, ext='.yaml'):
        common.normalize_yaml(path)
def adjust_learning_rate(optimizer, epoch):
    """Set the learning rate to the initial LR decayed by a factor of 0.8
    every epoch, and return the new rate.

    (The previous docstring claimed "decayed by 10 every 30 epochs",
    which did not match the implementation below.)
    """
    # `epoch // 1` is a no-op; kept so the decay interval is easy to change.
    lr = args.lr * (0.8 ** (epoch // 1))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
def get_defaults(module, *args):
    """
    Find an internal defaults data file, load it with YAML, and return the
    resulting flattened dictionary.

    The dot-separated module path (e.g. "abscal.wfc3.reduce_grism_extract")
    is split into a package part ("abscal.wfc3") and a file name built from
    the final component ("reduce_grism_extract.yaml"); ``get_data_file``
    (with ``defaults=True``) resolves that pair to the packaged defaults
    file. The loaded mapping is then flattened into a single-level dict
    consisting of:

    - All key/value pairs in the "all" dictionary
    - All key/value pairs in any dictionary matching any of the keyword
      arguments, applied recursively into nested dictionaries.

    Parameters
    ----------
    module : str
        The module to search in, using standard dot separators (e.g. abscal.wfc3)
    args : list
        Specific keywords ensuring the inclusion of particular sub-values
        or sub-dictionaries.

    Returns
    -------
    defaults : dict
        Flat dictionary of default parameters.
    """
    *package_parts, stem = module.split(".")
    package = ".".join(package_parts)
    defaults_file = get_data_file(package, stem + ".yaml", defaults=True)
    with open(defaults_file, "r") as stream:
        raw_defaults = yaml.safe_load(stream)
    return _extract_dict(raw_defaults, {}, args)
def register_rnaseq(rna_seq_files, transaction):
    """Registers RNAseq experiment raw data in openBIS.

    The list must contain two elements, following the naming convention ``r'.*tumor_rna[1,2]{1}.fastq.gz'``.
    Both files must additionally contain the same QBiC sample code in order to get registered.

    Args:
        rna_seq_files (list): A list with fastq files from an RNAseq experiment
        transaction (:class:DataSetRegistrationTransaction): An openBIS data set registration object

    Raises:
        MTBdropboxerror: If some of the conditions have not been fullfilled, with a text string explaining
        the reason for the failure.
    """
    print(mtbutils.log_stardate('Registering incoming MTB RNAseq data {}'.format(rna_seq_files)))
    # Check if dataset files are paired end and complete: an even file
    # count with exactly one (identical) QBiC barcode in each file name.
    assert len(rna_seq_files) % 2 == 0
    file1 = os.path.basename(rna_seq_files[0])
    file2 = os.path.basename(rna_seq_files[1])
    assert len(set(QCODE_REG.findall(file1))) == 1
    assert len(set(QCODE_REG.findall(file2))) == 1
    assert QCODE_REG.findall(file1)[0] == QCODE_REG.findall(file2)[0]
    # This is the tumor dna sample barcode (type: TEST_SAMPLE)
    dna_barcode = QCODE_REG.findall(file1)[0]
    # Find the corresponding space and project
    space, project = space_and_project(dna_barcode)
    # Count the existing samples of the project so a fresh barcode can be issued.
    search_service = transaction.getSearchService()
    sc = SearchCriteria()
    pc = SearchCriteria()
    pc.addMatchClause(SearchCriteria.MatchClause.createAttributeMatch(SearchCriteria.MatchClauseAttribute.PROJECT, project));
    sc.addSubCriteria(SearchSubCriteria.createExperimentCriteria(pc))
    result = search_service.searchForSamples(sc)
    print("Found {} samples for project {} in space {}.".format(len(result), project, space))
    new_rna_sample_barcode = getNextFreeBarcode(project,
                                                numberOfBarcodes=len(result),
                                                transaction=transaction,
                                                space=space)
    # Now get the parent sample id (tumor sample, type: BIOLOGICAL_SAMPLE)
    tumor_dna_sample = getsample(dna_barcode, transaction)
    parent_ids = tumor_dna_sample.getParentSampleIdentifiers()
    assert len(parent_ids) == 1
    print(parent_ids)
    tumor_tissue_sample = getsample(parent_ids[0], transaction)
    # Now we have to create a new TEST_SAMPLE with sample type RNA and attach it
    # to the tumor tissue sample
    new_rna_sample = transaction.createNewSample("/{space}/{barcode}".format(
        space=space,
        barcode=new_rna_sample_barcode), "Q_TEST_SAMPLE")
    # Attach the RNA sample to the project's experiment number 3 —
    # presumably the fixed "sample preparation" experiment; confirm.
    new_rna_sample.setExperiment(transaction.getSearchService().getExperiment(
        "/{space}/{project}/{project}E{number}".format(
            space=space,
            project=project,
            number=3
        )
    ))
    parent_sample_id = tumor_tissue_sample.getSampleIdentifier()
    new_rna_sample.setParentSampleIdentifiers([parent_sample_id])
    new_rna_sample.setPropertyValue('Q_SAMPLE_TYPE', 'RNA')
    # We design a new experiment and sample identifier
    experiments = transaction.getSearchService().listExperiments('/{}/{}'.format(space, project))
    last_exp_id = get_last_exp_id(experiments)
    new_exp_id = '/{space}/{project}/{project}E{number}'.format(
        space=space, project=project, number=last_exp_id + COUNTER.newId())
    new_sample_id = '/{space}/NGS{barcode}'.format(
        space=space, project=project, barcode=new_rna_sample_barcode)
    new_ngs_experiment = transaction.createNewExperiment(new_exp_id, "Q_NGS_MEASUREMENT")
    new_ngs_experiment.setPropertyValue('Q_CURRENT_STATUS', 'FINISHED')
    new_ngs_sample = transaction.createNewSample(new_sample_id, "Q_NGS_SINGLE_SAMPLE_RUN")
    new_ngs_sample.setParentSampleIdentifiers(["/{space}/{barcode}".format(space=space, barcode=new_rna_sample_barcode)])
    new_ngs_sample.setExperiment(new_ngs_experiment)
    # Create a data-set attached to the VARIANT CALL sample
    data_set = transaction.createNewDataSet("Q_NGS_RAW_DATA")
    data_set.setMeasuredData(False)
    data_set.setSample(new_ngs_sample)
    # Put the files in one directory
    base_path = os.path.dirname(transaction.getIncoming().getAbsolutePath())
    registration_dir = os.path.join(base_path, '{}_pairend_end_sequencing_reads'.format(new_rna_sample_barcode))
    os.mkdir(registration_dir)
    for raw_data in rna_seq_files:
        # replace tumor dna barcode with tumor rna barcode
        old_base = os.path.basename(raw_data)
        new_base = old_base.replace(dna_barcode, new_rna_sample_barcode)
        os.rename(raw_data, os.path.join(registration_dir, os.path.basename(new_base)))
    # Attach the directory to the dataset
    transaction.moveFile(registration_dir, data_set)
    # Update sample location
    update_sample_location_to_qbic(new_rna_sample_barcode)
def publish(dry_run=False):
    """ POST /repos/:owner/:repo/releases
    https://developer.github.com/v3/repos/releases/#create-a-release
    """
    # dynamic import to allow the other commands to run without requests
    import requests

    # Read GitHub configuration from the environment. If unset, the POST
    # will fail and the developer will understand why.
    github_owner = os.environ.get('GITHUB_OWNER')
    github_repo = os.environ.get('GITHUB_REPO')
    github_user = os.environ.get('GITHUB_USER')
    github_key = os.environ.get('GITHUB_KEY')

    # build request
    url = "https://api.github.com/repos/{}/{}/releases".format(github_owner, github_repo)
    changelog_url = "https://github.com/{}/{}/blob/release-{}/CHANGELOG.md".format(github_owner, github_repo, _version)
    post_args = {
        "tag_name": _version,
        "name": "Release {}".format(_version),
        "body": "See [CHANGELOG.md]({}) for all details".format(changelog_url),
        "draft": False,
        "prerelease": False
    }
    logging.debug("POST %s with data: %s", url, post_args)

    if dry_run:
        # Show what would have been sent without touching the network.
        print("POST {}".format(url))
        print("auth({}, xxx)".format(github_user))
        pprint(post_args)
        return

    # make request and raise exception if we had an issue
    response = requests.post(url, data=json.dumps(post_args), auth=(github_user, github_key))
    response.raise_for_status()
def get(using=None):
    """Return a browser launcher instance appropriate for the environment."""
    # Lazily build the browser registry, double-checked under the lock.
    if _tryorder is None:
        with _lock:
            if _tryorder is None:
                register_standard_browsers()
    candidates = _tryorder if using is None else [using]
    for browser in candidates:
        if '%s' in browser:
            # The entry is a command-line template: split into name + args.
            cmdline = shlex.split(browser)
            if cmdline[-1] == '&':
                return BackgroundBrowser(cmdline[:-1])
            return GenericBrowser(cmdline)
        # Otherwise the entry is a browser name or executable path.
        try:
            command = _browsers[browser.lower()]
        except KeyError:
            command = _synthesize(browser)
        if command[1] is not None:
            return command[1]
        if command[0] is not None:
            return command[0]()
    raise Error("could not locate runnable browser")
def _bin_to_long(x):
"""
Convert a binary string into a long integer
This is a clever optimization for fast xor vector math
"""
return int(binascii.hexlify(x), 16) | 28,839 |
def update_book(username, book_id, data):
    """Update book data"""
    cursor, conn = db_sql.connect('books.db')
    columns = list(data.keys())
    # NOTE(review): table and column names cannot be bound as SQL parameters,
    # so `username` and the keys of `data` are interpolated directly into the
    # statement — they must come from trusted input; verify upstream.
    sql = ("UPDATE " + username + " SET " + " = ?, ".join(columns) +
           " = ? WHERE _id = ?")
    params = [data[column] for column in columns]
    params.append(book_id)
    cursor.execute(sql, tuple(params))
    conn.commit()
    conn.close()
    return cursor.lastrowid
def test_that_peek_returns_the_next_value(q_20):
    """Will test that the peek method returns the next value, the next to be dequeued."""
    for _ in range(20):
        expected = q_20.peek()
        assert q_20.dequeue() == expected
def score_retrievals(label, retrievals):
    """
    Evaluating the current retrieval experiment
    Args:
    -----
    label: string
        label corresponding to the query
    retrievals: list
        list of strings containing the ranked labels corresponding to the retrievals
    Returns:
    --------
    metrics: dict
        precision@k and recall@k for k in {1, 5, 10, 50, #relevant}, plus mAP
        and mAR; all metric values are -1 when there are no relevant
        retrievals. Assumes at least 50 ranked retrievals are provided.
    """
    # retrievals = retrievals[1:] # we do not account rank-0 since it's self-retrieval
    relevant_mask = np.array([1 if r == label else 0 for r in retrievals])
    num_relevant_retrievals = np.sum(relevant_mask)
    if num_relevant_retrievals == 0:
        print(label)
        metrics = {
            "label": label,
            "p@1": -1,
            "p@5": -1,
            "p@10": -1,
            "p@50": -1,
            "p@rel": -1,
            "mAP": -1,
            "r@1": -1,
            "r@5": -1,
            "r@10": -1,
            "r@50": -1,
            "r@rel": -1,
            "mAR": -1
        }
        return metrics
    # computing precision based metrics
    precision_at_rank = np.cumsum(relevant_mask) / np.arange(1, len(relevant_mask) + 1)
    precision_at_1 = precision_at_rank[0]
    precision_at_5 = precision_at_rank[4]
    precision_at_10 = precision_at_rank[9]
    precision_at_50 = precision_at_rank[49]
    precision_at_rel = precision_at_rank[num_relevant_retrievals - 1]
    average_precision = np.sum(precision_at_rank * relevant_mask) / num_relevant_retrievals
    # computing recall based metrics
    recall_at_rank = np.cumsum(relevant_mask) / num_relevant_retrievals
    recall_at_1 = recall_at_rank[0]
    recall_at_5 = recall_at_rank[4]
    recall_at_10 = recall_at_rank[9]
    recall_at_50 = recall_at_rank[49]
    recall_at_rel = recall_at_rank[num_relevant_retrievals - 1]
    average_recall = np.sum(recall_at_rank * relevant_mask) / num_relevant_retrievals
    metrics = {
        "label": label,
        "p@1": precision_at_1,
        "p@5": precision_at_5,
        "p@10": precision_at_10,
        # BUG FIX: these two entries previously reused the "p@10"/"r@10" keys,
        # silently overwriting them and dropping "p@50"/"r@50" (which the
        # no-relevant branch above does include).
        "p@50": precision_at_50,
        "p@rel": precision_at_rel,
        "mAP": average_precision,
        "r@1": recall_at_1,
        "r@5": recall_at_5,
        "r@10": recall_at_10,
        "r@50": recall_at_50,
        "r@rel": recall_at_rel,
        "mAR": average_recall
    }
    return metrics
def default_add_one_res_2_all_res(one_res: list, all_res: list) -> list:
    """
    Default helper 1: flatten ``one_res`` one level and append every element
    to ``all_res`` in place.

    :param one_res: list of iterables whose items should be collected
    :param all_res: accumulator list, mutated in place
    :return: the (mutated) ``all_res`` list
    """
    # extend() appends each item of the chunk, equivalent to the previous
    # nested append loop but idiomatic and faster.
    for chunk in one_res:
        all_res.extend(chunk)
    return all_res
def search_gene(search_string: str, **kwargs) -> Iterable[Gene]:
    """ Symbols have been separated into search_gene_symbol - this returns Gene objects """
    # Recognised identifier formats, paired with the consortium they belong to.
    consortium_patterns = [
        (r"(ENSG\d+)", AnnotationConsortium.ENSEMBL),
        (r"Gene:(\d+)", AnnotationConsortium.REFSEQ),
        (r"GeneID:(\d+)", AnnotationConsortium.REFSEQ),
        (r"Gene ID:(\d+)", AnnotationConsortium.REFSEQ),
    ]
    for pattern, consortium in consortium_patterns:
        match = re.match(pattern, search_string, re.IGNORECASE)
        if match:
            return Gene.objects.filter(identifier=match.group(1), annotation_consortium=consortium)
    return []
def get_steam_libraries():
    """Returns list of found Steam library folders.

    Checks the default install dir first, then every library listed in the
    Steam libraryfolders.vdf config file. Only folders that actually contain
    a ``steamapps/common`` directory are returned.
    """
    found_libraries = []
    if os.path.isdir(STEAM_INSTALL_DIR + '/steamapps/common'):
        found_libraries.append(STEAM_INSTALL_DIR)
    libraries_config = {}
    if LIBRARY_FOLDERS_FILE:
        # BUG FIX: use a context manager so the vdf file handle is always
        # closed (the previous code leaked the object returned by open()).
        with open(LIBRARY_FOLDERS_FILE) as vdf_file:
            libraries_config = vdf.load(vdf_file)
    if libraries_config:
        # The top-level key casing differs between Steam client versions.
        keyword = ''
        if 'libraryfolders' in libraries_config:
            keyword = 'libraryfolders'
        elif 'LibraryFolders' in libraries_config:
            keyword = 'LibraryFolders'
        for library in libraries_config[keyword].values():
            # Newer configs store dicts with a 'path' key; older ones store
            # the path string directly.
            library_path = ''
            if 'path' in library:
                library_path = library['path']
            elif isinstance(library, str):
                library_path = library
            if library_path and library_path not in found_libraries and os.path.isdir(library_path + '/steamapps/common'):
                found_libraries.append(library_path)
    return found_libraries
def detect_wings_simple(img, pixel_size=1,
                        ds=2, layers=2, thresh_window=1.8e3,
                        minarea=0.5e6, maxarea=2e6, minsolidity=.6,
                        minaspect=.3, plot=False, threshold_fun=None):
    """
    simple wing detection via adaptive thresholding and some filtering by shape
    default area 0.5-2 mm^2
    Parameters
    ----------
    img: np-array (2-dim)
        the input image
    pixel_size: scalar
        pixel size in input image
    ds: scalar
        downsampling factor at each layer
    layers: scalar
        how may downsampling layers to calculate
    thresh_window: integer
        window for adaptive threshold, in original image pixels
    minarea: scalar
        minimum size of objects to detect, in units^2
    maxarea: scalar
        maximum size of objects to detect, in units^2
    minsolidity: scalar
        minimal solidity of detected objects \in (0,1)
    minaspect: scalar
        minimal inverse aspect ratio of detected objects \in (0,1)
    plot: boolean
        whether to plot detections or not
    threshold_fun: function pointer, optional
        thresholding function to use in windows
    Returns
    -------
    bboxes: list of 4-tuples
        bounding boxes (in original image pixel units)
    """
    # scale min and max area to be in pixels^2
    minarea = minarea / pixel_size**2 / ds**(layers*2)
    maxarea = maxarea / pixel_size**2 / ds**(layers*2)
    # scale thresh window size, make sure it is odd
    thresh_window = int(thresh_window / pixel_size / ds**layers)
    thresh_window += 0 if thresh_window%2 == 1 else 1
    logger = logging.getLogger(__name__)
    # some debug output:
    logger.info('wing detection started')
    logger.debug('input shape: {}'.format(img.shape))
    logger.debug('ds: {}, layer:{}'.format(ds, layers))
    logger.debug('minarea: {}, maxarea:{}'.format(minarea, maxarea))
    logger.debug('threshold window: {}'.format(thresh_window))
    # downsample
    pyr = [p for p in pyramid_gaussian(img, max_layer= layers, downscale = ds)]
    img_ds = pyr[layers]
    logger.debug('img size after ds: {}'.format(img_ds.shape))
    # rescale to (0-1)
    img_ds = img_ds.astype(float)
    img_ds = rescale_intensity(img_ds, out_range=(0.0, 1.0))
    # smooth
    img_ds = gaussian_filter(img_ds, 2.0)
    # adaptive threshold
    if threshold_fun is None:
        thrd = img_ds > threshold_local(img_ds, thresh_window)
    else:
        thrd = img_ds > threshold_local(img_ds, thresh_window, method='generic', param=threshold_fun)
    # clean a bit
    thrd = np.bitwise_not(thrd)
    thrd = binary_opening(thrd, selem=disk(4))
    labelled = label(thrd)
    # filter objs
    ls = [r.label for r in regionprops(labelled) if r.area>minarea and
          r.area<maxarea and r.solidity>minsolidity and aspect(r.bbox) > minaspect]
    # filtered binary
    res = np.zeros(thrd.shape)
    # IMPROVEMENT: reuse the labelling computed above instead of calling
    # label(thrd) a second time (deterministic, so the result is identical).
    for li in ls:
        res += (labelled == li)
    # more cleaning, plus some erosion to separate touching wings
    # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the documented replacement.
    r2 = remove_small_holes(res.astype(bool), 25000)
    r2 = binary_erosion(r2, selem=disk(3))
    # show detections
    if plot:
        image_label_overlay = label2rgb(label(r2), image=img_ds)
        plt.imshow(image_label_overlay)
        ax = plt.gca()
    # get bboxes
    bboxes = []
    for r in regionprops(label(r2)):
        # TODO: is this really necessary?
        if r.area < (minarea * .8 ):
            continue
        # Bounding boxes are scaled back up to original-image pixel units.
        bbox_scaled = np.array(r.bbox) * (ds**layers)
        logger.debug('bbox: {}, upsampled: {}'.format(r.bbox, bbox_scaled))
        bboxes.append(bbox_scaled)
        if plot:
            minr, minc, maxr, maxc = r.bbox
            rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                      fill=False, edgecolor='red', linewidth=2)
            ax.add_patch(rect)
    logger.info('found {} object(s)'.format(len(bboxes)) )
    return bboxes
def check_logged(request):
    """Check if user is logged and have the permission.

    Reads an optional ``permission`` query parameter; if present and the
    requesting user lacks it, raises PumpWoodUnauthorized. Otherwise
    returns Response(True).
    """
    permission = request.GET.get('permission', '')
    if permission:
        has_perm = request.user.has_perm(permission)
        if not has_perm:
            # BUG FIX: corrected the "exectute" typo in the error message.
            msg = (
                "User does not have permission to execute this action:\n"
                "expected permission: {permission}").format(
                    permission=permission)
            raise exceptions.PumpWoodUnauthorized(
                message=msg, payload={
                    "permission": permission})
    return Response(True)
def _deprecated_configs(agentConfig):
    """ Warn about deprecated configs
    """
    deprecated_checks = {}
    # An old-style parameter counts as "enabled" when any key in agentConfig
    # starts with its prefix.
    deprecated_configs_enabled = [
        value for prefix, value in OLD_STYLE_PARAMETERS
        if any(entry.startswith(prefix) for entry in agentConfig)
    ]
    for deprecated_config in deprecated_configs_enabled:
        msg = "Configuring %s in datadog.conf is not supported anymore. Please use conf.d" % deprecated_config
        deprecated_checks[deprecated_config] = {'error': msg, 'traceback': None}
        log.error(msg)
    return deprecated_checks
def os_link(source, link_name):
    """Add support for os.link() on Windows.

    Creates a hard link named `link_name` pointing at `source`, using the
    Win32 CreateHardLinkW API on Windows and os.link() elsewhere.
    """
    if sys.platform == 'win32':
        # CreateHardLinkW returns 0 on failure, so raise to mirror os.link().
        # NOTE(review): `unicode` is a Python 2 builtin — this branch would
        # raise NameError on Python 3; confirm the target interpreter.
        if not ctypes.windll.kernel32.CreateHardLinkW(
                unicode(link_name), unicode(source), 0):
            raise OSError()
    else:
        # POSIX platforms support hard links natively.
        os.link(source, link_name)
def check_number_of_calls(object_with_method, method_name, maximum_calls, minimum_calls=1, stack_depth=2):
    """
    Instruments the given method on the given object to verify the number of calls to the method is
    less than or equal to the expected maximum_calls and greater than or equal to the expected minimum_calls.
    """
    # Thin wrapper over check_sum_of_calls for a single method; stack_depth is
    # incremented so failure reporting points at our caller, not this wrapper.
    return check_sum_of_calls(
        object_with_method,
        [method_name],
        maximum_calls,
        minimum_calls,
        stack_depth=stack_depth + 1
    )
def strict_transport_security(reqs: dict, expectation='hsts-implemented-max-age-at-least-six-months') -> dict:
    """
    :param reqs: dictionary containing all the request and response objects
    :param expectation: test expectation
        hsts-implemented-max-age-at-least-six-months: HSTS implemented with a max age of at least six months (15768000)
        hsts-implemented-max-age-less-than-six-months: HSTS implemented with a max age of less than six months
        hsts-not-implemented-no-https: HSTS can't be implemented on http only sites
        hsts-not-implemented: HSTS not implemented
        hsts-header-invalid: HSTS header isn't parsable
        hsts-invalid-cert: Invalid certificate chain
    :return: dictionary with:
        data: the raw HSTS header
        expectation: test expectation
        includesubdomains: whether the includeSubDomains directive is set
        pass: whether the site's configuration met its expectation
        preload: whether the preload flag is set
        result: short string describing the result of the test
    """
    SIX_MONTHS = 15552000  # 15768000 is six months, but a lot of sites use 15552000, so a white lie is in order
    output = {
        'data': None,
        'expectation': expectation,
        'includeSubDomains': False,
        'max-age': None,
        'pass': False,
        'preload': False,
        'preloaded': False,
        'result': 'hsts-not-implemented',
    }
    response = reqs['responses']['https']
    # If there's no HTTPS, we can't have HSTS
    if response is None:
        output['result'] = 'hsts-not-implemented-no-https'
    # Also need a valid certificate chain for HSTS
    elif not response.verified:
        output['result'] = 'hsts-invalid-cert'
    elif 'Strict-Transport-Security' in response.headers:
        output['data'] = response.headers['Strict-Transport-Security'][0:1024]  # code against malicious headers
        try:
            sts = [i.lower().strip() for i in output['data'].split(';')]
            # Throw an error if the header is set twice
            if ',' in output['data']:
                raise ValueError
            for parameter in sts:
                if parameter.startswith('max-age='):
                    output['max-age'] = int(parameter[8:128])  # defense
                elif parameter == 'includesubdomains':
                    output['includeSubDomains'] = True
                elif parameter == 'preload':
                    output['preload'] = True
            # NOTE(review): a max-age of 0 (valid per RFC 6797, disables HSTS)
            # is treated as invalid here — confirm that is intentional.
            if output['max-age']:
                if output['max-age'] < SIX_MONTHS:  # must be at least six months
                    output['result'] = 'hsts-implemented-max-age-less-than-six-months'
                else:
                    output['result'] = 'hsts-implemented-max-age-at-least-six-months'
            else:
                raise ValueError
        # BUG FIX: the bare `except:` swallowed every exception (including
        # KeyboardInterrupt). ValueError is the only error this try block
        # intentionally raises (explicitly and via int()).
        except ValueError:
            output['result'] = 'hsts-header-invalid'
    # If they're in the preloaded list, this overrides most anything else
    # TODO: Check to see if all redirect domains are preloaded
    # TODO: Check every redirect along the way for HSTS
    if response is not None:
        preloaded = is_hsts_preloaded(urlparse(response.url).netloc)
        if preloaded:
            output['result'] = 'hsts-preloaded'
            output['includeSubDomains'] = preloaded['includeSubDomains']
            output['preloaded'] = True
    # Check to see if the test passed or failed
    if output['result'] in ('hsts-implemented-max-age-at-least-six-months',
                            'hsts-preloaded',
                            expectation):
        output['pass'] = True
    return output
def _get_span(succ, name, resultidx=0, matchidx=0, silent_fail=False):
"""
Helper method to return the span for the given result index and name, or None.
Args:
succ: success instance
name: name of the match info, if None, uses the entire span of the result
resultidx: index of the result in success
matchidx: if there is more than one match info with that name, which one to return, if no name, ignored
silent_fail: if True, return None, if False, raise an exception if the match info is not present
Returns:
the span or None if no Span exists
"""
if resultidx >= len(succ):
if not silent_fail:
raise Exception(f"No resultidx {resultidx}, only {len(succ)} results")
return None
res = succ[resultidx]
if name:
matches = res.matches4name(name)
if not matches:
if not silent_fail:
raise Exception(f"No match info with name {name} in result")
return None
if matchidx >= len(matches):
if not silent_fail:
raise Exception(
f"No match info with index {matchidx}, length is {len(matches)}"
)
return None
ret = matches[matchidx].get("span")
else:
ret = res.span
if ret is None:
if silent_fail:
return None
else:
raise Exception("No span found")
return ret | 28,852 |
def plot_ft(fns=None, s=False, o=None, xd=False, yd=False, o_fmt='png',
            dpi=300, in_fmt='mat', **kwargs):
    """
    Plot the fourier spectrum of the data.
    Can be useful if you have mystery data of unknown frequency.
    """
    # NOTE(review): **kwargs is accepted but never forwarded to plot.plot —
    # confirm whether extra options should be passed through or rejected.
    plot.plot(fns, xd=xd, yd=yd, s=s, o=o, ftype=o_fmt, dpi=dpi,
              filetype=in_fmt, ft=True)
def gen_k_arr(K, n):
    """Generate n indices from range(K) with no two consecutive repeats.

    Arguments:
        K {int} -- [apa numbers] size of the index pool
        n {int} -- [trial numbers] number of indices to return

    Returns:
        numpy array of length n. BUG FIX: the K > 1 branch previously
        returned n * K entries (n shuffle rounds of K items each), which was
        inconsistent with the K <= 1 branch returning exactly n entries.
    """
    def random_sel(K):
        """Endlessly yield 0..K-1 in shuffled rounds, swapping the head of a
        round when it would repeat the tail of the previous round."""
        pool = np.arange(K)
        last = None
        while True:
            random.shuffle(pool)
            if pool[0] == last:
                swap_with = random.randrange(1, len(pool))
                pool[0], pool[swap_with] = pool[swap_with], pool[0]
            for item in pool:
                yield item
            last = pool[-1]
    if K <= 1:
        return np.repeat(K - 1, n)
    stream = random_sel(K)
    # Take exactly n items from the endless shuffled stream.
    return np.array([next(stream) for _ in range(n)])
def tau_data(spc_dct_i,
             spc_mod_dct_i,
             run_prefix, save_prefix, saddle=False):
    """ Read the filesystem to get information for TAU

        :param spc_dct_i: species dictionary entry (holds 'elec_levels' and,
            presumably, structural info — confirm against callers)
        :param spc_mod_dct_i: species model dictionary; its 'vib'->'mod'
            entry selects whether gradients/Hessians are read per sample
        :param run_prefix: root of the run filesystem
        :param save_prefix: root of the save filesystem
        :param saddle: whether the species is a saddle point
        :return: dict with geometry, symmetry factor, electronic levels,
            torsional flux string, sampled geometries/energies (and grads/
            Hessians for the 'tau' vib model), reference structures, and
            channel-level ZPE data
    """
    # Set up all the filesystem objects using models and levels
    pf_filesystems = filesys.models.pf_filesys(
        spc_dct_i, spc_mod_dct_i, run_prefix, save_prefix, saddle)
    [harm_cnf_fs, _,
     harm_min_locs, harm_save, _] = pf_filesystems['harm']
    # [tors_cnf_fs, _, tors_min_locs, _, _] = pf_filesystems['tors']
    # Get the conformer filesys for the reference geom and energy
    if harm_min_locs:
        geom = harm_cnf_fs[-1].file.geometry.read(harm_min_locs)
        min_ene = harm_cnf_fs[-1].file.energy.read(harm_min_locs)
    # Set the filesystem
    tau_save_fs = autofile.fs.tau(harm_save)
    # Get the rotor info
    rotors = tors.build_rotors(spc_dct_i, pf_filesystems, spc_mod_dct_i)
    run_path = filesys.models.make_run_path(pf_filesystems, 'tors')
    tors_strs = tors.make_hr_strings(
        rotors, run_path, spc_mod_dct_i)
    [_, hr_str, flux_str, prot_str, _] = tors_strs
    # Use model to determine whether to read grads and hessians
    vib_model = spc_mod_dct_i['vib']['mod']
    freqs = ()
    _, _, proj_zpve, harm_zpve = vib.tors_projected_freqs_zpe(
        pf_filesystems, hr_str, prot_str, run_prefix, zrxn=None)
    zpe_chnlvl = proj_zpve * phycon.EH2KCAL
    # Set reference energy to harmonic zpve
    db_style = 'directory'
    reference_energy = harm_zpve * phycon.EH2KCAL
    # For the 'tau' vib model, only keep samples that also have a Hessian.
    if vib_model == 'tau':
        if db_style == 'directory':
            tau_locs = [locs for locs in tau_save_fs[-1].existing()
                        if tau_save_fs[-1].file.hessian.exists(locs)]
        elif db_style == 'jsondb':
            tau_locs = [locs for locs in tau_save_fs[-1].json_existing()
                        if tau_save_fs[-1].json.hessian.exists(locs)]
    else:
        if db_style == 'directory':
            tau_locs = tau_save_fs[-1].existing()
        elif db_style == 'jsondb':
            tau_locs = tau_save_fs[-1].json_existing()
    # Read the geom, ene, grad, and hessian for each sample
    samp_geoms, samp_enes, samp_grads, samp_hessians = [], [], [], []
    for locs in tau_locs:
        # ioprinter.info_message('Reading tau info at path {}'.format(
        #     tau_save_fs[-1].path(locs)))
        if db_style == 'directory':
            geo = tau_save_fs[-1].file.geometry.read(locs)
        elif db_style == 'jsondb':
            geo = tau_save_fs[-1].json.geometry.read(locs)
        geo_str = autofile.data_types.swrite.geometry(geo)
        samp_geoms.append(geo_str)
        if db_style == 'directory':
            tau_ene = tau_save_fs[-1].file.energy.read(locs)
        elif db_style == 'jsondb':
            tau_ene = tau_save_fs[-1].json.energy.read(locs)
        # Sample energies are stored relative to the reference minimum.
        rel_ene = (tau_ene - min_ene) * phycon.EH2KCAL
        ene_str = autofile.data_types.swrite.energy(rel_ene)
        samp_enes.append(ene_str)
        if vib_model == 'tau':
            if db_style == 'directory':
                grad = tau_save_fs[-1].file.gradient.read(locs)
            elif db_style == 'jsondb':
                grad = tau_save_fs[-1].json.gradient.read(locs)
            grad_str = autofile.data_types.swrite.gradient(grad)
            samp_grads.append(grad_str)
            if db_style == 'directory':
                hess = tau_save_fs[-1].file.hessian.read(locs)
            elif db_style == 'jsondb':
                hess = tau_save_fs[-1].json.hessian.read(locs)
            hess_str = autofile.data_types.swrite.hessian(hess)
            samp_hessians.append(hess_str)
    # Read a geometry, grad, and hessian for a reference geom if needed
    ref_geom, ref_grad, ref_hessian = [], [], []
    if vib_model != 'tau':
        # Get harmonic filesystem information
        [harm_save_fs, _, harm_min_locs, _, _] = pf_filesystems['harm']
        # Read the geometr, gradient, and Hessian
        geo = harm_save_fs[-1].file.geometry.read(harm_min_locs)
        geo_str = autofile.data_types.swrite.geometry(geo)
        ref_geom.append(geo_str)
        grad = harm_save_fs[-1].file.gradient.read(harm_min_locs)
        grad_str = autofile.data_types.swrite.gradient(grad)
        ref_grad.append(grad_str)
        hess = harm_save_fs[-1].file.hessian.read(harm_min_locs)
        hess_str = autofile.data_types.swrite.hessian(hess)
        ref_hessian.append(hess_str)
    # Obtain symmetry factor
    ioprinter.info_message('Determining the symmetry factor...', newline=1)
    sym_factor = symm.symmetry_factor(
        pf_filesystems, spc_mod_dct_i, spc_dct_i, rotors,
    )
    # Create info dictionary
    keys = ['geom', 'sym_factor', 'elec_levels', 'freqs', 'flux_mode_str',
            'samp_geoms', 'samp_enes', 'samp_grads', 'samp_hessians',
            'ref_geom', 'ref_grad', 'ref_hessian',
            'zpe_chnlvl', 'reference_energy']
    vals = [geom, sym_factor, spc_dct_i['elec_levels'], freqs, flux_str,
            samp_geoms, samp_enes, samp_grads, samp_hessians,
            ref_geom, ref_grad, ref_hessian,
            zpe_chnlvl, reference_energy]
    inf_dct = dict(zip(keys, vals))
    return inf_dct
def list_container_registries() -> None:
    """List all available container registries from service."""
    registries = Repository().get_service().container_registries
    cli_utils.title("Container registries:")
    cli_utils.print_table(cli_utils.format_component_list(registries))
def get_atten(log, atten_obj):
    """Get attenuator current attenuation value.
    Args:
        log: log object. (NOTE(review): currently unused by this wrapper.)
        atten_obj: attenuator object.
    Returns:
        Current attenuation value.
    """
    return atten_obj.get_atten()
def lfs_hsm_remove(log, fpath, host=None):
    """
    HSM remove

    Runs "lfs hsm_remove" on the given file, either locally (host is None)
    or on a remote host.

    :param log: logger used for error reporting
    :param fpath: path of the file whose HSM archive copy should be removed
    :param host: optional remote host object exposing sh_run()/sh_hostname
    :return: 0 on success, -1 on failure
    """
    command = ("lfs hsm_remove %s" % (fpath))
    extra_string = ""
    if host is None:
        # Run locally through the utils helper.
        retval = utils.run(command)
    else:
        retval = host.sh_run(log, command)
        extra_string = ("on host [%s]" % host.sh_hostname)
    if retval.cr_exit_status != 0:
        log.cl_error("failed to run command [%s]%s, "
                     "ret = [%d], stdout = [%s], stderr = [%s]",
                     command, extra_string,
                     retval.cr_exit_status, retval.cr_stdout,
                     retval.cr_stderr)
        return -1
    return 0
def area_under_curve_score(table,scoring_function):
    """Takes a run and produces the total area under the curve until the end of the run.
    mean_area_under_curve_score is probably more informative."""
    assert_run(table)
    scores = get_scores(table,scoring_function)
    # Trapezoidal integration over the score sequence, assuming unit spacing.
    # NOTE(review): np.trapz is deprecated in NumPy 2.0 in favour of
    # np.trapezoid — confirm the pinned NumPy version before upgrading.
    return np.trapz(scores)
def read_key_value(file):
    """Parse ``key: value`` lines from a file into a dict.

    Supports '#' comment lines and non-ASCII (e.g. Chinese) content.

    :param file: path of the file, read via the module-level ``readlines``
        helper
    :return: dict mapping stripped keys to stripped values
    """
    return_dict = {}
    lines = readlines(file)
    for line in lines:
        line = line.strip()
        # BUG FIX: blank lines previously raised IndexError; comment check is
        # now done on the stripped line directly.
        if not line or line.startswith('#'):
            continue
        # BUG FIX: split on the first ':' only, so values may themselves
        # contain colons (previously everything after a second ':' was lost);
        # lines without a colon are skipped instead of raising IndexError.
        key, sep, value = line.partition(':')
        if not sep:
            continue
        return_dict[key.strip()] = value.strip()
    return return_dict
def binarize_image(image):
    """Binarize image pixel values to 0 and 255, in place.

    Pixels strictly above the image mean become 255, all others become 0.
    If the image already contains exactly the values {0, 255} it is returned
    unchanged.

    :param image: numpy array; modified in place and also returned
    :return: the same array object, binarized
    """
    unique_values = np.unique(image)
    if len(unique_values) == 2:
        if (unique_values == np.array([0., 255.])).all():
            return image
    mean = image.mean()
    # BUG FIX: compute the mask before mutating the array. Previously the
    # second comparison (`image <= mean`) re-read the already-modified array,
    # so pixels freshly set to 255 were zeroed again whenever mean > 255.
    above = image > mean
    image[above] = 255
    image[~above] = 0
    return image
def LineMatcher_fixture(request: FixtureRequest) -> Type["LineMatcher"]:
    """A reference to the :class: `LineMatcher`.
    This is instantiable with a list of lines (without their trailing newlines).
    This is useful for testing large texts, such as the output of commands.
    """
    # Returns the class itself, not an instance; tests instantiate it.
    return LineMatcher
def Delay(opts, args):
    """Sleeps for a while
    @param opts: the command line options selected by the user
    @type args: list
    @param args: should contain only one element, the duration
        the sleep
    @rtype: int
    @return: the desired exit code
    """
    # The single positional argument is the duration in (fractional) seconds.
    delay = float(args[0])
    op = opcodes.OpTestDelay(duration=delay,
                             on_master=opts.on_master,
                             on_nodes=opts.on_nodes,
                             repeat=opts.repeat,
                             interruptible=opts.interruptible,
                             no_locks=opts.no_locks)
    # Submit the opcode for execution (or just send it, depending on opts).
    SubmitOrSend(op, opts)
    return 0
def generatorObjectIds():
    """ for multiple generator_object, each have a different id

    Demonstrates that each call to a generator function creates a distinct
    generator object.
    """
    print("\nfor multiple generator_object, each have a different id ")
    g1 = generate123()
    g2 = generate123()
    if g1 is not g2:
        print("g1 is not g2")
    print(f"g1: {g1}")
    # BUG FIX: the second print mislabelled g2 as "g1".
    print(f"g2: {g2}")
def ffs(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ffs.html

    Find the position of the least significant set bit (libdevice __nv_ffs).

    NOTE(review): the body is intentionally empty — this looks like a device
    intrinsic stub whose implementation is supplied by a compiler backend
    (e.g. numba/libdevice); confirm before calling it directly from Python.

    :param x: Argument.
    :type x: int32
    :rtype: int32
    """
def logout():
    """ Simply loading the logout page while logged in will log the user out """
    # Flask-Login: clear the session's user before rendering the page.
    logout_user()
    return render_template(f"{app_name}/logout.html")
def test_LeafConstructionError_upon_update():
    """Tests that a `LeafConstructionError` is raised if both `record` and `digest`
    are provided as arguments to the `MerkleTree.update()` method
    """
    t = MerkleTree()
    # `record` and `digest` are mutually exclusive ways to define a leaf.
    with pytest.raises(LeafConstructionError):
        t.update(
            record='some record',
            digest='540ef8fc9eefa3ec0fbe55bc5d10dbea03d5bac5591b3d7db3af79ec24b3f74c'
        )
def deletebucket(bucket_choices):
    """
    This function is used to delete the bucket/s in S3
    """
    progressbar("Deleting Bucket")
    names = bucket_choices['bucket']
    try:
        for name in names:
            s3.delete_bucket(Bucket=str(name))
            print("\n \n Bucket " + name + " has been deleted \n \n")
    except botocore.exceptions.ClientError as e:
        # AWS rejected a delete (e.g. bucket not empty or not owned by us).
        coloredtext("There was an error while deleting Bucket: \n\n\n")
        print(e)
def identify_larger_definition(
    one: ObjectDefinition,
    two: ObjectDefinition
) -> Dict[str, Any]:
    """Return the larger (in dimensions) of the two given definitions."""
    # A missing definition always loses to the other one.
    if not one:
        return two
    if not two:
        return one
    # TODO Handle if one has a larger X but other has a larger Z
    if one.dimensions.x > two.dimensions.x:
        return one
    if one.dimensions.z > two.dimensions.z:
        return one
    return two
def duration_to_timedelta(obj):
    """Converts duration to timedelta
    >>> duration_to_timedelta("10m")
    >>> datetime.timedelta(0, 600)
    """
    # Parse with the module-level DURATION_PATTERN regex.
    # NOTE(review): if the string does not match at all, `matches` is None and
    # the .groupdict() call below raises AttributeError — confirm callers
    # always pass a well-formed duration string.
    matches = DURATION_PATTERN.search(obj)
    # Unmatched groups default to "0" so every timedelta unit is present.
    matches = matches.groupdict(default="0")
    matches = {k: int(v) for k, v in matches.items()}
    return timedelta(**matches)
async def create_mock_hlk_sw16_connection(fail):
    """Create a mock HLK-SW16 client.

    :param fail: forwarded to MockSW16Client — presumably makes the mock
        simulate a failing connection; confirm against MockSW16Client.
    :return: the mock client, with its async setup() already awaited
    """
    client = MockSW16Client(fail)
    await client.setup()
    return client
def gc_collect():
    """Force as many objects as possible to be collected.

    In non-CPython implementations of Python, this is needed because timely
    deallocation is not guaranteed by the garbage collector. (Even in CPython
    this can be the case in case of reference cycles.) This means that __del__
    methods may be called later than expected and weakrefs may remain alive
    for longer than expected. This function tries its best to force all
    garbage objects to disappear.
    """
    gc.collect()
    # BUG FIX: the keyword was machine-translated to Polish ("jeżeli"),
    # which is a syntax error; restored to `if`. Docstring translated too.
    if is_jython:
        time.sleep(0.1)
        gc.collect()
    gc.collect()
def inv_dist_weight(distances, b):
    """Inverse distance weight
    Parameters
    ----------
    distances : numpy.array of floats
        Distances to point of interest
    b : float
        The parameter of the inverse distance weight. The higher, the
        higher the influence of closeby stations.
    Returns
    -------
    lambdas : numpy.array of floats
        The lambda parameters of the stations (normalised to sum to one)
    """
    # Weight each station by 1/d^b, then normalise so the weights sum to 1.
    inverse = 1 / distances**b
    return inverse / np.sum(inverse)
def _uniformly_named_arguments(captured_arguments):
"""Iterate the captured arguments as uniform name/value pairs."""
args, kwargs = captured_arguments
# For positional arguments, the name is 1-based index padded with
# leading zeroes to the length of the last index.
width = len(str(len(args)))
for i, arg in enumerate(args, 1):
name = str(i).zfill(width)
yield name, arg
# For keyword arguments, the keyword is taken as a name.
yield from kwargs.items() | 28,874 |
def load_inferred_fishing(table, id_list, project_id, threshold=True):
    """Load inferred data and generate comparison data

    :param table: BigQuery table prefix, expanded via TABLE_DATE_RANGE
    :param id_list: vessel ids to include in the query
    :param project_id: Google Cloud project that runs the BigQuery jobs
    :param threshold: when True, binarise nnet_score at 0.5
    :return: dict mapping vessel_id -> list of FishingRange entries
    """
    query_template = """
    SELECT vessel_id, start_time, end_time, nnet_score FROM 
        TABLE_DATE_RANGE([{table}],
                        TIMESTAMP('{year}-01-01'), TIMESTAMP('{year}-12-31'))
        WHERE vessel_id in ({ids})
    """
    ids = ','.join('"{}"'.format(x) for x in id_list)
    ranges = defaultdict(list)
    # One query per calendar year, 2012 through 2017 inclusive.
    for year in range(2012, 2018):
        query = query_template.format(table=table, year=year, ids=ids)
        print(query)
        for x in pd.read_gbq(query, project_id=project_id).itertuples():
            score = x.nnet_score
            if threshold:
                score = score > 0.5
            # Normalise timestamps to timezone-aware UTC.
            start = x.start_time.replace(tzinfo=pytz.utc)
            end = x.end_time.replace(tzinfo=pytz.utc)
            ranges[x.vessel_id].append(FishingRange(score, start, end))
    print([(key, len(val)) for (key, val) in ranges.items()])
    return ranges
def is_optional(value: Any) -> CheckerReturn:
    """
    It is a rather special validator because it never returns False and emits an exception
    signal when the value is correct instead of returning True.
    Its user should catch the signal to short-circuit the validation chain.
    """
    # None marks "value absent": signal the caller (via exception) that the
    # remaining validators in the chain can be skipped entirely.
    if value is None:
        raise exceptions.ValueNotRequired()
    return True
def cli(env):
    """List health check types."""
    mgr = SoftLayer.LoadBalancerManager(env.client)
    # Two-column table, left-aligned, sorted by id.
    table = formatting.KeyValueTable(['ID', 'Name'])
    table.align['ID'] = 'l'
    table.align['Name'] = 'l'
    table.sortby = 'ID'
    for hc_type in mgr.get_hc_types():
        table.add_row([hc_type['id'], hc_type['name']])
    env.fout(table)
def test_tag_format(tmp_image):
    """test --tag-format"""
    from exif2findertags.cli import cli
    runner = CliRunner()
    result = runner.invoke(
        cli,
        [
            "--tag",
            "Keywords",
            "--tag-format",
            "{TAG}={VALUE}",
            "--verbose",
            str(tmp_image),
        ],
    )
    assert result.exit_code == 0
    # Finder tags should be rendered through the custom TAG=VALUE template.
    md = osxmetadata.OSXMetaData(str(tmp_image))
    tags = [t.name for t in md.tags]
    assert sorted(tags) == ["Keywords=Fruit", "Keywords=Travel"]
    # reset tags for next test
    md.tags = []
def approximate_bounding_box_dyn_obstacles(obj: list, time_step=0) -> Union[
    Tuple[list], None]:
    """
    Compute bounding box of dynamic obstacles at time step
    :param obj: All possible objects. DynamicObstacles are filtered.
    :param time_step: time step at which occupancies are evaluated
    :return: ([x_min, x_max], [y_min, y_max]) or None if no finite bounds
        could be determined
    """
    def update_bounds(new_point: np.ndarray, bounds: List[list]):
        """Update bounds with new point"""
        if new_point[0] < bounds[0][0]:
            bounds[0][0] = new_point[0]
        if new_point[1] < bounds[1][0]:
            bounds[1][0] = new_point[1]
        if new_point[0] > bounds[0][1]:
            bounds[0][1] = new_point[0]
        if new_point[1] > bounds[1][1]:
            bounds[1][1] = new_point[1]
        return bounds
    # Collect dynamic obstacles directly and out of any passed scenarios.
    dynamic_obstacles_filtered = []
    for o in obj:
        if type(o) == DynamicObstacle:
            dynamic_obstacles_filtered.append(o)
        elif type(o) == Scenario:
            dynamic_obstacles_filtered.extend(o.dynamic_obstacles)
    x_int = [np.inf, -np.inf]
    y_int = [np.inf, -np.inf]
    bounds = [x_int, y_int]
    shapely_set = None
    for obs in dynamic_obstacles_filtered:
        occ = obs.occupancy_at_time(time_step)
        if occ is None:
            continue
        shape = occ.shape
        if hasattr(shape, "_shapely_polygon"):
            if shapely_set is None:
                shapely_set = shape._shapely_polygon
            else:
                shapely_set = shapely_set.union(shape._shapely_polygon)
        elif hasattr(shape, 'center'):  # Rectangle, Circle
            bounds = update_bounds(shape.center, bounds=bounds)
        elif hasattr(shape, 'vertices'):  # Polygon, Triangle
            v = shape.vertices
            bounds = update_bounds(np.min(v, axis=0), bounds=bounds)
            bounds = update_bounds(np.max(v, axis=0), bounds=bounds)
    # BUG FIX: previously shapely_set.envelope was accessed unconditionally,
    # raising AttributeError whenever no obstacle carried a shapely polygon
    # (including the empty-input case).
    if shapely_set is not None:
        envelope_bounds = shapely_set.envelope.bounds
        envelope_bounds = np.array(envelope_bounds).reshape((2, 2))
        bounds = update_bounds(envelope_bounds[0], bounds)
        bounds = update_bounds(envelope_bounds[1], bounds)
    if np.inf in bounds[0] or -np.inf in bounds[0] or np.inf in bounds[
        1] or -np.inf in bounds[1]:
        return None
    else:
        return tuple(bounds)
def sky_spectrum_from_fibres_using_file(
    rss_file,
    fibre_list=[],
    win_sky=151,
    n_sky=0,
    skyflat="",
    apply_throughput=True,
    correct_ccd_defects=False,
    fix_wavelengths=False,
    sol=[0, 0, 0],
    xmin=0,
    xmax=0,
    ymin=0,
    ymax=0,
    verbose=True,
    plot=True,
    ):
    """
    Obtain a 1D sky spectrum from an RSS file, either from the n_sky lowest
    fibres (n_sky != 0) or from an explicit fibre list.

    Parameters
    ----------
    rss_file
        path of the RSS file to read
    fibre_list
        fibres to combine when n_sky == 0
    win_sky
        window size for the sky median filter
    n_sky
        number of lowest fibres to use; 0 means use fibre_list instead
    skyflat
        sky flat file; when empty, throughput correction is disabled
    apply_throughput
        NOTE(review): this parameter is immediately overwritten below based
        on `skyflat` and is therefore effectively ignored — confirm intent.
    correct_ccd_defects, fix_wavelengths, sol
        forwarded to KOALA_RSS
    xmin, xmax, ymin, ymax
        NOTE(review): currently unused by this function.
    verbose, plot
        console output / plotting toggles
    Returns
    -------
    sky : 1D sky spectrum
    """
    from koala import KOALA_RSS  # TODO: currently importing like this for workaround of circular imports
    # Similar to in cube_alignement
    # TODO: this function is never called it seems
    if skyflat == "":
        apply_throughput = False
        plot_rss = False
    else:
        apply_throughput = True
        plot_rss = True
    if n_sky != 0:
        sky_method = "self"
        is_sky = False
        if verbose:
            print("\n> Obtaining 1D sky spectrum using {} lowest fibres in this rss ...".format(n_sky))
    else:
        sky_method = "none"
        is_sky = True
        if verbose:
            print("\n> Obtaining 1D sky spectrum using fibre list = {} ...".format(fibre_list))
    _test_rss_ = KOALA_RSS(
        rss_file,
        apply_throughput=apply_throughput,
        skyflat=skyflat,
        correct_ccd_defects=correct_ccd_defects,
        fix_wavelengths=fix_wavelengths,
        sol=sol,
        sky_method=sky_method,
        n_sky=n_sky,
        is_sky=is_sky,
        win_sky=win_sky,
        do_extinction=False,
        plot=plot_rss,
        verbose=False,
    )
    if n_sky != 0:
        print("\n> Sky fibres used: {}".format(_test_rss_.sky_fibres))
        sky = _test_rss_.sky_emission
    else:
        sky = _test_rss_.plot_combined_spectrum(list_spectra=fibre_list, median=True)
    if plot:
        plt.figure(figsize=(14, 4))
        if n_sky != 0:
            plt.plot(_test_rss_.wavelength, sky, "b", linewidth=2, alpha=0.5)
            ptitle = "Sky spectrum combining using {} lowest fibres".format(n_sky)
        else:
            for i in range(len(fibre_list)):
                plt.plot(
                    _test_rss_.wavelength, _test_rss_.intensity_corrected[i], alpha=0.5
                )
                plt.plot(_test_rss_.wavelength, sky, "b", linewidth=2, alpha=0.5)
            # BUG FIX: np.str was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin str is the documented replacement.
            ptitle = "Sky spectrum combining " + str(len(fibre_list)) + " fibres"
        plot_plot(_test_rss_.wavelength, sky, ptitle=ptitle)
    print("\n> Sky spectrum obtained!")
    return sky
def binary_class_accuracy_score(y_pred, data):
    """LightGBM binary class accuracy-score function.

    Parameters
    ----------
    y_pred
        LightGBM predictions (probabilities, rounded to 0/1 here).
    data
        LightGBM ``'Dataset'`` providing the true labels.

    Returns
    -------
    (eval_name, eval_result, is_higher_better)
        ``'eval_name'`` : string — always 'accuracy', the metric name
        ``'eval_result'`` : float — the accuracy value
        ``'is_higher_better'`` : bool — always True for accuracy

    See Also
    --------
    * `sklearn.metrics.accuracy_score: <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html>`
    * `LightGBM Training API: <https://lightgbm.readthedocs.io/en/latest/Python-API.html#training-api>`
    """
    y_true = data.get_label()
    rounded_preds = np.round(y_pred)
    return 'accuracy', accuracy_score(y_true, rounded_preds), True
def win32_clipboard_get():
    """Return the current clipboard text on Windows.

    Requires Mark Hammond's pywin32 extensions; raises ``Exception`` with an
    installation hint when they are missing.
    """
    try:
        import win32clipboard
    except ImportError:
        raise Exception("Getting text from the clipboard requires the pywin32 "
                        "extensions: http://sourceforge.net/projects/pywin32/")
    win32clipboard.OpenClipboard()
    text = win32clipboard.GetClipboardData(win32clipboard.CF_TEXT)
    # FIXME: convert \r\n to \n?
    win32clipboard.CloseClipboard()
    return text
def open_add_folder_dialog(*args):
    """Pre-fill the folder-name entry from the clipboard, run the folder
    dialog modally, then hide it when the user is done.
    """
    entry = __.Builder.get_object('folder_name')
    entry.set_text(__.Jimmy.receive())
    __.folder_dialog = __.Builder.get_object('folder_dialog')
    __.folder_dialog.run()
    __.folder_dialog.hide()
def is_prime(n):
    """Given an integer n, return True if n is prime and False if not.

    A prime has exactly two divisors, so anything below 2 is rejected.
    Trial division only needs to run up to the integer square root of n,
    and only over odd candidates once 2 has been ruled out.
    """
    if n < 2:
        return False
    if n < 4:  # 2 and 3 are prime
        return True
    if n % 2 == 0:
        return False
    for divisor in range(3, math.isqrt(n) + 1, 2):
        if n % divisor == 0:
            return False
    return True
def path_to_newname(path, name_level=1):
    """
    Build a name from a path by joining its trailing components.

    Parameters
    ----------
    path : String
    name_level : Integer
        How many trailing path components to combine. E.g. for
        path = mydata/1234/3.txt and name_level == 2, name = 1234_3

    Returns
    -------
    name : String
    """
    trailing_parts = path.split('/')[-name_level:]
    joined = '_'.join(trailing_parts)
    name, _ext = os.path.splitext(joined)
    return name
def run_command(state, config_file: str, tool: str, report: str = "console", scan_type: str = None) -> None:
    """Manually run a single scan tool, e.g. ``eze tools run safety --debug``.

    Exits the process with status 1 when the tool is unknown or not installed.
    """
    log_debug(
        f"""Running scan:
=========================
tool: {tool}
report: {report}
scan_type: {scan_type if scan_type else 'default'}
"""
    )
    [tool_name, run_type] = extract_embedded_run_type(tool)
    manager = ToolManager.get_instance()
    tool_class = manager.tools.get(tool_name)
    if tool_class is None:
        log(f"Could not find tool '{tool_name}', use 'eze tools list' to get available tools")
        sys.exit(1)
    if not tool_class.check_installed():
        log(
            f"'{tool_name}' Tool not installed, use 'eze tools help --tool {tool_name}' to get help installing {tool_name}"
        )
        sys.exit(1)
    asyncio.run(EzeCore.get_instance().run([tool], [], [report], scan_type))
def mount_if_unmounted(drive_path):
    """Mount all fstab filesystems when drive_path is not already a mount point.

    Runs ``sudo mount -a`` (mounts everything in /etc/fstab, not just
    drive_path) if ``os.path.ismount(drive_path)`` is False.

    Raises:
        Exception: when the mount command fails.
    """
    # The original used Python 2 print statements and a bare except clause;
    # converted to Python 3 syntax and narrowed the catch to Exception.
    filesystem_mount_cli = [
        'sudo',
        'mount', '-a'
    ]
    if not path.ismount(drive_path):
        print("Mounting filesystem ..")
        try:
            check_output(filesystem_mount_cli)
        except Exception:
            raise Exception("Failed to mount filesystem")
def load_model_from_json(model_path=None, weights_path=None):
    """
    Load a Keras model architecture and its weights from disk.

    input:
        model_path     path to the model file, JSON format
                       (defaults to ./resModel.json)
        weights_path   path to the weights file, HDF5 format
                       (defaults to ./modelWeights.h5)
    output:
        Keras model with weights loaded
    """
    base_dir = os.path.abspath(".")
    if model_path is None:
        model_path = os.path.join(base_dir, "resModel.json")
    if weights_path is None:
        weights_path = os.path.join(base_dir, "modelWeights.h5")
    # Read the serialized architecture (renamed from `json` to avoid
    # shadowing the stdlib module name).
    with open(model_path, "r") as fh:
        architecture = fh.read()
    model = model_from_json(architecture)
    model.load_weights(weights_path)
    return model
def update_parameters(parameters,grads,learning_rate,optimizer,beta1=0.9,beta2=0.999, epsilon=1e-8):
    """
    Update the network's weights and biases using the selected optimizer.

    Arguments:
    parameters -- dict of "W1", "b1", ..., "WL", "bL" weight matrices and
                  bias vectors
    grads -- dict of gradients "dW1", "db1", ..., "dWL", "dbL"
    learning_rate -- the learning rate, scalar
    optimizer -- dict holding "optimizer_type" ("gd", "momentum" or "adam")
                 plus any optimizer state ("v", "s", "t")

    Optional Arguments:
    beta1 -- exponential decay for first-moment estimates (momentum, Adam);
             0.9 is a reasonable default, common range 0.8-0.999
    beta2 -- exponential decay for second-moment estimates (Adam)
    epsilon -- small constant preventing division by zero in Adam updates

    Returns:
    parameters -- the updated parameter dict
    optimizer -- the optimizer dict with its state advanced

    Exits the process when the optimizer type is unrecognized.
    """
    kind = optimizer["optimizer_type"]
    if kind == "gd":
        # Plain gradient descent keeps no optimizer state.
        parameters = update_parameters_with_gd(parameters, grads, learning_rate)
    elif kind == "momentum":
        parameters, optimizer["v"] = update_parameters_with_momentum(
            parameters, grads, optimizer["v"], beta1, learning_rate)
    elif kind == "adam":
        # Adam keeps a step counter t used for bias correction of the
        # first/second moment estimates.
        optimizer["t"] = optimizer["t"] + 1
        parameters, optimizer["v"], optimizer["s"] = update_parameters_with_adam(
            parameters, grads, optimizer["v"], optimizer["s"], optimizer["t"],
            learning_rate, beta1, beta2, epsilon)
    else:
        print("ERROR: update_parameters - no optimizer_type was selected")
        print("optimizer_type=" + optimizer["optimizer_type"])
        sys.exit(1)
    return parameters, optimizer
def test_create_boot_session():
    """Verify that POSTing a boot request creates a new boot session
    and returns a well-formed link object.
    """
    template_uuid = str(uuid.uuid4())
    payload = {"operation": "boot",
               "templateUuid": template_uuid}
    response = requests.post(BOOT_SESSION_URI, headers=HEADERS, verify=HTTPS_VERIFY,
                             json=payload)
    assert response.status_code == requests.codes['created']
    body = response.json()
    assert len(body) == 1
    link = body['links'][0]
    for field in ('href', 'operation', 'templateUuid', 'jobId'):
        assert field in link
    assert link['operation'] == "boot"
    assert link['templateUuid'] == template_uuid
def get_request_file():
    """
    Method to implement REST API call of GET on address /file

    Returns the contents of html/file_get.html; logs and re-raises any
    failure to read it.
    """
    source_path = "html/file_get.html"
    try:
        # `with` guarantees the handle is closed; the original leaked it.
        with open(source_path, "r") as content_file:
            content = content_file.read()
    except Exception:
        # The original log call had a bare '%s' with no argument, so the
        # placeholder itself was logged; supply the path.
        logging.info("Could not load source HTML file '%s'", source_path)
        raise
    return content
def pytest_configure(config):
    """Supply timestamped default paths for the log file and HTML report
    when neither was given in an *.ini file or on the command line."""
    stamp = datetime.strftime(datetime.now(), '%Y-%m-%d_%H-%M-%S')
    if not config.option.log_file:
        config.option.log_file = os.path.join(Config.LOGS_PATH, "log_" + stamp + ".log")
    if not config.option.htmlpath:
        config.option.htmlpath = os.path.join(Config.REPORTS_PATH, "report_" + stamp + ".html")
def sock_merchant(arr: Iterable[int]) -> int:
    """Count the number of matching pairs among the sock colors in *arr*.

    >>> sock_merchant([10, 20, 20, 10, 10, 30, 50, 10, 20])
    3
    >>> sock_merchant([6, 5, 2, 3, 5, 2, 2, 1, 1, 5, 1, 3, 3, 3, 5])
    6
    """
    from collections import Counter
    pairs = 0
    for tally in Counter(arr).values():
        pairs += tally // 2
    return pairs
def new_user_registration(email: str) -> dict:
    """Build an email alerting the CIDC admin mailing list to a new user
    registration.

    :param email: address of the newly registered user
    :return: dict with "to_emails", "subject" and "html_content" keys
    """
    html_content = (
        f"A new user, {email}, has registered for the CIMAC-CIDC Data Portal ({ENV}). If you are a CIDC Admin, "
        "please visit the accounts management tab in the Portal to review their request."
    )
    return {
        "to_emails": [CIDC_MAILING_LIST],
        "subject": "New User Registration",
        "html_content": html_content,
    }
def article_detail():
    """Article detail endpoint: return one article with its tag ids resolved
    to {id, name} dicts; raises ARTICLE_NOT_EXIST when missing."""
    id = request.form.get('id')
    if id is None:
        raise Exception('ARTICLE_NOT_EXIST')
    article = Article.find(id)
    if article is None:
        raise Exception('ARTICLE_NOT_EXIST')
    # Resolve tag ids; ids with no matching Tag document are dropped.
    if article.tags is None:
        article.tags = []
    else:
        found = Tag.find_all({'_id': {'$in': article.tags}})
        by_id = {str(tag._id): {'id': str(tag._id), 'name': tag.name} for tag in found}
        article.tags = [by_id[str(tag_id)] for tag_id in article.tags if str(tag_id) in by_id]
    return {'article': article.filter('title', 'draft', 'tags',
                                      img=lambda article: images.url(article.img) if article.img else '')}
def add_state_names_column(my_df):
    """
    Add a column of corresponding state names to a dataframe

    Params (my_df) a DataFrame with a column called "abbrev" that has state abbreviations.

    Return a copy of the original dataframe, but with an extra "name" column;
    the input dataframe is left unmodified.
    """
    names_map = {"CA": "Cali", "CO": "Colorado", "CT": "Connecticut", "NJ": "New Jersey"}
    new_df = my_df.copy()
    # The original assigned through an undefined `df`, replaced new_df with a
    # Series, and returned the unmodified input; assign the column on the copy
    # and return the copy as documented.
    new_df["name"] = new_df["abbrev"].map(names_map)
    return new_df
def list_subdir_paths(directory):
    """
    Generate a list of subdirectory paths.

    :param directory: str pathname of target parent directory
    :return: list of paths (with trailing separators) for each immediate
        subdirectory of the target parent directory
    """
    pattern = "{}/*/".format(directory)
    return glob(pattern)
def random_replacement_(batch: torch.LongTensor, index: int, selection: slice, size: int, max_index: int) -> None:
    """
    In-place, replace one column of a selection of *batch* with random indices
    that are guaranteed to differ from the current values.

    :param batch: shape: `(*batch_dims, d)`
        the batch of indices, modified in place
    :param index:
        the position along the last axis to replace
    :param selection:
        a selection of the batch rows, e.g. a slice or a mask
    :param size:
        the number of selected rows
    :param max_index:
        the exclusive upper bound for index values at the chosen position
    """
    # Draw from [0, max_index - 1) and then shift every draw that is >= the
    # original value up by one: this samples uniformly from the max_index - 1
    # values that are NOT the original, so no entry keeps its old value.
    current = batch[selection, index]
    draws = torch.randint(
        high=max_index - 1,
        size=(size,),
        device=batch.device,
    )
    draws = draws + (draws >= current).long()
    batch[selection, index] = draws
def logic_method_with_bkg(plots_per_cycle, cycle_time, sigma_s=160, m=3, n=4):
    """
    Initialize radar tracks from per-cycle plots with an m-of-n sliding-window logic.

    :param plots_per_cycle: per-cycle plot data; axis 0 indexes radar cycles
        (assumes each entry is an array of (x, y) plot positions — TODO confirm)
    :param cycle_time: duration of one radar cycle, used for velocity /
        acceleration estimation
    :param sigma_s: gate-size parameter forwarded to relate_gate_check
    :param m: minimum number of gate-passing detections required in a window
    :param n: sliding-window length in cycles
    :return: (succeed, tracks) — whether a track was initialized, and the
        list of Track objects (at most one; the loop stops on first success)
    """
    N = plots_per_cycle.shape[0]  # number of cycles
    tracks = []  # ret
    track_cnt = 0
    # Slide a window over the cycles
    succeed = False
    for i in range(2, N - n):  # cycle i
        if succeed:
            break
        # Take the sliding window (5 consecutive cycles)
        window = slide_window(plots_per_cycle, n, start_cycle=i, skip_cycle=2)
        # ----------run the m/n statistics inside the window
        # Build the chain of plot-to-plot mappings between adjacent cycles
        K = min([cycle_plots.shape[0] for cycle_plots in window])  # smallest common plot count
        mappings = defaultdict(dict)
        for j in range(len(window) - 1, 0, -1):
            # ----- build the mapping between adjacent cycles
            mapping = matching_plots_nn(window[j], window[j - 1], K)
            # -----
            # Require a one-to-one mapping; otherwise abandon the chain here
            if len(set(mapping.values())) != len(set(mapping.keys())):
                break
            else:
                mappings[j] = mapping
        if len(mappings) < m:  # need at least m cycles of valid data, i.e. m-1 mappings
            continue  # slide on to the next window
        # Sort the mappings by cycle index (descending by key)
        mappings = sorted(mappings.items(), key=lambda x: x[0], reverse=True)
        # print(mappings)
        # Build tentative tracks
        for k in range(K):  # iterate over each tentative track
            # ----- track state bookkeeping
            # window detection counter: one per tentative track
            n_pass = 0
            # window motion-state record per track (velocity, acceleration, heading change)
            window_states = defaultdict(dict)
            # -----
            # ----- collect the plots that make up this tentative track
            plot_ids = []
            id = -1
            # take the k-th plot id from the latest valid cycle (reverse order)
            keys = mappings[0][1].keys()
            keys = sorted(keys, reverse=False)  # ascending by plot index of the window's latest valid cycle
            id = keys[k]
            plot_ids.append(id)
            # follow the mapping chain to recover the other cycles' plot ids
            for (c, mapping) in mappings:  # mappings are already sorted by cycle, descending
                id = mapping[id]  # chase the plot id back through the mapping chain
                plot_ids.append(id)
            # print(ids)  # ids are ordered by cycle, descending
            # build the plot chain from the id chain: the tentative track
            cycle_ids = [c for (c, mapping) in mappings]  # cycle numbers, descending
            cycle_ids.extend([mappings[-1][0] - 1])
            assert len(cycle_ids) == len(plot_ids)
            plots = [window[cycle][plot_id]
                     for cycle, plot_id in zip(cycle_ids, plot_ids)]
            # print(plots)
            # gate tests, one window position at a time
            # for l, (cycle_id, plot) in enumerate(zip(cycle_ids_to_test, plots_to_test)):
            for l in range(len(plots) - 2):
                cycle_id = cycle_ids[l]
                # take the plots of three consecutive cycles
                # plots_2 = [plots[l + 1], plots[l]]
                plots_3 = [plots[l + 2], plots[l + 1], plots[l]]
                # plot_plots(plots_2, [cycle_ids[l+1], cycle_ids[l]])
                # plot_plots(plots_3, [cycle_ids[l+2], cycle_ids[l+1], cycle_ids[l]])
                # estimate the motion state at the current plot
                v, a, angle_in_radians = get_v_a_angle(plots_3, cycle_time)
                # v = get_v(plots_2, cycle_time)
                # heading change, normalized into [0, 360] degrees
                angle_in_degrees = math.degrees(angle_in_radians)
                angle_in_degrees = angle_in_degrees if angle_in_degrees >= 0.0 else angle_in_degrees + 360.0
                angle_in_degrees = angle_in_degrees if angle_in_degrees <= 360.0 else angle_in_degrees - 360.0
                # initial-gate test: j is the second scan of the current test sequence
                if start_gate_check(cycle_time, plots[l + 2], plots[l + 1], v0=340):
                    # --- tracks passing the initial gate become tentative; test the relating gate next
                    # relating (tracking) gate test, see page 71-72
                    if relate_gate_check(cycle_time, v, a, plots[l + 2], plots[l + 1], plots[l], sigma_s=sigma_s):
                        n_pass += 1
                        # record the motion state for this window position
                        state_dict = {
                            'cycle': cycle_id,
                            'x': plots[l][0],
                            'y': plots[l][1],
                            'v': v,
                            'a': a,
                            'angle_in_degrees': angle_in_degrees
                        }
                        window_states[cycle_id] = state_dict
                        ## ----- record the motion state of the first two plots in the window
                        if l == len(plots) - 2 - 1:
                            print('Add plot for the first 2 plots in the window...')
                            plots_2 = [plots[l + 1], plots[l]]
                            v = get_v(plots_2, cycle_time)
                            # motion state of window plot #1 (no acceleration/heading yet)
                            state_dict = {
                                'cycle': cycle_id - 1,
                                'x': plots[l + 1][0],
                                'y': plots[l + 1][1],
                                'v': v,
                                'a': -1,
                                'angle_in_degrees': -1
                            }
                            window_states[cycle_id - 1] = state_dict
                            # motion state of window plot #0 (-1 marks "unknown")
                            state_dict = {
                                'cycle': cycle_id - 2,
                                'x': plots[l + 2][0],
                                'y': plots[l + 2][1],
                                'v': -1,
                                'a': -1,
                                'angle_in_degrees': -1
                            }
                            window_states[cycle_id - 2] = state_dict
                    else:
                        print('Track init failed @cycle{:d}, object(plot) is not in relating gate.'.format(i))
                else:
                    # NOTE(review): j here is the leftover value from the mapping
                    # loop above, not this plot's window position — confirm intent
                    print('Track init failed @cycle{:d} @window{:d}, object(plot) is not in the starting gate.'
                          .format(i, j))
            # decide whether the current track initialized successfully
            if n_pass >= m:
                print(
                    'Track {:d} inited successfully @cycle {:d}.'.format(k, i))
            # ----- build the Track object
                track = Track()
                track.id_ = track_cnt  # track number
                track.state_ = 2  # track state: reliable track
                track.init_cycle_ = i  # cycle at which the track started
                window_states = sorted(window_states.items(
                ), key=lambda x: x[0], reverse=False)  # re-sort ascending by cycle
                # append the already-initialized plots
                # NOTE(review): k and v shadow the outer loop variables; harmless
                # because Python for-loops re-assign on each iteration, but worth renaming
                for k, v in window_states:
                    # print(k, v)
                    plot = Plot(v['cycle'], v['x'], v['y'],
                                v['v'], v['a'], v['angle_in_degrees'])
                    plot.state_ = 1  # 'Related'
                    plot.correlated_track_id_ = track.id_
                    track.add_plot(plot)
                    track.quality_counter_ += 1  # update track quality score
                tracks.append(track)
                # -----
                # advance the track counter
                track_cnt += 1
                # flag that track initialization succeeded
                succeed = True
                # reset the window state record
                window_states = defaultdict(dict)
                # done with this track's test; move on to the next tentative track
                continue
    return succeed, tracks
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.