Dataset columns (name: type, observed range):
hexsha: string (length 40) | size: int64 (3 to 1.03M) | ext: string (10 classes) | lang: string (1 class)
max_stars_repo_path: string (3 to 972) | max_stars_repo_name: string (6 to 130) | max_stars_repo_head_hexsha: string (40 to 78) | max_stars_repo_licenses: list (1 to 10) | max_stars_count: int64 (1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime: string (24, nullable) | max_stars_repo_stars_event_max_datetime: string (24, nullable)
max_issues_repo_path: string (3 to 972) | max_issues_repo_name: string (6 to 130) | max_issues_repo_head_hexsha: string (40 to 78) | max_issues_repo_licenses: list (1 to 10) | max_issues_count: int64 (1 to 116k, nullable) | max_issues_repo_issues_event_min_datetime: string (24, nullable) | max_issues_repo_issues_event_max_datetime: string (24, nullable)
max_forks_repo_path: string (3 to 972) | max_forks_repo_name: string (6 to 130) | max_forks_repo_head_hexsha: string (40 to 78) | max_forks_repo_licenses: list (1 to 10) | max_forks_count: int64 (1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime: string (24, nullable) | max_forks_repo_forks_event_max_datetime: string (24, nullable)
content: string (3 to 1.03M) | avg_line_length: float64 (1.13 to 941k) | max_line_length: int64 (2 to 941k) | alphanum_fraction: float64 (0 to 1)
Each record below lists hexsha | size | ext | lang, then one line each of path | repo | head hexsha | licenses | count | min datetime | max datetime for stars, issues and forks, followed by the file content and its avg_line_length | max_line_length | alphanum_fraction.
45964458a6856b2a86f4f46cc5817ea558997e6a | 65 | py | Python
tests/test_version_cmd.py | ixje/app-neo3 | 079b178017684958cdf66fdf144f317ea37d65ae | ["MIT"] | null | null | null
tests/test_version_cmd.py | ixje/app-neo3 | 079b178017684958cdf66fdf144f317ea37d65ae | ["MIT"] | null | null | null
tests/test_version_cmd.py | ixje/app-neo3 | 079b178017684958cdf66fdf144f317ea37d65ae | ["MIT"] | null | null | null
def test_version(cmd):
assert cmd.get_version() == (0, 1, 0)
21.666667 | 41 | 0.630769
fd1609990ef1071c6f0ff02bf1497be19ed39662 | 11,531 | py | Python
api/anubis/views/admin/courses.py | ShubhamGG/Anubis | 2c538ef258a1edf5463596a33bc66caa2ef7e35b | ["MIT"] | null | null | null
api/anubis/views/admin/courses.py | ShubhamGG/Anubis | 2c538ef258a1edf5463596a33bc66caa2ef7e35b | ["MIT"] | null | null | null
api/anubis/views/admin/courses.py | ShubhamGG/Anubis | 2c538ef258a1edf5463596a33bc66caa2ef7e35b | ["MIT"] | null | null | null
from flask import Blueprint
from sqlalchemy.exc import IntegrityError, DataError
from anubis.models import db, Course, TAForCourse, ProfessorForCourse, User, InCourse
from anubis.utils.auth.http import require_admin, require_superuser
from anubis.utils.auth.user import current_user
from anubis.utils.data import row2dict, req_assert
from anubis.utils.http.decorators import json_response, json_endpoint
from anubis.utils.http import error_response, success_response
from anubis.lms.courses import assert_course_superuser, course_context
from anubis.lms.courses import valid_join_code
courses_ = Blueprint("admin-courses", __name__, url_prefix="/admin/courses")
@courses_.route("/")
@courses_.route("/list")
@require_admin()
@json_response
def admin_courses_list():
"""
List the data for the current course context.
:return:
"""
# Return the course context broken down
return success_response({
"course": row2dict(course_context),
})
@courses_.route("/new")
@require_superuser()
@json_response
def admin_courses_new():
"""
    Create a new course with placeholder values
    in all the fields.
* Requires superuser *
:return:
"""
# Create a new course with placeholder
# in all the fields.
course = Course(
name="placeholder",
course_code="placeholder",
section="a",
professor_display_name="placeholder",
)
# Add it to the session
db.session.add(course)
# Commit the new Course
db.session.commit()
# Return the status
return success_response({
"course": course.data,
"status": "Created new course",
})
@courses_.route("/save", methods=["POST"])
@require_admin()
@json_endpoint(required_fields=[("course", dict)])
def admin_courses_save_id(course: dict):
"""
Update information about the course.
:param course:
:return:
"""
# Get the course id from the posted data
course_id = course.get("id", None)
# Try to get the database object corresponding to that course
db_course: Course = Course.query.filter(Course.id == course_id).first()
# Make sure we got a course
req_assert(db_course is not None, message='course not found')
# Assert that the current user is a professor or a superuser
assert_course_superuser(course_id)
# Check that the join code is valid
req_assert(
valid_join_code(course['join_code']),
message='Invalid join code. Lowercase letters and numbers only.'
)
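    # Illustrative example of the posted payload (assumed shape, not from the
    # Anubis docs): course might look like {"id": "...", "name": "Example Course",
    # "course_code": "EX-101", "section": "A", "join_code": "abc123"}.
    # Only keys that match Course columns are copied onto the row below;
    # anything else in the dict is ignored.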
# Update all the items in the course with the posted data
for column in Course.__table__.columns:
if column.name in course:
key, value = column.name, course[column.name]
if isinstance(value, str):
value = value.strip()
setattr(db_course, key, value)
# Commit the changes
try:
db.session.commit()
except (IntegrityError, DataError) as e:
db.session.rollback()
return error_response("Unable to save " + str(e))
# Return the status
return success_response({"course": db_course.data, "status": "Changes saved."})
@courses_.route('/list/students')
@require_admin()
@json_response
def admin_course_list_students():
"""
List all students for the current course context.
:return:
"""
# Get all the students in the current course context
students = User.query.join(InCourse).filter(
InCourse.course_id == course_context.id,
).all()
    # Return the list of basic user information about the students
return success_response({'users': [
{
'id': user.id, 'netid': user.netid,
'name': user.name, 'github_username': user.github_username
}
for user in students
]})
@courses_.route('/list/tas')
@require_admin()
@json_response
def admin_course_list_tas():
"""
List all TAs for the current course context.
:return:
"""
# Get all the TAs in the current course context
tas = User.query.join(TAForCourse).filter(
TAForCourse.course_id == course_context.id,
).all()
# Return the list of basic user information about the tas
return success_response({'users': [
{
'id': user.id, 'netid': user.netid,
'name': user.name, 'github_username': user.github_username
}
for user in tas
]})
@courses_.route('/list/professors')
@require_admin()
@json_response
def admin_course_list_professors():
"""
Get all the professors for the current course context.
:return:
"""
# Get all the professors within the current course context
professors = User.query.join(ProfessorForCourse).filter(
ProfessorForCourse.course_id == course_context.id,
).all()
# Return the list of basic user information about the professors
return success_response({'users': [
{
'id': user.id, 'netid': user.netid,
'name': user.name, 'github_username': user.github_username,
}
for user in professors
]})
@courses_.route('/make/student/<string:user_id>')
@require_admin()
@json_response
def admin_course_make_student_id(user_id: str):
"""
    Make a user a student for the current course.
:param user_id:
:return:
"""
# Get the other user
other = User.query.filter(User.id == user_id).first()
# Make sure they exist
req_assert(other is not None, message='user does not exist')
    # Check to see if the other user is already a student
    # in this course
student = InCourse.query.filter(
InCourse.owner_id == user_id,
InCourse.course_id == course_context.id,
).first()
    # If they are already a student, then stop
req_assert(student is None, message='they are already a student')
    # Create a new student enrollment
student = InCourse(
owner_id=user_id,
course_id=course_context.id,
)
# Add and commit the change
db.session.add(student)
db.session.commit()
# Return the status
return success_response({
'status': 'Student added to course'
})
@courses_.route('/remove/student/<string:user_id>')
@require_admin()
@json_response
def admin_course_remove_student_id(user_id: str):
"""
    Remove a student from the current course.
:param user_id:
:return:
"""
# Get the other user
other = User.query.filter(User.id == user_id).first()
# Make sure the other user exists
req_assert(other is not None, message='user does not exist')
    # Delete the student enrollment
InCourse.query.filter(
InCourse.owner_id == user_id,
InCourse.course_id == course_context.id,
).delete()
# Commit the delete
db.session.commit()
# Return the status
return success_response({
'status': 'Student removed from course',
'variant': 'warning',
})
@courses_.route('/make/ta/<string:user_id>')
@require_admin()
@json_response
def admin_course_make_ta_id(user_id: str):
"""
Make a user a ta for the current course.
:param user_id:
:return:
"""
# Get the user that will be the TA
other = User.query.filter(User.id == user_id).first()
# Check that the user exists
req_assert(other is not None, message='user does not exist')
# Check to see if the user is already a ta
ta = TAForCourse.query.filter(
TAForCourse.owner_id == user_id,
TAForCourse.course_id == course_context.id,
).first()
# Check that they are not already a TA
req_assert(ta is None, message='they are already a TA')
# Make the user a TA if they are not already
ta = TAForCourse(
owner_id=user_id,
course_id=course_context.id,
)
# Make sure they are in the course
in_course = InCourse.query.filter(
InCourse.course_id == course_context.id,
        InCourse.owner_id == user_id,
).first()
if in_course is None:
        in_course = InCourse(course_id=course_context.id, owner_id=user_id)
db.session.add(in_course)
# Add and commit the change
db.session.add(ta)
db.session.commit()
# Return the status
return success_response({
'status': 'TA added to course'
})
@courses_.route('/remove/ta/<string:user_id>')
@require_admin()
@json_response
def admin_course_remove_ta_id(user_id: str):
"""
Remove a TA from the current course context
:param user_id:
:return:
"""
# Assert that the current user is a professor or superuser
assert_course_superuser(course_context.id)
# Get the user object for the specified user
other = User.query.filter(User.id == user_id).first()
# Make sure that the other user exists
req_assert(other is not None, message='user does not exist')
# If the other user is the current user, then stop
if not current_user.is_superuser:
req_assert(other.id != current_user.id, message='cannot remove yourself')
# Delete the TA
TAForCourse.query.filter(
TAForCourse.owner_id == user_id,
TAForCourse.course_id == course_context.id,
).delete()
# Commit the delete
db.session.commit()
# Return the status
return success_response({
'status': 'TA removed from course',
'variant': 'warning',
})
@courses_.route('/make/professor/<string:user_id>')
@require_superuser()
@json_response
def admin_course_make_professor_id(user_id: str):
"""
Make a user a professor for a course
:param user_id:
:return:
"""
# Get the other user
other = User.query.filter(User.id == user_id).first()
# Make sure they exist
req_assert(other is not None, message='user does not exist')
# Check to see if the other user is already a professor
# for this course
prof = ProfessorForCourse.query.filter(
ProfessorForCourse.owner_id == user_id,
ProfessorForCourse.course_id == course_context.id,
).first()
# If they are already a professor, then stop
req_assert(prof is None, message='they are already a professor')
# Create a new professor
prof = ProfessorForCourse(
owner_id=user_id,
course_id=course_context.id,
)
# Make sure they are in the course
in_course = InCourse.query.filter(
InCourse.course_id == course_context.id,
        InCourse.owner_id == user_id,
).first()
if in_course is None:
        in_course = InCourse(course_id=course_context.id, owner_id=user_id)
db.session.add(in_course)
# Add and commit the change
db.session.add(prof)
db.session.commit()
# Return the status
return success_response({
'status': 'Professor added to course'
})
@courses_.route('/remove/professor/<string:user_id>')
@require_superuser()
@json_response
def admin_course_remove_professor_id(user_id: str):
"""
Remove a professor from a course.
:param user_id:
:return:
"""
# Get the other user
other = User.query.filter(User.id == user_id).first()
# Make sure the other user exists
req_assert(other is not None, message='user does not exist')
# Delete the professor
ProfessorForCourse.query.filter(
ProfessorForCourse.owner_id == user_id,
ProfessorForCourse.course_id == course_context.id,
).delete()
# Commit the delete
db.session.commit()
# Return the status
return success_response({
'status': 'Professor removed from course',
'variant': 'warning',
})
26.147392 | 85 | 0.661695
b3c8adab475095026cb1cc9187c978d84d66a6f1 | 20,351 | py | Python
nilmtk/disaggregate/hart_85.py | raktim2015/nilmtk | 2e5ffc32a52df2be48a6b7e464f41a8dbb7981ff | ["Apache-2.0"] | 1 | 2019-02-28T16:44:28.000Z | 2019-02-28T16:44:28.000Z
nilmtk/disaggregate/hart_85.py | raktim2015/nilmtk | 2e5ffc32a52df2be48a6b7e464f41a8dbb7981ff | ["Apache-2.0"] | null | null | null
nilmtk/disaggregate/hart_85.py | raktim2015/nilmtk | 2e5ffc32a52df2be48a6b7e464f41a8dbb7981ff | ["Apache-2.0"] | 1 | 2021-05-15T16:18:28.000Z | 2021-05-15T16:18:28.000Z
from __future__ import print_function, division
from collections import OrderedDict, deque
import pandas as pd
from nilmtk.feature_detectors.cluster import hart85_means_shift_cluster
from nilmtk.feature_detectors.steady_states import find_steady_states_transients
from nilmtk.disaggregate import Disaggregator
# Fix the seed for repeatability of experiments
SEED = 42
import numpy as np
np.random.seed(SEED)
class MyDeque(deque):
def popmiddle(self, pos):
self.rotate(-pos)
ret = self.popleft()
self.rotate(pos)
return ret
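# Illustrative example of popmiddle (not part of the original file):
# for MyDeque([10, 20, 30]), popmiddle(1) rotates the deque to [20, 30, 10],
# pops 20 from the left, rotates back, and leaves [10, 30].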
class PairBuffer(object):
"""
Attributes:
* transitionList (list of tuples)
* matchedPairs (dataframe containing matched pairs of transitions)
"""
def __init__(self, buffer_size, min_tolerance, percent_tolerance,
large_transition, num_measurements):
"""
Parameters
----------
buffer_size: int, optional
size of the buffer to use for finding edges
min_tolerance: int, optional
variance in power draw allowed for pairing a match
percent_tolerance: float, optional
if transition is greater than large_transition, then use percent of large_transition
large_transition: float, optional
power draw of a Large transition
num_measurements: int, optional
2 if only active power
3 if both active and reactive power
"""
# We use a deque here, because it allows us quick access to start and end popping
# and additionally, we can set a maxlen which drops oldest items. This nicely
        # suits Hart's recommendation that the size should be tunable.
self._buffer_size = buffer_size
self._min_tol = min_tolerance
self._percent_tol = percent_tolerance
self._large_transition = large_transition
self.transition_list = MyDeque([], maxlen=self._buffer_size)
self._num_measurements = num_measurements
if self._num_measurements == 3:
# Both active and reactive power is available
self.pair_columns = ['T1 Time', 'T1 Active', 'T1 Reactive',
'T2 Time', 'T2 Active', 'T2 Reactive']
elif self._num_measurements == 2:
# Only active power is available
self.pair_columns = ['T1 Time', 'T1 Active',
'T2 Time', 'T2 Active']
self.matched_pairs = pd.DataFrame(columns=self.pair_columns)
def clean_buffer(self):
        # Remove any matched transitions
for idx, entry in enumerate(self.transition_list):
if entry[self._num_measurements]:
self.transition_list.popmiddle(idx)
self.clean_buffer()
break
        # Remove oldest transition if buffer cleaning didn't remove anything
# if len(self.transitionList) == self._bufferSize:
# self.transitionList.popleft()
def add_transition(self, transition):
# Check transition is as expected.
assert isinstance(transition, (tuple, list))
# Check that we have both active and reactive powers.
assert len(transition) == self._num_measurements
# Convert as appropriate
if isinstance(transition, tuple):
mtransition = list(transition)
# Add transition to List of transitions (set marker as unpaired)
mtransition.append(False)
self.transition_list.append(mtransition)
# checking for pairs
# self.pairTransitions()
# self.cleanBuffer()
def pair_transitions(self):
"""
Hart 85, P 33.
When searching the working buffer for pairs, the order in which
        entries are examined is very important. If an Appliance has
        gone on and off several times in succession, there can be many
        pairings between entries in the buffer. The algorithm must not
        allow an ON transition to match an OFF which occurred at the end
of a different cycle, so that only ON/OFF pairs which truly belong
together are paired up. Otherwise the energy consumption of the
appliance will be greatly overestimated. The most straightforward
search procedures can make errors of this nature when faced with
types of transition sequences.
Hart 85, P 32.
For the two-state load monitor, a pair is defined as two entries
which meet the following four conditions:
(1) They are on the same leg, or are both 240 V,
(2) They are both unmarked,
(3) The earlier has a positive real power component, and
(4) When added together, they result in a vector in which the
absolute value of the real power component is less than 35
Watts (or 3.5% of the real power, if the transitions are
over 1000 W) and the absolute value of the reactive power
component is less than 35 VAR (or 3.5%).
... the correct way to search the buffer is to start by checking
elements which are close together in the buffer, and gradually
increase the distance. First, adjacent elements are checked for
pairs which meet all four requirements above; if any are found
they are processed and marked. Then elements two entries apart
are checked, then three, and so on, until the first and last
element are checked...
"""
tlength = len(self.transition_list)
pairmatched = False
if tlength < 2:
return pairmatched
# Can we reduce the running time of this algorithm?
# My gut feeling is no, because we can't re-order the list...
# I wonder if we sort but then check the time... maybe. TO DO
# (perhaps!).
new_matched_pairs = []
# Start the element distance at 1, go up to current length of buffer
for eDistance in range(1, tlength):
idx = 0
while idx < tlength - 1:
# We don't want to go beyond length of array
compindex = idx + eDistance
if compindex < tlength:
val = self.transition_list[idx]
# val[1] is the active power and
# val[self._num_measurements] is match status
if (val[1] > 0) and (val[self._num_measurements] is False):
compval = self.transition_list[compindex]
if compval[self._num_measurements] is False:
# Add the two elements for comparison
vsum = np.add(
val[1:self._num_measurements],
compval[1:self._num_measurements])
# Set the allowable tolerance for reactive and
# active
matchtols = [self._min_tol, self._min_tol]
for ix in range(1, self._num_measurements):
matchtols[ix - 1] = (
self._min_tol
if (max(np.fabs([val[ix], compval[ix]])) < self._large_transition)
else (self._percent_tol * max(np.fabs([val[ix], compval[ix]])))
)
if self._num_measurements == 3:
condition = (np.fabs(vsum[0]) < matchtols[0]) and (np.fabs(vsum[1]) < matchtols[1])
elif self._num_measurements == 2:
condition = np.fabs(vsum[0]) < matchtols[0]
if condition:
# Mark the transition as complete
self.transition_list[idx][self._num_measurements] = True
self.transition_list[compindex][self._num_measurements] = True
pairmatched = True
# Append the OFF transition to the ON. Add to the list.
matchedpair = val[0:self._num_measurements] + compval[0:self._num_measurements]
new_matched_pairs.append(matchedpair)
# Iterate Index
idx += 1
else:
break
# Process new pairs in a single operation (faster than growing the dataframe)
if pairmatched:
if self.matched_pairs.empty:
self.matched_pairs = pd.DataFrame(new_matched_pairs, columns=self.pair_columns)
else:
                self.matched_pairs = pd.concat(
                    [self.matched_pairs,
                     pd.DataFrame(new_matched_pairs, columns=self.pair_columns)])
return pairmatched
class Hart85(Disaggregator):
"""1 or 2 dimensional Hart 1985 algorithm.
Attributes
----------
model : dict
Each key is either the instance integer for an ElecMeter,
or a tuple of instances for a MeterGroup.
Each value is a sorted list of power in different states.
"""
def __init__(self):
self.model = {}
self.MODEL_NAME = "Hart85"
def train(self, metergroup, columns=[('power', 'active')],
buffer_size=20, noise_level=70, state_threshold=15,
min_tolerance=100, percent_tolerance=0.035,
large_transition=1000, **kwargs):
"""
Train using Hart85. Places the learnt model in `model` attribute.
Parameters
----------
metergroup : a nilmtk.MeterGroup object
columns: nilmtk.Measurement, should be one of the following
[('power','active')]
[('power','apparent')]
[('power','reactive')]
[('power','active'), ('power', 'reactive')]
buffer_size: int, optional
size of the buffer to use for finding edges
min_tolerance: int, optional
variance in power draw allowed for pairing a match
percent_tolerance: float, optional
if transition is greater than large_transition,
then use percent of large_transition
large_transition: float, optional
power draw of a Large transition
"""
self.columns = columns
self.state_threshold = state_threshold
self.noise_level = noise_level
[self.steady_states, self.transients] = find_steady_states_transients(
metergroup, columns, noise_level, state_threshold, **kwargs)
self.pair_df = self.pair(
buffer_size, min_tolerance, percent_tolerance, large_transition)
self.centroids = hart85_means_shift_cluster(self.pair_df, columns)
self.model = dict(
columns=columns,
state_threshold=state_threshold,
noise_level=noise_level,
steady_states=self.steady_states,
transients=self.transients,
# pair_df=self.pair_df,
centroids=self.centroids
)
def pair(self, buffer_size, min_tolerance, percent_tolerance,
large_transition):
subset = list(self.transients.itertuples())
buffer = PairBuffer(
min_tolerance=min_tolerance, buffer_size=buffer_size,
percent_tolerance=percent_tolerance,
large_transition=large_transition,
num_measurements=len(self.transients.columns) + 1)
for s in subset:
# if len(buffer.transitionList) < bsize
if len(buffer.transition_list) == buffer_size:
buffer.clean_buffer()
buffer.add_transition(s)
buffer.pair_transitions()
return buffer.matched_pairs
def disaggregate_chunk(self, chunk, prev, transients):
"""
Parameters
----------
chunk : pd.DataFrame
mains power
prev
transients : returned by find_steady_state_transients
Returns
-------
states : pd.DataFrame
with same index as `chunk`.
"""
states = pd.DataFrame(
-1, index=chunk.index, columns=self.centroids.index.values)
for transient_tuple in transients.itertuples():
if transient_tuple[0] < chunk.index[0]:
# Transient occurs before chunk has started; do nothing
pass
elif transient_tuple[0] > chunk.index[-1]:
# Transient occurs after chunk has ended; do nothing
pass
else:
# Absolute value of transient
abs_value = np.abs(transient_tuple[1:])
positive = transient_tuple[1] > 0
abs_value_transient_minus_centroid = pd.DataFrame(
(self.centroids - abs_value).abs())
if len(transient_tuple) == 2:
# 1d data
index_least_delta = (
abs_value_transient_minus_centroid.idxmin().values[0])
else:
# 2d data.
# Need to find absolute value before computing minimum
columns = abs_value_transient_minus_centroid.columns
abs_value_transient_minus_centroid["multidim"] = (
abs_value_transient_minus_centroid[columns[0]] ** 2
+
abs_value_transient_minus_centroid[columns[1]] ** 2)
index_least_delta = (
abs_value_transient_minus_centroid["multidim"].idxmin())
if positive:
# Turned on
states.loc[transient_tuple[0]][index_least_delta] = 1
else:
# Turned off
states.loc[transient_tuple[0]][index_least_delta] = 0
prev = states.iloc[-1].to_dict()
power_chunk_dict = self.assign_power_from_states(states, prev)
return pd.DataFrame(power_chunk_dict, index=chunk.index)
def assign_power_from_states(self, states_chunk, prev):
di = {}
ndim = len(self.centroids.columns)
for appliance in states_chunk.columns:
values = states_chunk[[appliance]].values.flatten()
if ndim == 1:
power = np.zeros(len(values), dtype=int)
else:
power = np.zeros((len(values), 2), dtype=int)
# on = False
i = 0
while i < len(values) - 1:
if values[i] == 1:
# print("A", values[i], i)
on = True
i = i + 1
                    power[i] = self.centroids.loc[appliance].values
while values[i] != 0 and i < len(values) - 1:
# print("B", values[i], i)
                        power[i] = self.centroids.loc[appliance].values
i = i + 1
elif values[i] == 0:
# print("C", values[i], i)
on = False
i = i + 1
power[i] = 0
while values[i] != 1 and i < len(values) - 1:
# print("D", values[i], i)
if ndim == 1:
power[i] = 0
else:
power[i] = [0, 0]
i = i + 1
else:
# print("E", values[i], i)
# Unknown state. If previously we know about this
# appliance's state, we can
# use that. Else, it defaults to 0
if prev[appliance] == -1 or prev[appliance] == 0:
# print("F", values[i], i)
on = False
power[i] = 0
while values[i] != 1 and i < len(values) - 1:
# print("G", values[i], i)
if ndim == 1:
power[i] = 0
else:
power[i] = [0, 0]
i = i + 1
else:
# print("H", values[i], i)
on = True
                        power[i] = self.centroids.loc[appliance].values
while values[i] != 0 and i < len(values) - 1:
# print("I", values[i], i)
                            power[i] = self.centroids.loc[appliance].values
i = i + 1
di[appliance] = power
# print(power.sum())
return di
def disaggregate(self, mains, output_datastore, **load_kwargs):
"""Disaggregate mains according to the model learnt previously.
Parameters
----------
mains : nilmtk.ElecMeter or nilmtk.MeterGroup
output_datastore : instance of nilmtk.DataStore subclass
For storing power predictions from disaggregation algorithm.
sample_period : number, optional
The desired sample period in seconds.
**load_kwargs : key word arguments
Passed to `mains.power_series(**kwargs)`
"""
load_kwargs = self._pre_disaggregation_checks(load_kwargs)
load_kwargs.setdefault('sample_period', 60)
load_kwargs.setdefault('sections', mains.good_sections())
timeframes = []
building_path = '/building{}'.format(mains.building())
mains_data_location = building_path + '/elec/meter1'
data_is_available = False
[_, transients] = find_steady_states_transients(
mains, columns=self.columns, state_threshold=self.state_threshold,
noise_level=self.noise_level, **load_kwargs)
# For now ignoring the first transient
# transients = transients[1:]
# Initially all appliances/meters are in unknown state (denoted by -1)
prev = OrderedDict()
learnt_meters = self.centroids.index.values
for meter in learnt_meters:
prev[meter] = -1
timeframes = []
# Now iterating over mains data and disaggregating chunk by chunk
for chunk in mains.power_series(**load_kwargs):
# Record metadata
timeframes.append(chunk.timeframe)
measurement = chunk.name
power_df = self.disaggregate_chunk(
chunk, prev, transients)
columns = pd.MultiIndex.from_tuples([chunk.name])
for meter in learnt_meters:
data_is_available = True
df = power_df[[meter]]
df.columns = columns
key = '{}/elec/meter{:d}'.format(building_path, meter + 2)
output_datastore.append(key, df)
output_datastore.append(key=mains_data_location,
value=pd.DataFrame(chunk, columns=columns))
if data_is_available:
self._save_metadata_for_disaggregation(
output_datastore=output_datastore,
sample_period=load_kwargs['sample_period'],
measurement=measurement,
timeframes=timeframes,
building=mains.building(),
supervised=False,
num_meters=len(self.centroids)
)
"""
def export_model(self, filename):
model_copy = {}
for appliance, appliance_states in self.model.iteritems():
model_copy[
"{}_{}".format(appliance.name, appliance.instance)] = appliance_states
j = json.dumps(model_copy)
with open(filename, 'w+') as f:
f.write(j)
def import_model(self, filename):
with open(filename, 'r') as f:
temp = json.loads(f.read())
for appliance, centroids in temp.iteritems():
appliance_name = appliance.split("_")[0].encode("ascii")
appliance_instance = int(appliance.split("_")[1])
appliance_name_instance = ApplianceID(
appliance_name, appliance_instance)
self.model[appliance_name_instance] = centroids
"""
42.48643 | 123 | 0.555599
d0df4c09c876658930e2b85c18d30c810b9b9beb | 933 | py | Python
uninas/modules/activations/common.py | cogsys-tuebingen/uninas | 06729b9cf517ec416fb798ae387c5bd9c3a278ac | ["MIT"] | 18 | 2020-11-22T16:03:08.000Z | 2022-03-15T12:11:46.000Z
uninas/modules/activations/common.py | cogsys-tuebingen/uninas | 06729b9cf517ec416fb798ae387c5bd9c3a278ac | ["MIT"] | 2 | 2022-01-04T08:10:17.000Z | 2022-01-05T08:13:14.000Z
uninas/modules/activations/common.py | cogsys-tuebingen/uninas | 06729b9cf517ec416fb798ae387c5bd9c3a278ac | ["MIT"] | 6 | 2021-03-08T07:08:52.000Z | 2022-02-24T12:00:43.000Z
"""
common activation functions
"""
import torch.nn as nn
from uninas.register import Register
@Register.act_fun()
def identity(inplace=False) -> nn.Module:
return nn.Identity()
@Register.act_fun()
def skip(inplace=False) -> nn.Module:
return nn.Identity()
@Register.act_fun()
def softmax(inplace=False) -> nn.Module:
return nn.Softmax(dim=-1)
@Register.act_fun()
def relu(inplace=False) -> nn.Module:
return nn.ReLU(inplace=inplace)
@Register.act_fun()
def relu6(inplace=False) -> nn.Module:
return nn.ReLU6(inplace=inplace)
@Register.act_fun()
def sigmoid(inplace=False) -> nn.Module:
return nn.Sigmoid()
@Register.act_fun()
def hsigmoid(inplace=False) -> nn.Module:
return nn.Hardsigmoid(inplace=inplace)
@Register.act_fun()
def tanh(inplace=False) -> nn.Module:
return nn.Tanh()
@Register.act_fun()
def htanh(inplace=False) -> nn.Module:
return nn.Hardtanh(inplace=inplace)
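# A further activation could be registered with the same pattern; this sketch
# (hswish) is not part of the original file and assumes torch provides
# nn.Hardswish, which it does from PyTorch 1.6 onwards.
@Register.act_fun()
def hswish(inplace=False) -> nn.Module:
    return nn.Hardswish(inplace=inplace)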
17.942308 | 42 | 0.709539
ad5564a263937df6e89d1158885e68380e2a9fdc | 345 | py | Python
hello-world/hello-world.py | this-should-never-happen/this-should-never-happen | 3641622934e6f187a6e8a7ea393119cd1498acea | ["Apache-2.0"] | 5 | 2017-09-07T01:06:31.000Z | 2019-04-16T22:03:11.000Z
hello-world/hello-world.py | this-should-never-happen/this-should-never-happen | 3641622934e6f187a6e8a7ea393119cd1498acea | ["Apache-2.0"] | 1 | 2017-09-04T03:09:03.000Z | 2017-09-04T03:09:03.000Z
hello-world/hello-world.py | this-should-never-happen/this-should-never-happen | 3641622934e6f187a6e8a7ea393119cd1498acea | ["Apache-2.0"] | 2 | 2017-09-30T02:01:59.000Z | 2019-12-03T22:23:37.000Z
#!/usr/bin/python
class Data(object):
"""__name__"""
def __init__(__main__):
Hello = None
World = None
if (globals()[Data.__doc__] ==
''.join(Data.__init__.__code__.co_varnames[
:Data.__init__.__code__.co_argcount])):
print(' '.join(Data.__init__.__code__.co_varnames[
Data.__init__.__code__.co_argcount:]));
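# Why this prints "Hello World" (explanatory note, not in the original file):
# Data.__doc__ is "__name__", so globals()[Data.__doc__] is the module's
# __name__, which equals "__main__" when the script is run directly.
# __init__.__code__.co_varnames is ('__main__', 'Hello', 'World') and
# co_argcount is 1, so the joined argument names equal "__main__" and the
# remaining local names join to "Hello World".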
21.5625 | 52 | 0.663768
881b456b6f2f3a641d5fc0ee7ce1b868704a5d7e | 136 | py | Python
cli/jobs/basics/src/hello-git.py | jplummer01/azureml-examples | 6a073d157f21060312941f71cfbcf25d0c541183 | ["MIT"] | null | null | null
cli/jobs/basics/src/hello-git.py | jplummer01/azureml-examples | 6a073d157f21060312941f71cfbcf25d0c541183 | ["MIT"] | null | null | null
cli/jobs/basics/src/hello-git.py | jplummer01/azureml-examples | 6a073d157f21060312941f71cfbcf25d0c541183 | ["MIT"] | null | null | null
from pygit2 import Repository
try:
print(Repository(".").head.shorthand)
except Exception:
print("Repository('.').head.shorthand error")
19.428571 | 49 | 0.705882
43f53773f77d4acb582d0d4ba9e2995e57364a06 | 11,579 | py | Python
src/genie/libs/parser/iosxe/tests/ShowBgpAllNeighborsAdvertisedRoutes/cli/equal/golden_output2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | null | null | null
src/genie/libs/parser/iosxe/tests/ShowBgpAllNeighborsAdvertisedRoutes/cli/equal/golden_output2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | null | null | null
src/genie/libs/parser/iosxe/tests/ShowBgpAllNeighborsAdvertisedRoutes/cli/equal/golden_output2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | null | null | null
expected_output = {
"vrf": {
"default": {
"neighbor": {
"10.106.102.3": {
"address_family": {
"ipv4 multicast": {
"advertised": {
"10.9.1.0/24": {
"index": {
1: {
"localprf": 100,
"next_hop": "10.106.101.1",
"origin_codes": "i",
"path": "2 3 4",
"status_codes": "*>i",
"weight": 0,
}
}
},
"10.9.2.0/24": {
"index": {
1: {
"localprf": 100,
"next_hop": "10.106.101.1",
"origin_codes": "i",
"path": "2 3 4",
"status_codes": "*>i",
"weight": 0,
}
}
},
"10.25.1.0/24": {
"index": {
1: {
"localprf": 100,
"next_hop": "10.106.102.4",
"origin_codes": "i",
"path": "2 3 4",
"status_codes": "*>i",
"weight": 0,
}
}
},
"10.25.2.0/24": {
"index": {
1: {
"localprf": 100,
"next_hop": "10.106.102.4",
"origin_codes": "i",
"path": "2 3 4",
"status_codes": "*>i",
"weight": 0,
}
}
},
"10.36.2.0/24": {
"index": {
1: {
"localprf": 500,
"metric": 5555,
"next_hop": "10.106.102.4",
"origin_codes": "i",
"path": "2 3 4 5 6 7 8 9 10 11 12",
"status_codes": "*>i",
"weight": 32788,
}
}
},
},
"bgp_table_version": 175,
"local_router_id": "10.145.0.6",
},
"ipv4 unicast": {
"advertised": {
"10.4.1.0/24": {
"index": {
1: {
"localprf": 100,
"next_hop": "10.106.102.4",
"origin_codes": "i",
"path": "{62112 33492 4872 41787 13166 50081 21461 58376 29755 1135}",
"status_codes": "*>i",
"weight": 0,
}
}
},
"10.4.2.0/24": {
"index": {
1: {
"localprf": 100,
"next_hop": "10.106.102.4",
"path": "{62112 33492 4872 41787 13166 50081 21461 58376 29755 1135}",
"origin_codes": "i",
"status_codes": "*>i",
"weight": 0,
}
}
},
"10.36.0.0/24": {
"index": {
1: {
"metric": 100,
"next_hop": "10.106.102.3",
"origin_codes": "i",
"path": "10 20 30 40 50 60 70 80 90",
"status_codes": "*>i",
"weight": 0,
}
}
},
"10.49.0.0/16": {
"index": {
1: {
"localprf": 100,
"next_hop": "10.106.101.1",
"origin_codes": "i",
"path": "10 20 30 40 50 60 70 80 90",
"status_codes": "*>i",
"weight": 0,
}
}
},
},
"bgp_table_version": 174,
"local_router_id": "10.145.0.6",
},
"ipv6 multicast": {
"bgp_table_version": 6,
"local_router_id": "10.145.0.6",
"advertised": {},
},
"ipv6 unicast": {
"bgp_table_version": 173,
"local_router_id": "10.145.0.6",
"advertised": {},
},
"link-state": {
"advertised": {
"[2]:[77][7,0][10.69.9.9,1,151587081][10.135.1.1,22][10.106.101.1,10.76.1.30]/616": {
"index": {
1: {
"localprf": 100,
"metric": 4444,
"next_hop": "10.106.101.1",
"origin_codes": "i",
"path": "3 10 20 30 40 50 60 70 80 90",
"status_codes": "*>i",
"weight": 0,
},
2: {
"localprf": 100,
"metric": 4444,
"next_hop": "10.106.102.3",
"origin_codes": "i",
"path": "3 10 20 30 40 50 60 70 80 90",
"status_codes": "*>i",
"weight": 0,
},
}
},
"[2]:[77][7,0][10.69.9.9,2,151587081][10.135.1.1,22][10.106.101.1,10.76.1.31]/616": {
"index": {
1: {
"localprf": 200,
"metric": 555,
"next_hop": "10.106.103.2",
"origin_codes": "i",
"path": "3 10 20 30 40 50 60 70 80 90",
"status_codes": "*>i",
"weight": 0,
}
}
},
},
"bgp_table_version": 173,
"local_router_id": "10.145.0.6",
},
"vpnv4 unicast": {
"bgp_table_version": 183,
"local_router_id": "10.145.0.6",
"advertised": {},
},
"vpnv4 unicast RD 0:0": {
"bgp_table_version": 183,
"local_router_id": "10.145.0.6",
"route_distinguisher": "0:0",
"advertised": {},
},
"vpnv4 unicast RD 101:100": {
"bgp_table_version": 183,
"local_router_id": "10.145.0.6",
"route_distinguisher": "101:100",
"advertised": {},
},
"vpnv4 unicast RD 102:100": {
"bgp_table_version": 183,
"local_router_id": "10.145.0.6",
"route_distinguisher": "102:100",
"advertised": {},
},
"vpnv6 unicast": {
"bgp_table_version": 13,
"local_router_id": "10.145.0.6",
"advertised": {},
},
"vpnv6 unicast RD 0xbb00010000000000": {
"bgp_table_version": 13,
"local_router_id": "10.145.0.6",
"route_distinguisher": "0xbb00010000000000",
"advertised": {},
},
"vpnv6 unicast RD 100:200": {
"bgp_table_version": 13,
"local_router_id": "10.145.0.6",
"route_distinguisher": "100:200",
"advertised": {},
},
}
}
}
}
}
}
51.462222 | 117 | 0.195699
ce9f47af53f068be90756f4a1b47ba5e3d411668 | 174 | py | Python
detector/utils/__init__.py | dingjiangang/light-head-rcnn | 7ab24c1655b9872666b163fe36fd063aef359f2e | ["MIT"] | null | null | null
detector/utils/__init__.py | dingjiangang/light-head-rcnn | 7ab24c1655b9872666b163fe36fd063aef359f2e | ["MIT"] | null | null | null
detector/utils/__init__.py | dingjiangang/light-head-rcnn | 7ab24c1655b9872666b163fe36fd063aef359f2e | ["MIT"] | null | null | null
from .box_utils import prune_outside_window, clip_by_window,\
batch_decode, encode, iou, decode, area, intersection
from .nms import batch_multiclass_non_max_suppression
43.5 | 61 | 0.833333
ca06798ae39e1bb26d1eab54fb03a276166cb425 | 10,197 | py | Python
src/benchmark.py | snakers4/pyarmor | cba32d36ceb8129b54e9fd610f5fcc7fa0a0b0af | ["Python-2.0", "OLDAP-2.7"] | 1,463 | 2017-09-30T02:46:56.000Z | 2022-03-30T15:11:05.000Z
src/benchmark.py | JangKyumin/pyarmor | a9c2535e097467ab58bf52de78b487fef87c5504 | ["Python-2.0", "OLDAP-2.7"] | 702 | 2016-12-02T23:47:21.000Z | 2022-03-31T08:14:00.000Z
src/benchmark.py | JangKyumin/pyarmor | a9c2535e097467ab58bf52de78b487fef87c5504 | ["Python-2.0", "OLDAP-2.7"] | 208 | 2018-01-17T05:55:55.000Z | 2022-03-29T18:27:47.000Z
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
#############################################################
# #
# Copyright @ 2013 - 2017 Dashingsoft corp. #
# All rights reserved. #
# #
# pyarmor #
# #
# Version: 1.7.0 - #
# #
#############################################################
#
#
# @File: benchmark.py
#
# @Author: Jondy Zhao(jondy.zhao@gmail.com)
#
# @Create Date: 2017/11/21
#
# @Description:
#
# Check performance of pyarmor.
#
import logging
import os
import shutil
import sys
import subprocess
import time
from ctypes import c_int, c_void_p, py_object, pythonapi, PYFUNCTYPE
import pytransform
OBF_MODULE_MODE = 'none', 'des', 'aes'
OBF_CODE_MODE = 'none', 'fast', 'aes', 'wrap'
PYARMOR_PATH = os.path.dirname(__file__)
PYARMOR = 'pyarmor.py'
def make_test_script(filename):
lines = [
'def empty():',
' return 0',
'',
'def call_1k_function(n):',
' for i in range(n):',
' one_thousand()',
'',
'def call_10k_function(n):',
' for i in range(n):',
' ten_thousand()',
'',
'def one_thousand():',
' if True:',
' i = 0',
]
lines.extend([' i += 1'] * 100)
lines.append('\n return 1000\n')
lines.extend(['def ten_thousand():',
' if True:',
' i = 0'])
lines.extend([' i += 1'] * 1000)
lines.append('\n return 10000\n')
with open(filename, 'wb') as f:
f.write('\n'.join(lines).encode())
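# Sketch of what the generated test script contains (derived from the lines
# above): empty(), one_thousand() built from 100 "i += 1" statements,
# ten_thousand() built from 1000, plus call_1k_function(n) and
# call_10k_function(n) loops that the timing helpers below exercise.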
def call_pyarmor(args):
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
def obffuscate_scripts(output, filename,
mod_mode, code_mode, wrap_mode, adv_mode):
project = os.path.join(output, 'project')
if os.path.exists(project):
shutil.rmtree(project)
args = [sys.executable, PYARMOR, 'init', '--src', output,
'--entry', filename, project]
call_pyarmor(args)
args = [sys.executable, PYARMOR, 'config',
'--manifest', 'include %s' % filename,
'--obf-mod', mod_mode,
'--obf-code', code_mode,
'--wrap-mode', wrap_mode,
'--advanced', adv_mode,
'--restrict-mode', '0',
'--package-runtime', '0',
project]
call_pyarmor(args)
args = [sys.executable, PYARMOR, 'build', '-B', project]
call_pyarmor(args)
for s in os.listdir(os.path.join(project, 'dist')):
shutil.copy(os.path.join(project, 'dist', s), output)
def metricmethod(func):
if not hasattr(time, 'process_time'):
time.process_time = time.clock
def wrap(*args, **kwargs):
t1 = time.process_time()
result = func(*args, **kwargs)
t2 = time.process_time()
logging.info('%-50s: %10.6f ms', func.__name__, (t2 - t1) * 1000)
return result
return wrap
@metricmethod
def verify_license(m):
try:
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('get_registration_code', m))
code = dlfunc()
except Exception:
logging.warning('Verify license failed')
code = ''
return code
@metricmethod
def init_pytransform(m):
major, minor = sys.version_info[0:2]
# Python2.5 no sys.maxsize but sys.maxint
# bitness = 64 if sys.maxsize > 2**32 else 32
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_void_p)
init_module = prototype(('init_module', m))
init_module(major, minor, pythonapi._handle)
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int)
init_runtime = prototype(('init_runtime', m))
init_runtime(0, 0, 0, 0)
@metricmethod
def load_pytransform():
return pytransform._load_library(PYARMOR_PATH, is_runtime=1)
@metricmethod
def total_extra_init_time():
m = load_pytransform()
init_pytransform(m)
verify_license(m)
@metricmethod
def import_first_no_obfuscated_module(name):
return __import__(name)
@metricmethod
def import_first_obfuscated_module(name):
return __import__(name)
@metricmethod
def re_import_no_obfuscated_module(name):
return __import__(name)
@metricmethod
def re_import_obfuscated_module(name):
return __import__(name)
@metricmethod
def run_empty_obfuscated_code_object(foo):
return foo.empty()
@metricmethod
def run_obfuscated_1k_bytecode(foo):
return foo.one_thousand()
@metricmethod
def run_obfuscated_10k_bytecode(foo):
return foo.ten_thousand()
@metricmethod
def run_empty_no_obfuscated_code_object(foo):
return foo.empty()
@metricmethod
def run_no_obfuscated_1k_bytecode(foo):
return foo.one_thousand()
@metricmethod
def run_no_obfuscated_10k_bytecode(foo):
return foo.ten_thousand()
@metricmethod
def import_many_obfuscated_modules(name, n=100):
for i in range(n):
__import__(name % i)
@metricmethod
def import_many_no_obfuscated_modules(name, n=100):
for i in range(n):
__import__(name % i)
@metricmethod
def call_1000_no_obfuscated_1k_bytecode(foo):
return foo.call_1k_function(1000)
@metricmethod
def call_1000_obfuscated_1k_bytecode(foo):
return foo.call_1k_function(1000)
@metricmethod
def call_1000_no_obfuscated_10k_bytecode(foo):
return foo.call_10k_function(1000)
@metricmethod
def call_1000_obfuscated_10k_bytecode(foo):
return foo.call_10k_function(1000)
@metricmethod
def call_10000_no_obfuscated_1k_bytecode(foo):
return foo.call_1k_function(10000)
@metricmethod
def call_10000_obfuscated_1k_bytecode(foo):
return foo.call_1k_function(10000)
@metricmethod
def call_10000_no_obfuscated_10k_bytecode(foo):
return foo.call_10k_function(10000)
@metricmethod
def call_10000_obfuscated_10k_bytecode(foo):
return foo.call_10k_function(10000)
def main():
if not os.path.exists('benchmark.py'):
logging.warning('Please change current path to %s', PYARMOR_PATH)
return
output = '.benchtest'
name = 'bfoo'
filename = os.path.join(output, name + '.py')
obname = 'obfoo'
obfilename = os.path.join(output, obname + '.py')
if len(sys.argv) > 1 and 'bootstrap'.startswith(sys.argv[1]):
if len(sys.argv) < 6:
sys.argv.extend(['1', '1', '1', '0'])
obf_mod, obf_code, wrap_mode, adv_mode = sys.argv[2:6]
if os.path.exists(output) and output.endswith('.benchtest'):
logging.info('Clean output path: %s', output)
shutil.rmtree(output)
logging.info('Create output path: %s', output)
os.makedirs(output)
logging.info('Generate test script %s ...', filename)
make_test_script(filename)
        logging.info('Obfuscate test script ...')
shutil.copy(filename, obfilename)
obffuscate_scripts(output, os.path.basename(obfilename),
obf_mod, obf_code, wrap_mode, adv_mode)
if not os.path.exists(obfilename):
            logging.info('Something went wrong obfuscating the script')
return
        logging.info('Generated obfuscated script %s', obfilename)
logging.info('Copy benchmark.py to %s', output)
shutil.copy('benchmark.py', output)
logging.info('')
logging.info('Now change to "%s"', output)
logging.info('Run "%s benchmark.py".', sys.executable)
return
filename = os.path.basename(filename)
if os.path.exists(filename):
logging.info('Test script: %s', filename)
else:
logging.warning('Test script: %s not found', filename)
logging.info('Run "%s benchmark.py bootstrap" first.', sys.executable)
return
obfilename = os.path.basename(obfilename)
if os.path.exists(obfilename):
logging.info('Obfuscated script: %s', obfilename)
else:
logging.warning('Obfuscated script: %s not found', obfilename)
logging.info('Run "%s benchmark.py bootstrap" first.', sys.executable)
return
logging.info('--------------------------------------')
    # It doesn't work for super mode
# logging.info('')
# total_extra_init_time()
logging.info('')
foo = import_first_no_obfuscated_module(name)
obfoo = import_first_obfuscated_module(obname)
logging.info('')
foo = re_import_no_obfuscated_module(name)
obfoo = re_import_obfuscated_module(obname)
logging.info('')
n = 10
logging.info('--- Import %d modules ---', n)
for i in range(n):
shutil.copy(filename, filename.replace('.py', '_%s.py' % i))
with open(obfilename) as f:
lines = f.readlines()
with open(obfilename.replace('.py', '_%s.py' % i), 'w') as f:
f.write(lines[2] if lines[0].find('pyarmor_runtime') > 0 \
else ''.join(lines))
import_many_no_obfuscated_modules('bfoo_%s', n)
import_many_obfuscated_modules('obfoo_%s', n)
logging.info('')
run_empty_no_obfuscated_code_object(foo)
run_empty_obfuscated_code_object(obfoo)
logging.info('')
run_no_obfuscated_1k_bytecode(foo)
run_obfuscated_1k_bytecode(obfoo)
logging.info('')
run_no_obfuscated_10k_bytecode(foo)
run_obfuscated_10k_bytecode(obfoo)
logging.info('')
call_1000_no_obfuscated_1k_bytecode(foo)
call_1000_obfuscated_1k_bytecode(obfoo)
logging.info('')
call_1000_no_obfuscated_10k_bytecode(foo)
call_1000_obfuscated_10k_bytecode(obfoo)
logging.info('')
call_10000_no_obfuscated_1k_bytecode(foo)
call_10000_obfuscated_1k_bytecode(obfoo)
logging.info('')
call_10000_no_obfuscated_10k_bytecode(foo)
call_10000_obfuscated_10k_bytecode(obfoo)
logging.info('')
logging.info('--------------------------------------')
if __name__ == '__main__':
logging.basicConfig(
level=logging.INFO,
format='%(message)s',
)
main()
26.624021 | 78 | 0.612827
4e35efcf90e4f1630d15a7d4cd8e32c4ad85a869 | 11,106 | py | Python
mlens/ensemble/temporal.py | mehrdad-shokri/mlens | 6cbc11354b5f9500a33d9cefb700a1bba9d3199a | ["MIT"] | 760 | 2017-03-13T10:11:45.000Z | 2022-03-30T20:59:20.000Z
mlens/ensemble/temporal.py | rahulsaini/mlens | 6cbc11354b5f9500a33d9cefb700a1bba9d3199a | ["MIT"] | 115 | 2017-01-18T22:10:33.000Z | 2022-03-17T12:42:34.000Z
mlens/ensemble/temporal.py | rahulsaini/mlens | 6cbc11354b5f9500a33d9cefb700a1bba9d3199a | ["MIT"] | 96 | 2017-03-13T10:12:48.000Z | 2022-02-23T17:12:39.000Z
"""ML-ENSEMBLE
:author: Sebastian Flennerhag
:copyright: 2017-2018
:licence: MIT
Temporal ensemble class. Fully integrable with Scikit-learn.
"""
from __future__ import division
from .base import BaseEnsemble
from ..index import TemporalIndex, FullIndex
class TemporalEnsemble(BaseEnsemble):
r"""Temporal ensemble class.
The temporal ensemble class uses a time series cross-validation
strategy to create training and test folds that preserve temporal
ordering in the data. The cross validation strategy is unrolled
through time. For instance:
==== ================= ==========
fold train obs test obs
==== ================= ==========
0 0, 1, 2, 3 4
1 0, 1, 2, 3, 4 5
2 0, 1, 2, 3, 4, 5 6
==== ================= ==========
Different estimators in the ensemble can operate on different time scales,
    allowing efficient combinations of different temporal patterns in one model.
See Also
--------
:class:`SuperLearner`, :class:`BlendEnsemble`, :class:`SequentialEnsemble`
    .. note :: All parameters can be overridden in the :attr:`add` method unless
otherwise specified. Notably, the ``backend`` and ``n_jobs`` cannot
be altered in the :attr:`add` method.
Parameters
----------
step_size : int (default=1)
number of samples to use in each test fold. The final window
size may be smaller if too few observations remain.
burn_in : int (default=None)
number of samples to use for first training fold. These observations
will be dropped from the output. Defaults to ``step_size``.
window: int (default=None)
number of previous samples to use in each training fold, except first
which is determined by ``burn_in``. If ``None``, will use all previous
observations.
lag: int (default=0)
distance between the most recent training point in the training fold and
the first test point. For ``lag>0``, the training fold and the test fold
will not be contiguous.
scorer : object (default = None)
scoring function. If a function is provided, base estimators will be
scored on the training set assembled for fitting the meta estimator.
Since those predictions are out-of-sample, the scores represent valid
test scores. The scorer should be a function that accepts an array of
true values and an array of predictions: ``score = f(y_true, y_pred)``.
raise_on_exception : bool (default = True)
whether to issue warnings on soft exceptions or raise error.
Examples include lack of layers, bad inputs, and failed fit of an
estimator in a layer. If set to ``False``, warnings are issued instead
but estimation continues unless exception is fatal. Note that this
can result in unexpected behavior unless the exception is anticipated.
verbose : int or bool (default = False)
level of verbosity.
* ``verbose = 0`` silent (same as ``verbose = False``)
* ``verbose = 1`` messages at start and finish (same as
``verbose = True``)
* ``verbose = 2`` messages for each layer
If ``verbose >= 50`` prints to ``sys.stdout``, else ``sys.stderr``.
For verbosity in the layers themselves, use ``fit_params``.
n_jobs : int (default = -1)
Degree of parallel processing. Set to -1 for maximum parallelism and
        1 for sequential processing. Cannot be overridden in the :attr:`add` method.
backend : str or object (default = 'threading')
backend infrastructure to use during call to
:class:`mlens.externals.joblib.Parallel`. See Joblib for further
documentation. To set global backend, set ``mlens.config._BACKEND``.
        Cannot be overridden in the :attr:`add` method.
model_selection: bool (default=False)
Whether to use the ensemble in model selection mode. If ``True``,
this will alter the ``transform`` method. When calling ``transform``
on new data, the ensemble will call ``predict``, while calling
``transform`` with the training data reproduces predictions from the
``fit`` call. Hence the ensemble can be used as a pure transformer
in a preprocessing pipeline passed to the :class:`Evaluator`, as
        training folds are faithfully reproduced as during a ``fit`` call and
test folds are transformed with the ``predict`` method.
sample_size: int (default=20)
size of training set sample
(``[min(sample_size, X.size[0]), min(X.size[1], sample_size)]``)
Examples
--------
>>> from sklearn.linear_model import LinearRegression
>>> from mlens.ensemble import TemporalEnsemble
>>> import numpy as np
>>>
>>> x = np.linspace(0, 1, 100)
>>> y = x[1:]
>>> x = x[:-1]
>>> x = x.reshape(-1, 1)
>>>
>>> ens = TemporalEnsemble(window=1)
>>> ens.add(LinearRegression())
>>>
>>> ens.fit(x, y)
>>> p = ens.predict(x)
>>>
>>>
>>> print("{:5} | {:5}".format('pred', 'truth'))
>>> for i in range(5, 10):
... print("{:.3f} | {:.3f}".format(p[i], y[i]))
>>>
pred | truth
0.061 | 0.061
0.071 | 0.071
0.081 | 0.081
0.091 | 0.091
0.101 | 0.101
"""
def __init__(
self, step_size=1, burn_in=None, window=None, lag=0, scorer=None,
raise_on_exception=True, array_check=None, verbose=False, n_jobs=-1,
backend='threading', model_selection=False, sample_size=20, layers=None):
super(TemporalEnsemble, self).__init__(
shuffle=False, random_state=None, scorer=scorer,
raise_on_exception=raise_on_exception, verbose=verbose,
n_jobs=n_jobs, layers=layers, backend=backend,
array_check=array_check, model_selection=model_selection,
sample_size=sample_size)
self.__initialized__ = 0 # Unlock parameter setting
self.step_size = step_size
self.burn_in = burn_in
self.window = window
self.lag = lag
self.__initialized__ = 1 # Protect against param resets
def add_meta(self, estimator, **kwargs):
"""Meta Learner.
Meta learner to be used for final predictions.
Parameters
----------
estimator : instance
estimator instance.
**kwargs : optional
optional keyword arguments.
"""
return self.add(estimators=estimator, meta=True, **kwargs)
def add(self, estimators, preprocessing=None,
proba=False, meta=False, propagate_features=None, **kwargs):
"""Add layer to ensemble.
Parameters
----------
estimators: dict of lists or list or instance
estimators constituting the layer. If preprocessing is none and the
layer is meant to be the meta estimator, it is permissible to pass
a single instantiated estimator. If ``preprocessing`` is
``None`` or ``list``, ``estimators`` should be a ``list``.
The list can either contain estimator instances,
named tuples of estimator instances, or a combination of both. ::
option_1 = [estimator_1, estimator_2]
option_2 = [("est-1", estimator_1), ("est-2", estimator_2)]
option_3 = [estimator_1, ("est-2", estimator_2)]
If different preprocessing pipelines are desired, a dictionary
that maps estimators to preprocessing pipelines must be passed.
The names of the estimator dictionary must correspond to the
names of the estimator dictionary. ::
preprocessing_cases = {"case-1": [trans_1, trans_2],
"case-2": [alt_trans_1, alt_trans_2]}
estimators = {"case-1": [est_a, est_b],
"case-2": [est_c, est_d]}
The lists for each dictionary entry can be any of ``option_1``,
``option_2`` and ``option_3``.
preprocessing: dict of lists or list, optional (default = None)
preprocessing pipelines for given layer. If
the same preprocessing applies to all estimators, ``preprocessing``
should be a list of transformer instances. The list can contain the
instances directly, named tuples of transformers,
or a combination of both. ::
option_1 = [transformer_1, transformer_2]
option_2 = [("trans-1", transformer_1),
("trans-2", transformer_2)]
option_3 = [transformer_1, ("trans-2", transformer_2)]
If different preprocessing pipelines are desired, a dictionary
that maps preprocessing pipelines must be passed. The names of the
preprocessing dictionary must correspond to the names of the
estimator dictionary. ::
preprocessing_cases = {"case-1": [trans_1, trans_2],
"case-2": [alt_trans_1, alt_trans_2]}
estimators = {"case-1": [est_a, est_b],
"case-2": [est_c, est_d]}
The lists for each dictionary entry can be any of ``option_1``,
``option_2`` and ``option_3``.
proba : bool
whether layer should predict class probabilities. Note: setting
            ``proba=True`` will attempt to call the estimators'
            ``predict_proba`` method.
propagate_features : list, optional
List of column indexes to propagate from the input of
the layer to the output of the layer. Propagated features are
concatenated and stored in the leftmost columns of the output
matrix. The ``propagate_features`` list should define a slice of
the numpy array containing the input data, e.g. ``[0, 1]`` to
propagate the first two columns of the input matrix to the output
matrix.
meta : bool (default = False)
indicator if the layer added is the final meta estimator. This will
prevent folded or blended fits of the estimators and only fit them
once on the full input data.
**kwargs : optional
optional keyword arguments.
Returns
-------
self : instance
ensemble instance with layer instantiated.
"""
s = kwargs.pop('step_size', self.step_size)
b = kwargs.pop('burn_in', self.burn_in)
w = kwargs.pop('window', self.window)
l = kwargs.pop('lag', self.lag)
if meta:
idx = FullIndex()
else:
idx = TemporalIndex(
s, b, w, l, raise_on_exception=self.raise_on_exception)
return super(TemporalEnsemble, self).add(
estimators=estimators, indexer=idx, preprocessing=preprocessing,
proba=proba, propagate_features=propagate_features, **kwargs)
40.093863 | 85 | 0.609941
ae4c3dd53948846840661bcedf9b4e0fd17deeeb | 3,881 | py | Python
powershell_kernel/powershell_proxy.py | VasuBhog/jupyter-powershell | a69b8ee9667f95e80ef09cc538de7884efc0365f | ["MIT"] | 114 | 2016-02-09T04:20:10.000Z | 2022-03-09T22:52:24.000Z
powershell_kernel/powershell_proxy.py | VasuBhog/jupyter-powershell | a69b8ee9667f95e80ef09cc538de7884efc0365f | ["MIT"] | 34 | 2016-06-22T16:10:53.000Z | 2020-12-11T01:19:11.000Z
powershell_kernel/powershell_proxy.py | VasuBhog/jupyter-powershell | a69b8ee9667f95e80ef09cc538de7884efc0365f | ["MIT"] | 28 | 2016-06-22T16:03:21.000Z | 2022-03-26T18:08:39.000Z
import threading
try:
import queue
except ImportError:
import Queue as queue
from threading import Timer, Lock
from time import sleep
class ReplReader(threading.Thread):
def __init__(self, repl):
super(ReplReader, self).__init__()
self.repl = repl
self.daemon = True
self.queue = queue.Queue()
self.start()
def run(self):
r = self.repl
q = self.queue
while True:
result = r.read()
q.put(result)
if result is None:
break
class ReplProxy(object):
def __init__(self, repl):
self.runCmdLock = Lock()
self._repl = repl
self._repl_reader = ReplReader(repl)
self.stop_flag = False
self.output = ''
self.timer = Timer(0.1, self.update_view_loop)
self.timer.start()
self.output_prefix_stripped = True
self.expected_output_prefix = ''
self.expected_output_len = 0
        # run_command returns a generator that yields string messages as they
        # come back from PowerShell via stdout. Redefining the prompt to '^'
        # here is a hack used to detect when PowerShell has finished
        # processing a command.
for temp in self.run_command('function prompt() {"^"}'):
continue
def run_command(self, input):
self.runCmdLock.acquire()
try:
self.output = ''
self.stop_flag = False
# Append newline to the original input to handle single line comments on the last line
#
# Also, for multiline statements we should send 1 extra new line at the end
# https://stackoverflow.com/questions/13229066/how-to-end-a-multi-line-command-in-powershell
input = '. {\n' + input + '\n}\n'
self.expected_output_prefix = input.replace('\n', '\n>> ') + '\n'
self.expected_output_len = len(self.expected_output_prefix)
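            # Illustrative example (assumed input, not from the original code):
            # for input 'x = 1' the wrapped command is '. {\nx = 1\n}\n' and
            # expected_output_prefix becomes '. {\n>> x = 1\n>> }\n>> \n',
            # i.e. the echoed command with '>> ' continuation prompts, which
            # write() strips from the front of the output.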
self.output_prefix_stripped = False
self._repl.write(input + '\n')
while not self.stop_flag:
sleep(0.05)
# Allows for interactive streaming of output
if not self.stop_flag:
powershell_message = self.output
self.output = ''
if powershell_message != '':
yield powershell_message
yield self.output
finally:
self.runCmdLock.release()
    def handle_repl_output(self):
        """Drains pending output from the Repl reader queue (forwarding it via
        write()) and returns a bool indicating whether the Repl is still running"""
if self.stop_flag:
return True
try:
while True:
packet = self._repl_reader.queue.get_nowait()
if packet is None:
return False
self.write(packet)
except queue.Empty:
return True
def update_view_loop(self):
is_still_working = self.handle_repl_output()
if is_still_working:
self.timer = Timer(0.1, self.update_view_loop)
self.timer.start()
else:
            self.write("\n***Repl Killed***\n")
def write(self, packet):
# this is a hack to detect when we stop processing this input
if packet.endswith('^'):
self.stop_flag = True
self.output += packet[:-1]
return
self.output += packet
if not self.output_prefix_stripped and len(self.output) >= self.expected_output_len:
if self.output[:self.expected_output_len] != self.expected_output_prefix:
print("Unexpected prefix: %r : Expected %r" % (
self.output[:self.expected_output_len], self.expected_output_prefix
))
else:
self.output_prefix_stripped = True
self.output = self.output[self.expected_output_len:]
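# Hedged usage sketch: ReplProxy only relies on a repl object exposing a blocking
# read() that returns str chunks (or None on EOF) and a write(str) method.
# "powershell_repl" below is a hypothetical stand-in for the object created
# elsewhere in the package.
#   proxy = ReplProxy(powershell_repl)
#   for chunk in proxy.run_command('Get-ChildItem'):
#       print(chunk)  # output is streamed as PowerShell produces it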
| 33.17094
| 105
| 0.569441
|
eccbf7f386a7fb6585eaaa989f99dfa691b23b9d
| 2,256
|
py
|
Python
|
ceph_deploy/gatherkeys.py
|
zidarsk8/ceph-deploy
|
e286d7d0cf6e161708909c91ea7f99aacf08c7c4
|
[
"MIT"
] | 1
|
2018-01-03T03:13:24.000Z
|
2018-01-03T03:13:24.000Z
|
ceph_deploy/gatherkeys.py
|
zidarsk8/ceph-deploy
|
e286d7d0cf6e161708909c91ea7f99aacf08c7c4
|
[
"MIT"
] | null | null | null |
ceph_deploy/gatherkeys.py
|
zidarsk8/ceph-deploy
|
e286d7d0cf6e161708909c91ea7f99aacf08c7c4
|
[
"MIT"
] | null | null | null |
import os.path
import logging
from .cliutil import priority
from . import hosts
LOG = logging.getLogger(__name__)
def fetch_file(args, frompath, topath, _hosts):
if os.path.exists(topath):
LOG.debug('Have %s', topath)
return True
else:
for hostname in _hosts:
LOG.debug('Checking %s for %s', hostname, frompath)
distro = hosts.get(hostname, username=args.username)
key = distro.conn.remote_module.get_file(
frompath.format(hostname=hostname)
)
if key is not None:
LOG.debug('Got %s key from %s.', topath, hostname)
                with open(topath, 'w') as f:
f.write(key)
return True
distro.conn.exit()
LOG.warning('Unable to find %s on %s', frompath, _hosts)
return False
def gatherkeys(args):
ret = 0
# client.admin
r = fetch_file(
args=args,
frompath='/etc/ceph/{cluster}.client.admin.keyring'.format(
cluster=args.cluster),
topath='{cluster}.client.admin.keyring'.format(
cluster=args.cluster),
_hosts=args.mon,
)
if not r:
ret = 1
# mon.
r = fetch_file(
args=args,
frompath='/var/lib/ceph/mon/%s-{hostname}/keyring' % args.cluster,
topath='{cluster}.mon.keyring'.format(cluster=args.cluster),
_hosts=args.mon,
)
if not r:
ret = 1
# bootstrap
for what in ['osd', 'mds']:
r = fetch_file(
args=args,
frompath='/var/lib/ceph/bootstrap-{what}/{cluster}.keyring'.format(
cluster=args.cluster,
what=what),
topath='{cluster}.bootstrap-{what}.keyring'.format(
cluster=args.cluster,
what=what),
_hosts=args.mon,
)
if not r:
ret = 1
return ret
@priority(40)
def make(parser):
"""
Gather authentication keys for provisioning new nodes.
"""
parser.add_argument(
'mon',
metavar='HOST',
nargs='+',
help='monitor host to pull keys from',
)
parser.set_defaults(
func=gatherkeys,
)
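# Hedged usage note: when wired into the ceph-deploy CLI through make() above,
# this subcommand is typically invoked as
#   ceph-deploy gatherkeys MON1 [MON2 ...]
# which pulls {cluster}.client.admin.keyring, {cluster}.mon.keyring and the
# bootstrap-osd/mds keyrings from the listed monitor hosts into the working directory.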
| 25.066667
| 79
| 0.537234
|
53708cdbd4d6ccf80a8d9cd9d2c8497cef8067b0
| 15,308
|
py
|
Python
|
python/ccxt/async_support/btcbox.py
|
bmschwartz/ccxt
|
9197b497ce765251efcd3ca2e750f78edd7c20c6
|
[
"MIT"
] | 2
|
2019-07-15T22:39:54.000Z
|
2021-05-15T16:13:00.000Z
|
python/ccxt/async_support/btcbox.py
|
bmschwartz/ccxt
|
9197b497ce765251efcd3ca2e750f78edd7c20c6
|
[
"MIT"
] | 3
|
2021-06-26T15:32:50.000Z
|
2021-06-28T12:07:01.000Z
|
python/ccxt/async_support/btcbox.py
|
bmschwartz/ccxt
|
9197b497ce765251efcd3ca2e750f78edd7c20c6
|
[
"MIT"
] | 2
|
2020-09-08T01:41:24.000Z
|
2021-04-30T00:07:59.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import InvalidNonce
from ccxt.base.precise import Precise
class btcbox(Exchange):
def describe(self):
return self.deep_extend(super(btcbox, self).describe(), {
'id': 'btcbox',
'name': 'BtcBox',
'countries': ['JP'],
'rateLimit': 1000,
'version': 'v1',
'has': {
'cancelOrder': True,
'CORS': False,
'createOrder': True,
'fetchBalance': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchTicker': True,
'fetchTickers': False,
'fetchTrades': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/87327317-98c55400-c53c-11ea-9a11-81f7d951cc74.jpg',
'api': 'https://www.btcbox.co.jp/api',
'www': 'https://www.btcbox.co.jp/',
'doc': 'https://blog.btcbox.jp/en/archives/8762',
'fees': 'https://support.btcbox.co.jp/hc/en-us/articles/360001235694-Fees-introduction',
},
'api': {
'public': {
'get': [
'depth',
'orders',
'ticker',
],
},
'private': {
'post': [
'balance',
'trade_add',
'trade_cancel',
'trade_list',
'trade_view',
'wallet',
],
},
},
'markets': {
'BTC/JPY': {'id': 'btc', 'symbol': 'BTC/JPY', 'base': 'BTC', 'quote': 'JPY', 'baseId': 'btc', 'quoteId': 'jpy', 'taker': 0.05 / 100, 'maker': 0.05 / 100},
'ETH/JPY': {'id': 'eth', 'symbol': 'ETH/JPY', 'base': 'ETH', 'quote': 'JPY', 'baseId': 'eth', 'quoteId': 'jpy', 'taker': 0.10 / 100, 'maker': 0.10 / 100},
'LTC/JPY': {'id': 'ltc', 'symbol': 'LTC/JPY', 'base': 'LTC', 'quote': 'JPY', 'baseId': 'ltc', 'quoteId': 'jpy', 'taker': 0.10 / 100, 'maker': 0.10 / 100},
'BCH/JPY': {'id': 'bch', 'symbol': 'BCH/JPY', 'base': 'BCH', 'quote': 'JPY', 'baseId': 'bch', 'quoteId': 'jpy', 'taker': 0.10 / 100, 'maker': 0.10 / 100},
},
'exceptions': {
'104': AuthenticationError,
'105': PermissionDenied,
'106': InvalidNonce,
'107': InvalidOrder, # price should be an integer
'200': InsufficientFunds,
'201': InvalidOrder, # amount too small
'202': InvalidOrder, # price should be [0 : 1000000]
'203': OrderNotFound,
'401': OrderNotFound, # cancel canceled, closed or non-existent order
'402': DDoSProtection,
},
})
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privatePostBalance(params)
result = {'info': response}
codes = list(self.currencies.keys())
for i in range(0, len(codes)):
code = codes[i]
currency = self.currency(code)
currencyId = currency['id']
free = currencyId + '_balance'
if free in response:
account = self.account()
used = currencyId + '_lock'
account['free'] = self.safe_string(response, free)
account['used'] = self.safe_string(response, used)
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
numSymbols = len(self.symbols)
if numSymbols > 1:
request['coin'] = market['baseId']
response = await self.publicGetDepth(self.extend(request, params))
return self.parse_order_book(response, symbol)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market is not None:
symbol = market['symbol']
last = self.safe_number(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': self.safe_number(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_number(ticker, 'vol'),
'quoteVolume': self.safe_number(ticker, 'volume'),
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
numSymbols = len(self.symbols)
if numSymbols > 1:
request['coin'] = market['baseId']
response = await self.publicGetTicker(self.extend(request, params))
return self.parse_ticker(response, market)
def parse_trade(self, trade, market=None):
timestamp = self.safe_timestamp(trade, 'date')
symbol = None
if market is not None:
symbol = market['symbol']
id = self.safe_string(trade, 'tid')
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
type = None
side = self.safe_string(trade, 'type')
return {
'info': trade,
'id': id,
'order': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
numSymbols = len(self.symbols)
if numSymbols > 1:
request['coin'] = market['baseId']
response = await self.publicGetOrders(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'amount': amount,
'price': price,
'type': side,
'coin': market['baseId'],
}
response = await self.privatePostTradeAdd(self.extend(request, params))
#
# {
# "result":true,
# "id":"11"
# }
#
return self.parse_order(response, market)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
# a special case for btcbox – default symbol is BTC/JPY
if symbol is None:
symbol = 'BTC/JPY'
market = self.market(symbol)
request = {
'id': id,
'coin': market['baseId'],
}
response = await self.privatePostTradeCancel(self.extend(request, params))
#
# {"result":true, "id":"11"}
#
return self.parse_order(response, market)
def parse_order_status(self, status):
statuses = {
# TODO: complete list
'part': 'open', # partially or not at all executed
'all': 'closed', # fully executed
'cancelled': 'canceled',
'closed': 'closed', # never encountered, seems to be bug in the doc
'no': 'closed', # not clarified in the docs...
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# {
# "id":11,
# "datetime":"2014-10-21 10:47:20",
# "type":"sell",
# "price":42000,
# "amount_original":1.2,
# "amount_outstanding":1.2,
# "status":"closed",
# "trades":[]
# }
#
id = self.safe_string(order, 'id')
datetimeString = self.safe_string(order, 'datetime')
timestamp = None
if datetimeString is not None:
timestamp = self.parse8601(order['datetime'] + '+09:00') # Tokyo time
amount = self.safe_number(order, 'amount_original')
remaining = self.safe_number(order, 'amount_outstanding')
price = self.safe_number(order, 'price')
# status is set by fetchOrder method only
status = self.parse_order_status(self.safe_string(order, 'status'))
# fetchOrders do not return status, use heuristic
if status is None:
if remaining is not None and remaining == 0:
status = 'closed'
trades = None # todo: self.parse_trades(order['trades'])
symbol = None
if market is not None:
symbol = market['symbol']
side = self.safe_string(order, 'type')
return self.safe_order({
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'amount': amount,
'remaining': remaining,
'filled': None,
'side': side,
'type': None,
'timeInForce': None,
'postOnly': None,
'status': status,
'symbol': symbol,
'price': price,
'stopPrice': None,
'cost': None,
'trades': trades,
'fee': None,
'info': order,
'average': None,
})
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
# a special case for btcbox – default symbol is BTC/JPY
if symbol is None:
symbol = 'BTC/JPY'
market = self.market(symbol)
request = self.extend({
'id': id,
'coin': market['baseId'],
}, params)
response = await self.privatePostTradeView(self.extend(request, params))
return self.parse_order(response, market)
async def fetch_orders_by_type(self, type, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
# a special case for btcbox – default symbol is BTC/JPY
if symbol is None:
symbol = 'BTC/JPY'
market = self.market(symbol)
request = {
'type': type, # 'open' or 'all'
'coin': market['baseId'],
}
response = await self.privatePostTradeList(self.extend(request, params))
orders = self.parse_orders(response, market, since, limit)
# status(open/closed/canceled) is None
# btcbox does not return status, but we know it's 'open' as we queried for open orders
if type == 'open':
for i in range(0, len(orders)):
orders[i]['status'] = 'open'
return orders
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_type('all', symbol, since, limit, params)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_type('open', symbol, since, limit, params)
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.version + '/' + path
if api == 'public':
if params:
url += '?' + self.urlencode(params)
else:
self.check_required_credentials()
nonce = str(self.nonce())
query = self.extend({
'key': self.apiKey,
'nonce': nonce,
}, params)
request = self.urlencode(query)
secret = self.hash(self.encode(self.secret))
query['signature'] = self.hmac(self.encode(request), self.encode(secret))
body = self.urlencode(query)
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # resort to defaultErrorHandler
# typical error response: {"result":false,"code":"401"}
if httpCode >= 400:
return # resort to defaultErrorHandler
result = self.safe_value(response, 'result')
if result is None or result is True:
return # either public API(no error codes expected) or success
code = self.safe_value(response, 'code')
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions, code, feedback)
raise ExchangeError(feedback) # unknown message
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None, config={}, context={}):
response = await self.fetch2(path, api, method, params, headers, body, config, context)
if isinstance(response, basestring):
# sometimes the exchange returns whitespace prepended to json
response = self.strip(response)
if not self.is_json_encoded_object(response):
raise ExchangeError(self.id + ' ' + response)
response = json.loads(response)
return response
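# Hedged, standalone sketch of the private-API signing scheme used in sign() above.
# It assumes ccxt's default digests (self.hash -> MD5 hex, self.hmac -> HMAC-SHA256
# hex); verify against the base Exchange class before relying on it. The key,
# secret and nonce values passed in are placeholders supplied by the caller.
def _btcbox_signature_sketch(api_key, api_secret, nonce, params):
    import hashlib
    import hmac as hmac_module
    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode  # Python 2
    # mirror sign(): key and nonce first, then the request params
    query = {'key': api_key, 'nonce': nonce}
    query.update(params)
    request = urlencode(query)
    hashed_secret = hashlib.md5(api_secret.encode('utf-8')).hexdigest()
    query['signature'] = hmac_module.new(hashed_secret.encode('utf-8'), request.encode('utf-8'), hashlib.sha256).hexdigest()
    return urlencode(query)  # becomes the x-www-form-urlencoded POST body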
| 39.658031
| 170
| 0.532009
|
51e0251319938e5451326d1ec5a6cb80efbe706b
| 12,371
|
py
|
Python
|
openapps/userialbridge/userialbridge_tx_serial.py
|
renfernand/openwsn-fw
|
1d85821f455feae6f0bb4d72bdbd2c3f1a5379de
|
[
"BSD-3-Clause"
] | null | null | null |
openapps/userialbridge/userialbridge_tx_serial.py
|
renfernand/openwsn-fw
|
1d85821f455feae6f0bb4d72bdbd2c3f1a5379de
|
[
"BSD-3-Clause"
] | null | null | null |
openapps/userialbridge/userialbridge_tx_serial.py
|
renfernand/openwsn-fw
|
1d85821f455feae6f0bb4d72bdbd2c3f1a5379de
|
[
"BSD-3-Clause"
] | null | null | null |
import serial
import threading
import traceback
import time
import logging
import socket
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
#============================ defines =========================================
COMPORT = 'COM5'
MODE = 'udp' # 'periodic' or 'udp'
#============================ classes =========================================
class PeriodicTransmitter(threading.Thread):
def __init__(self,moteProbe):
self.moteProbe = moteProbe
self.counter = 0
threading.Thread.__init__(self)
self.name = "PeriodicTransmitter"
self.start()
def run(self):
while True:
time.sleep(1.000)
msgToSend = 'B'+chr(ord('a')+self.counter)*32
self.moteProbe.send(msgToSend)
self.counter = (self.counter+1)%26
log.debug('trigger sent {0}'.format(msgToSend))
class UdpTransmitter(threading.Thread):
def __init__(self,moteProbe):
self.moteProbe = moteProbe
threading.Thread.__init__(self)
self.name = "UdpTransmitter"
self.start()
def run(self):
sock = socket.socket(
socket.AF_INET, # IPv4
socket.SOCK_DGRAM, # UDP
)
sock.bind(('', 3000))
while True:
(msgRx,addr) = sock.recvfrom(1024)
assert len(msgRx)==32
self.moteProbe.send('B'+msgRx)
class MoteProbe(threading.Thread):
XOFF = 0x13
XON = 0x11
XONXOFF_ESCAPE = 0x12
XONXOFF_MASK = 0x10
# XOFF is transmitted as [XONXOFF_ESCAPE, XOFF^XONXOFF_MASK]==[0x12,0x13^0x10]==[0x12,0x03]
# XON is transmitted as [XONXOFF_ESCAPE, XON^XONXOFF_MASK]==[0x12,0x11^0x10]==[0x12,0x01]
# XONXOFF_ESCAPE is transmitted as [XONXOFF_ESCAPE, XONXOFF_ESCAPE^XONXOFF_MASK]==[0x12,0x12^0x10]==[0x12,0x02]
def __init__(self,serialport=None):
# store params
self.serialport = serialport
# local variables
self.hdlc = OpenHdlc()
self.lastRxByte = self.hdlc.HDLC_FLAG
self.busyReceiving = False
self.inputBuf = ''
self.dataLock = threading.Lock()
self.dataToSend = None
self.num_rx_position = 0
self.num_tx_action = 0
self.polulu_ADs = []
self.buf_positions = []
self.buf_host_processing_delay = []
self.buf_time_since_last = []
self.pid_P = 0
self.pid_I = 0
self.pid_D = 0
self.last_pid_P = 0
# flag to permit exit from read loop
self.goOn = True
# initialize the parent class
threading.Thread.__init__(self)
# give this thread a name
self.name = 'moteProbe@'+self.serialport
# start myself
self.start()
def formatBuf(self,buf):
if type(buf)==str:
buf = [ord(b) for b in buf]
return '({0:>2}B) {1}'.format(
len(buf),
'-'.join(["%02x" % b for b in buf]),
)
#======================== thread ==========================================
def run(self):
try:
while self.goOn: # open serial port
self.serial = serial.Serial(
port = self.serialport,
baudrate = 115200,
xonxoff = True,
)
while self.goOn: # read bytes from serial port
try:
rxByte = self.serial.read(1)
except Exception as err:
log.error(err)
time.sleep(1)
break
else:
if (
(not self.busyReceiving) and
self.lastRxByte==self.hdlc.HDLC_FLAG and
rxByte!=self.hdlc.HDLC_FLAG
):
# start of frame
self.busyReceiving = True
self.xonxoffEscaping = False
self.inputBuf = self.hdlc.HDLC_FLAG
self._addToInputBuf(rxByte)
elif (
self.busyReceiving and
rxByte!=self.hdlc.HDLC_FLAG
):
# middle of frame
self._addToInputBuf(rxByte)
elif (
self.busyReceiving and
rxByte==self.hdlc.HDLC_FLAG
):
# end of frame
self.busyReceiving = False
self._addToInputBuf(rxByte)
log.debug('RX: '+self.formatBuf(self.inputBuf))
try:
tempBuf = self.inputBuf
self.inputBuf = self.hdlc.dehdlcify(self.inputBuf)
except Exception as err:
log.error('{0}: invalid serial frame: {2} {1}'.format(self.name, err, tempBuf))
else:
self._handle_input(self.inputBuf)
self.lastRxByte = rxByte
except Exception:
log.critical(traceback.print_exc())
#======================== public ==========================================
def send(self,outputBuf):
# frame with HDLC
hdlcData = self.hdlc.hdlcify(outputBuf)
# write to serial
self.serial.write(hdlcData)
def close(self):
self.goOn = False
#======================== private =========================================
def _addToInputBuf(self,byte):
if byte==chr(self.XONXOFF_ESCAPE):
self.xonxoffEscaping = True
else:
if self.xonxoffEscaping==True:
self.inputBuf += chr(ord(byte)^self.XONXOFF_MASK)
self.xonxoffEscaping=False
else:
self.inputBuf += byte
def _handle_input(self,inputBuf):
if inputBuf[0] == ord('E'):
if inputBuf[4]==0x39:
                print('{0}: CRC error'.format(time.time()))
            else:
                print(inputBuf)
class OpenHdlc(object):
HDLC_FLAG = '\x7e'
HDLC_FLAG_ESCAPED = '\x5e'
HDLC_ESCAPE = '\x7d'
HDLC_ESCAPE_ESCAPED = '\x5d'
HDLC_CRCINIT = 0xffff
HDLC_CRCGOOD = 0xf0b8
FCS16TAB = (
0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78,
)
#============================ public ======================================
def hdlcify(self,inBuf):
'''
        Build an hdlc frame: append the FCS-16 checksum, byte-stuff the result
        and wrap it in HDLC flags (no address or control bytes are added).
'''
# make copy of input
outBuf = inBuf[:]
# calculate CRC
crc = self.HDLC_CRCINIT
for b in outBuf:
crc = self._crcIteration(crc,b)
crc = 0xffff-crc
# append CRC
outBuf = outBuf + chr(crc & 0xff) + chr((crc & 0xff00) >> 8)
# stuff bytes
outBuf = outBuf.replace(self.HDLC_ESCAPE, self.HDLC_ESCAPE+self.HDLC_ESCAPE_ESCAPED)
outBuf = outBuf.replace(self.HDLC_FLAG, self.HDLC_ESCAPE+self.HDLC_FLAG_ESCAPED)
# add flags
outBuf = self.HDLC_FLAG + outBuf + self.HDLC_FLAG
return outBuf
def dehdlcify(self,inBuf):
'''
Parse an hdlc frame.
        :returns: the extracted frame as a list of byte values; raises an
            Exception on a wrong checksum or a too-short packet
'''
assert inBuf[ 0]==self.HDLC_FLAG
assert inBuf[-1]==self.HDLC_FLAG
# make copy of input
outBuf = inBuf[:]
# remove flags
outBuf = outBuf[1:-1]
# unstuff
outBuf = outBuf.replace(self.HDLC_ESCAPE+self.HDLC_FLAG_ESCAPED, self.HDLC_FLAG)
outBuf = outBuf.replace(self.HDLC_ESCAPE+self.HDLC_ESCAPE_ESCAPED, self.HDLC_ESCAPE)
if len(outBuf)<2:
raise Exception('packet too short')
# check CRC
crc = self.HDLC_CRCINIT
for b in outBuf:
crc = self._crcIteration(crc,b)
if crc!=self.HDLC_CRCGOOD:
raise Exception('wrong CRC')
# remove CRC
outBuf = outBuf[:-2] # remove CRC
return [ord(b) for b in outBuf]
#============================ private =====================================
def _crcIteration(self,crc,b):
return (crc>>8)^self.FCS16TAB[((crc^(ord(b))) & 0xff)]
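# Hedged self-test sketch, illustrative only: shows that hdlcify()/dehdlcify()
# round-trip a payload. The module above uses Python 2 style str-based byte
# handling, so this check targets Python 2 as well.
def _hdlc_roundtrip_check():
    hdlc = OpenHdlc()
    payload = 'B' + 'a' * 32  # same shape as the frames PeriodicTransmitter sends
    framed = hdlc.hdlcify(payload)
    assert framed[0] == hdlc.HDLC_FLAG and framed[-1] == hdlc.HDLC_FLAG
    assert hdlc.dehdlcify(framed) == [ord(b) for b in payload]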
#============================ main ============================================
def main():
# start the threads
mp = MoteProbe(COMPORT)
if MODE=='periodic':
t = PeriodicTransmitter(mp)
elif MODE=='udp':
t = UdpTransmitter(mp)
else:
raise SystemError()
if __name__=="__main__":
main()
| 37.716463
| 116
| 0.480317
|
c0ff35d293d3e7af76a836e6a960e5d881d845b8
| 11,794
|
py
|
Python
|
preprocessing/annotate_variants/annot_variants.py
|
keoughkath/AlleleAnalyzer
|
854f16edf3053e9319c73a23cb83b93934ad0cd5
|
[
"MIT"
] | 11
|
2018-09-21T16:49:19.000Z
|
2022-03-03T19:29:16.000Z
|
preprocessing/annotate_variants/annot_variants.py
|
keoughkath/ExcisionFinder
|
a33adc53f515fe5f00e519b703dd9abf006b2804
|
[
"MIT"
] | 26
|
2018-03-01T04:58:17.000Z
|
2018-08-30T20:49:30.000Z
|
preprocessing/annotate_variants/annot_variants.py
|
keoughkath/AlleleAnalyzer
|
854f16edf3053e9319c73a23cb83b93934ad0cd5
|
[
"MIT"
] | 1
|
2020-01-28T20:06:02.000Z
|
2020-01-28T20:06:02.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
annot_variants.py generates a dataframe that stores annotations for each variant in
the specified locus and genome that tell us whether the variant generates allele-specific
sgRNA sites for the Cas variety/varieties specified. Written in Python v 3.6.1.
Kathleen Keough et al. 2018.
Usage:
annot_variants.py [-v] <gens_file> <cas> <pams_dir> <ref_genome_fasta> <out> [--guide_len=<S>]
annot_variants.py -C | --cas-list
Arguments:
gens_file Explicit genotypes file generated by get_gens_df.py
cas Types of cas, comma-separated.
pams_dir Directory where pam locations in ref_genome are located.
ref_genome_fasta Fasta file for reference genome.
out Prefix for output files.
Options:
-C --cas-list List available cas types and exits.
-v Verbose mode.
--guide_len=<S> Guide length, commonly 20 bp, for annotating guides near a PAM [default: 20].
"""
import pandas as pd
import numpy as np
from docopt import docopt
import os, sys, logging
from collections import Counter
from pyfaidx import Fasta
import regex
__version__ = "1.0.0"
# 3 and 5 prime cas lists
# Get absolute path for gen_targ_dfs.py, and edit it for cas_object.py
ef_path = os.path.dirname(os.path.realpath(__file__))
cas_obj_path = ef_path.replace("preprocessing/annotate_variants", "scripts/")
sys.path.append(cas_obj_path)
metadata_path = ef_path.replace("annotate_variants", "")
sys.path.append(metadata_path)
# Import cas_object
import cas_object as cas_obj
from get_metadata import add_metadata
def norm_chr(chrom_str, gens_chrom):
"""
Returns the chromosome string that matches the chromosome annotation of the input gens file
"""
chrom_str = str(chrom_str)
if not gens_chrom:
return chrom_str.replace("chr", "")
elif gens_chrom and not chrom_str.startswith("chr"):
return "chr" + chrom_str
else:
return chrom_str
def get_range_upstream(pam_pos, guide_len):
"""
Get positions N bp upstream, i.e. for forward 3' PAMs or reverse 5' PAMs
:param pam_pos: position of PAM, int.
:return: sgRNA seed region positions, set of ints.
"""
sgrna = set(range(pam_pos - guide_len, pam_pos))
return sgrna
def get_range_downstream(pam_pos, guide_len):
"""
Get positions N bp downstream, i.e. for forward 5' PAMs or reverse 3' PAMs
:param pam_pos: position of PAM, int.
:return: sgRNA seed region positions, set of ints.
"""
sgrna = set(range(pam_pos + 1, pam_pos + (guide_len + 1)))
return sgrna
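# Hedged sanity check, illustrative only: for a PAM at position 100 and a 20 bp
# guide, the upstream helper covers positions 80-99 (the seed region 5' of a
# forward-strand 3' PAM) and the downstream helper covers positions 101-120.
assert get_range_upstream(100, 20) == set(range(80, 100))
assert get_range_downstream(100, 20) == set(range(101, 121))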
def find_spec_pams(cas_obj, python_string, orient="3prime"):
# orient specifies whether this is a 3prime PAM (e.g. Cas9, PAM seq 3' of sgRNA)
# or a 5prime PAM (e.g. cpf1, PAM 5' of sgRNA)
# get sequence
sequence = python_string
# get PAM sites (the five prime three prime thing will need to be reversed for cpf1)
def get_pam_fiveprime(pam_regex, sequence):
starts = set()
for pam in regex.finditer(
pam_regex, sequence, regex.IGNORECASE, overlapped=True
):
starts.add(pam.start() + 1)
return set(starts)
def get_pam_threeprime(pam_regex, sequence):
starts = set()
for pam in regex.finditer(
pam_regex, sequence, regex.IGNORECASE, overlapped=True
):
starts.add(pam.end())
return set(starts)
if orient == "3prime":
for_starts = get_pam_fiveprime(cas_obj.forwardPam_regex(), sequence)
rev_starts = get_pam_threeprime(cas_obj.reversePam_regex(), sequence)
elif orient == "5prime":
for_starts = get_pam_threeprime(cas_obj.forwardPam_regex(), sequence)
rev_starts = get_pam_fiveprime(cas_obj.reversePam_regex(), sequence)
return (for_starts, rev_starts)
def makes_breaks_pam(cas_obj, chrom, pos, ref, alt, ref_genome):
"""
Determine if cas in question makes or breaks PAM sites.
:param chrom: chromosome, int.
:param pos: position, int.
:param ref: ref genotype, str.
:param alt: alt genotype, str.
:param ref_genome: ref_genome fasta file, fasta.
:return:
"""
makes_pam = False
breaks_pam = False
var = pos
if "<" in alt:
return makes_pam, breaks_pam
# if alt is not a special case (CNV or SV), continue checking the new sequence
ref_seq = ref_genome[str(chrom)][pos - 11 : pos + 10]
if len(ref) > len(alt): # handles deletions
alt_seq = (
ref_genome[str(chrom)][var - 11 : var - 1]
+ alt
+ ref_genome[str(chrom)][
var + len(ref) + len(alt) - 2 : var + len(ref) + len(alt) - 2 + 10
]
)
else:
alt_seq = (
ref_genome[str(chrom)][var - 11 : var - 1]
+ alt
+ ref_genome[str(chrom)][var + len(alt) - 1 : var + len(alt) - 1 + 10]
)
if cas_obj.primeness == "5'":
ref_pams_for, ref_pams_rev = find_spec_pams(cas_obj, ref_seq, orient="5prime")
alt_pams_for, alt_pams_rev = find_spec_pams(cas_obj, alt_seq, orient="5prime")
else:
ref_pams_for, ref_pams_rev = find_spec_pams(cas_obj, ref_seq)
alt_pams_for, alt_pams_rev = find_spec_pams(cas_obj, alt_seq)
if (
len(alt_pams_for) - len(ref_pams_for) > 0
or len(alt_pams_rev) - len(ref_pams_rev) > 0
):
makes_pam = True
elif (
len(ref_pams_for) - len(alt_pams_for) > 0
or len(ref_pams_rev) - len(alt_pams_rev) > 0
):
breaks_pam = True
return makes_pam, breaks_pam
def get_made_broke_pams(df, chrom, ref_genome):
"""
Apply makes_breaks_pams to a df.
:param df: gens df generated by get_chr_tables.sh, available on EF github.
:param chrom: chromosome currently being analyzed.
:param ref_genome: ref_genome fasta, pyfaidx format.
:return: dataframe with indicators for whether each variant makes/breaks PAMs, pd df.
"""
FULL_CAS_LIST = cas_obj.get_cas_list(os.path.join(cas_obj_path, "CAS_LIST.txt"))
for cas in cas_list:
if cas not in FULL_CAS_LIST:
logging.info(f"Skipping {cas}, not in CAS_LIST.txt")
continue
current_cas = cas_obj.get_cas_enzyme(
cas, os.path.join(cas_obj_path, "CAS_LIST.txt")
)
makes, breaks = zip(
*df.apply(
lambda row: makes_breaks_pam(
current_cas, chrom, row["pos"], row["ref"], row["alt"], ref_genome
),
axis=1,
)
)
df[f"makes_{cas}"] = makes
df[f"breaks_{cas}"] = breaks
return df
def split_gens(gens_df, chroms):
"""
Takes in a gens file with multiple loci, and splits it based on chromosome.
:param gens_df: gens dataframe.
:param chroms: list of chromosome notations.
    :return: list of dataframes, each with a single chromosome notation in its chrom column.
"""
return [gens_df.loc[gens_df["chrom"] == c] for c in chroms]
def main(args):
logging.info(args)
out = args["<out>"]
pams_dir = args["<pams_dir>"]
gens = args["<gens_file>"]
guide_len = int(args["--guide_len"])
ref_genome = Fasta(args["<ref_genome_fasta>"], as_raw=True)
global cas_list
cas_list = list(args["<cas>"].split(","))
# Read in gens and chroms file, and see if gens file needs to be split.
gens = pd.read_hdf(gens, "all")
if gens.empty:
print('No variants in this region.')
exit()
chroms = dict(Counter(gens.chrom)).keys()
if len(chroms) > 1:
gens = split_gens(gens, list(chroms))
else:
gens = [gens]
fasta_chrom = list(ref_genome.keys())[0].startswith("chr")
chroms = [norm_chr(ch, fasta_chrom) for ch in list(chroms)]
# # Add check to make sure the correct FASTA file was loaded. - this is too glitchy
# if set(chroms) != set(list(ref_genome.keys())):
# logging.error(f"{args['<gens_file>']} chromosomes/notations differ from {args['<ref_genome_fasta>']}: {chroms} and {list(ref_genome.keys())}.")
# exit(1)
# save locations of PAM proximal variants to dictionary
pam_prox_vars = {}
# get variants within sgRNA region for 3 prime PAMs (20 bp upstream of for pos and vice versa)
FULL_CAS_LIST = cas_obj.get_cas_list(os.path.join(cas_obj_path, "CAS_LIST.txt"))
    # iterate over a copy so that removals do not skip the following entry
    for cas in list(cas_list):
        if cas not in FULL_CAS_LIST:
            logging.info(f"Skipping {cas}, not in CAS_LIST.txt")
            cas_list.remove(cas)
combined_df = []
for i, chrom in enumerate(chroms):
chr_variants = set(gens[i]["pos"].tolist())
for cas in cas_list:
current_cas = cas_obj.get_cas_enzyme(
cas, os.path.join(cas_obj_path, "CAS_LIST.txt")
)
logging.info(f"Evaluating {current_cas.name} at {chrom}.")
cas_prox_vars = []
pam_dict = {}
pam_for_pos = np.load(
os.path.join(pams_dir, f"{chrom}_{cas}_pam_sites_for.npy")
).tolist()
pam_rev_pos = np.load(
os.path.join(pams_dir, f"{chrom}_{cas}_pam_sites_rev.npy")
).tolist()
if current_cas.primeness == "3'":
for pos in pam_for_pos:
prox_vars = set(get_range_upstream(pos, guide_len)) & chr_variants
cas_prox_vars.extend(prox_vars)
pam_dict[pos] = prox_vars
for pos in pam_rev_pos:
prox_vars = set(get_range_downstream(pos, guide_len)) & chr_variants
cas_prox_vars.extend(prox_vars)
pam_dict[pos] = prox_vars
elif current_cas.primeness == "5'":
for pos in pam_for_pos:
prox_vars = set(get_range_downstream(pos, guide_len)) & chr_variants
cas_prox_vars.extend(prox_vars)
pam_dict[pos] = prox_vars
for pos in pam_rev_pos:
prox_vars = set(get_range_upstream(pos, guide_len)) & chr_variants
cas_prox_vars.extend(prox_vars)
pam_dict[pos] = prox_vars
pam_prox_vars[cas] = cas_prox_vars
chrdf = get_made_broke_pams(gens[i], chrom, ref_genome)
for cas in cas_list:
# print(cas)
spec_pam_prox_vars = pam_prox_vars[cas]
chrdf[f"var_near_{cas}"] = chrdf["pos"].isin(spec_pam_prox_vars)
cas_cols = []
for cas in cas_list:
prelim_cols = [
w.replace("cas", cas)
for w in ["makes_cas", "breaks_cas", "var_near_cas"]
]
cas_cols.extend(prelim_cols)
keepcols = ["chrom", "pos", "ref", "alt"] + cas_cols
chrdf = chrdf[keepcols]
combined_df.append(chrdf)
combined_df = pd.concat(combined_df)
combined_df.to_hdf(
f"{out}.h5",
"all",
mode="w",
format="table",
data_columns=True,
complib="blosc",
)
add_metadata(
f"{out}.h5", args, os.path.basename(__file__), __version__, "Annotation"
)
logging.info("Done.")
if __name__ == "__main__":
arguments = docopt(__doc__, version=__version__)
if arguments["--cas-list"]:
cas_obj.print_cas_types()
exit()
if arguments["-v"]:
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s %(name)s:%(levelname)s ]%(message)s",
)
else:
logging.basicConfig(
level=logging.ERROR,
format="[%(asctime)s %(name)s:%(levelname)s ]%(message)s",
)
main(arguments)
| 33.988473
| 153
| 0.614974
|
1f1a69b171e021487051c4be04265c9272f44e8f
| 539
|
py
|
Python
|
Python/ArmstrongNumber.py
|
notakbuzz/All_Program_helper
|
f87aa346f1536854912c00989aab63d0eaa68cc9
|
[
"MIT"
] | 16
|
2021-10-03T11:15:49.000Z
|
2021-10-31T04:40:24.000Z
|
Python/ArmstrongNumber.py
|
notakbuzz/All_Program_helper
|
f87aa346f1536854912c00989aab63d0eaa68cc9
|
[
"MIT"
] | 232
|
2021-10-02T14:51:43.000Z
|
2021-11-14T08:23:27.000Z
|
Python/ArmstrongNumber.py
|
notakbuzz/All_Program_helper
|
f87aa346f1536854912c00989aab63d0eaa68cc9
|
[
"MIT"
] | 166
|
2021-10-02T13:56:34.000Z
|
2021-10-31T17:56:34.000Z
|
# Python program to ask the user for a range and display all Armstrong numbers (three-digit definition: the sum of the cubes of the digits) in that interval
# take input from the user
lower = int(input("Enter lower range: "))
upper = int(input("Enter upper range: "))
for num in range(lower,upper + 1):
# initialize sum
sum = 0
# find the sum of the cube of each digit
temp = num
while temp > 0:
digit = temp % 10
sum += digit ** 3
temp //= 10
if num == sum:
print(num)
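# Hedged generalization, illustrative only: the loop above uses the three-digit
# (cube) definition. The general Armstrong/narcissistic test raises each digit to
# the number of digits, e.g. 9474 = 9**4 + 4**4 + 7**4 + 4**4.
def is_armstrong(n):
    digits = str(n)
    return n == sum(int(d) ** len(digits) for d in digits)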
| 26.95
| 99
| 0.525046
|
87dc1d293f67f137dca76cf9419eaa94e6ab8c05
| 531
|
py
|
Python
|
tools/tileset/tileset.py
|
stoiandan/OpenPokemonRed
|
3ce2483d4620255c7fe182012f2821be3121c375
|
[
"MIT"
] | 204
|
2020-11-04T07:32:28.000Z
|
2022-01-16T20:39:22.000Z
|
tools/tileset/tileset.py
|
stoiandan/OpenPokemonRed
|
3ce2483d4620255c7fe182012f2821be3121c375
|
[
"MIT"
] | 11
|
2020-10-26T07:53:24.000Z
|
2021-01-07T19:03:09.000Z
|
tools/tileset/tileset.py
|
stoiandan/OpenPokemonRed
|
3ce2483d4620255c7fe182012f2821be3121c375
|
[
"MIT"
] | 14
|
2020-11-21T22:02:28.000Z
|
2022-02-15T15:26:55.000Z
|
import cv2
import os
import shutil
if os.path.exists("result"):
shutil.rmtree("result")
os.mkdir("result")
# https://www.spriters-resource.com/fullview/63033/
img = cv2.imread("tilesets.png")
x0 = 2
width = 16
def height(i: int) -> int:
if i == 18:
return 2
return 6
def calc_y0(i: int) -> int:
return 176 + 58*i
for i in range(19):
x0 = 2
y0 = calc_y0(i)
x1 = x0 + width*8
y1 = y0 + height(i)*8
tile = img[y0:y1, x0:x1]
cv2.imwrite("./result/tileset_{}.png".format(i), tile)
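# Hedged sanity check, illustrative only (assumes tilesets.png loaded correctly):
# after the loop, tile still holds the last slice (i == 18), which should be
# height(18)*8 = 16 px tall and width*8 = 128 px wide.
assert tile.shape[0] == height(18) * 8 and tile.shape[1] == width * 8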
| 17.129032
| 58
| 0.59887
|
e24e3030594663fc3dae8c2faf418de5aadfe661
| 69,786
|
py
|
Python
|
nuagevsdsim/common/utils.py
|
pdellaert/vspk-sim
|
459a84366a9bdde82d74aca18ea866e3d55d62ee
|
[
"BSD-3-Clause"
] | null | null | null |
nuagevsdsim/common/utils.py
|
pdellaert/vspk-sim
|
459a84366a9bdde82d74aca18ea866e3d55d62ee
|
[
"BSD-3-Clause"
] | null | null | null |
nuagevsdsim/common/utils.py
|
pdellaert/vspk-sim
|
459a84366a9bdde82d74aca18ea866e3d55d62ee
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017, Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Nuage VSD Sim utils
"""
import ConfigParser
import logging
import re
import sys
import uuid
from vspk import v5_0 as vsdk
NUAGE_API_DATA = {
'ROOT_UUIDS': {
'csproot_user': '',
'csp_enterprise': ''
},
'addressmap': {},
'patnatpool_addressmap': {'_TYPE': 'child'},
'addressrange': {},
'l2domaintemplate_addressrange': {'_TYPE': 'child'},
'subnet_addressrange': {'_TYPE': 'child'},
'sharednetworkresource_addressrange': {'_TYPE': 'child'},
'l2domain_addressrange': {'_TYPE': 'child'},
'subnettemplate_addressrange': {'_TYPE': 'child'},
'aggregatemetadata': {},
'vport_aggregatemetadata': {'_TYPE': 'child'},
'alarm': {},
'domain_alarm': {'_TYPE': 'child'},
'vsgredundantport_alarm': {'_TYPE': 'child'},
'vm_alarm': {'_TYPE': 'child'},
'ssidconnection_alarm': {'_TYPE': 'child'},
'vsc_alarm': {'_TYPE': 'child'},
'vsd_alarm': {'_TYPE': 'child'},
'redundancygroup_alarm': {'_TYPE': 'child'},
'vport_alarm': {'_TYPE': 'child'},
'gateway_alarm': {'_TYPE': 'child'},
'tca_alarm': {'_TYPE': 'child'},
'container_alarm': {'_TYPE': 'child'},
'service_alarm': {'_TYPE': 'child'},
'port_alarm': {'_TYPE': 'child'},
'nsgredundancygroup_alarm': {'_TYPE': 'child'},
'shuntlink_alarm': {'_TYPE': 'child'},
'vrs_alarm': {'_TYPE': 'child'},
'hsc_alarm': {'_TYPE': 'child'},
'l2domain_alarm': {'_TYPE': 'child'},
'vlan_alarm': {'_TYPE': 'child'},
'wirelessport_alarm': {'_TYPE': 'child'},
'ikegatewayconnection_alarm': {'_TYPE': 'child'},
'nsport_alarm': {'_TYPE': 'child'},
'nsgateway_alarm': {'_TYPE': 'child'},
'enterprise_alarm': {'_TYPE': 'child'},
'allalarm': {},
'enterprise_allalarm': {'_TYPE': 'child'},
'allgateway': {},
'allredundancygroup': {},
'application': {},
'domain_application': {'_TYPE': 'child'},
'enterprise_application': {'_TYPE': 'child'},
'l2domain_application': {'_TYPE': 'child'},
'l7applicationsignature_application': {'_TYPE': 'child'},
'applicationbinding': {},
'applicationperformancemanagement_applicationbinding': {'_TYPE': 'child'},
'application_applicationbinding': {'_TYPE': 'member'},
'applicationperformancemanagement': {},
'performancemonitor_applicationperformancemanagement': {'_TYPE': 'member'},
'enterprise_applicationperformancemanagement': {'_TYPE': 'child'},
'applicationperformancemanagementbinding': {},
'applicationperformancemanagement_applicationperformancemanagementbinding': {'_TYPE': 'member'},
'domain_applicationperformancemanagementbinding': {'_TYPE': 'child'},
'l2domain_applicationperformancemanagementbinding': {'_TYPE': 'child'},
'autodiscoveredcluster': {},
'vcenterdatacenter_autodiscoveredcluster': {'_TYPE': 'child'},
'autodiscovereddatacenter': {},
'vcenter_autodiscovereddatacenter': {'_TYPE': 'child'},
'autodiscoveredgateway': {},
'autodiscoveredhypervisor': {},
'vcentercluster_autodiscoveredhypervisor': {'_TYPE': 'child'},
'vcenterdatacenter_autodiscoveredhypervisor': {'_TYPE': 'child'},
'avatar': {},
'user_avatar': {'_TYPE': 'child'},
'enterprise_avatar': {'_TYPE': 'child'},
'bfdsession': {},
'brconnection_bfdsession': {'_TYPE': 'child'},
'uplinkconnection_bfdsession': {'_TYPE': 'child'},
'bgpneighbor': {},
'subnet_bgpneighbor': {'_TYPE': 'child'},
'vport_bgpneighbor': {'_TYPE': 'child'},
'vlan_bgpneighbor': {'_TYPE': 'child'},
'bgppeer': {},
'hsc_bgppeer': {'_TYPE': 'child'},
'vsc_bgppeer': {'_TYPE': 'child'},
'bgpprofile': {},
'enterprise_bgpprofile': {'_TYPE': 'child'},
'bootstrap': {},
'nsgateway_bootstrap': {'_TYPE': 'child'},
'gateway_bootstrap': {'_TYPE': 'child'},
'bootstrapactivation': {},
'nsgateway_bootstrapactivation': {'_TYPE': 'child'},
'gateway_bootstrapactivation': {'_TYPE': 'child'},
'brconnection': {},
'vlan_brconnection': {'_TYPE': 'child'},
'vlantemplate_brconnection': {'_TYPE': 'child'},
'bridgeinterface': {},
'domain_bridgeinterface': {'_TYPE': 'child'},
'l2domain_bridgeinterface': {'_TYPE': 'child'},
'vport_bridgeinterface': {'_TYPE': 'child'},
'bulkstatistics': {},
'patnatpool_bulkstatistics': {'_TYPE': 'child'},
'captiveportalprofile': {},
'enterprise_captiveportalprofile': {'_TYPE': 'child'},
'certificate': {},
'cms': {},
'command': {},
'nsgateway_command': {'_TYPE': 'child'},
'component': {},
'vsd_component': {'_TYPE': 'child'},
'connectionendpoint': {},
'infrastructureaccessprofile_connectionendpoint': {'_TYPE': 'child'},
'container': {},
'subnet_container': {'_TYPE': 'child'},
'domain_container': {'_TYPE': 'child'},
'qos_container': {'_TYPE': 'child'},
'zone_container': {'_TYPE': 'child'},
'egressacltemplate_container': {'_TYPE': 'child'},
'l2domain_container': {'_TYPE': 'child'},
'vrs_container': {'_TYPE': 'child'},
'user_container': {'_TYPE': 'child'},
'enterprise_container': {'_TYPE': 'child'},
'vport_container': {'_TYPE': 'child'},
'ingressacltemplate_container': {'_TYPE': 'child'},
'containerinterface': {},
'subnet_containerinterface': {'_TYPE': 'child'},
'domain_containerinterface': {'_TYPE': 'child'},
'container_containerinterface': {'_TYPE': 'child'},
'zone_containerinterface': {'_TYPE': 'child'},
'l2domain_containerinterface': {'_TYPE': 'child'},
'vport_containerinterface': {'_TYPE': 'child'},
'containerresync': {},
'subnet_containerresync': {'_TYPE': 'child'},
'container_containerresync': {'_TYPE': 'child'},
'cosremarkingpolicy': {},
'cosremarkingpolicytable_cosremarkingpolicy': {'_TYPE': 'child'},
'cosremarkingpolicytable': {},
'enterprise_cosremarkingpolicytable': {'_TYPE': 'child'},
'csnatpool': {},
'link_csnatpool': {'_TYPE': 'child'},
'ctranslationmap': {},
'csnatpool_ctranslationmap': {'_TYPE': 'child'},
'customproperty': {},
'uplinkconnection_customproperty': {'_TYPE': 'child'},
'defaultgateway': {},
'subnet_defaultgateway': {'_TYPE': 'child'},
'demarcationservice': {},
'link_demarcationservice': {'_TYPE': 'child'},
'deploymentfailure': {},
'l2domain_deploymentfailure': {'_TYPE': 'child'},
'ingressprofile_deploymentfailure': {'_TYPE': 'child'},
'egressprofile_deploymentfailure': {'_TYPE': 'child'},
'redundancygroup_deploymentfailure': {'_TYPE': 'child'},
'vport_deploymentfailure': {'_TYPE': 'child'},
'bridgeinterface_deploymentfailure': {'_TYPE': 'child'},
'gateway_deploymentfailure': {'_TYPE': 'child'},
'destinationurl': {},
'tier_destinationurl': {'_TYPE': 'child'},
'dhcpoption': {},
'containerinterface_dhcpoption': {'_TYPE': 'child'},
'subnet_dhcpoption': {'_TYPE': 'child'},
'domain_dhcpoption': {'_TYPE': 'child'},
'zone_dhcpoption': {'_TYPE': 'child'},
'sharednetworkresource_dhcpoption': {'_TYPE': 'child'},
'hostinterface_dhcpoption': {'_TYPE': 'child'},
'bridgeinterface_dhcpoption': {'_TYPE': 'child'},
'vminterface_dhcpoption': {'_TYPE': 'child'},
'vport_dhcpoption': {'_TYPE': 'child'},
'l2domain_dhcpoption': {'_TYPE': 'child'},
'diskstat': {},
'domain': {},
'domain_domain': {'_TYPE': 'member'},
'enterprise_domain': {'_TYPE': 'child'},
'domaintemplate_domain': {'_TYPE': 'member'},
'firewallacl_domain': {'_TYPE': 'child'},
'domaintemplate': {},
'domain_domaintemplate': {'_TYPE': 'child'},
'enterprise_domaintemplate': {'_TYPE': 'child'},
'dscpforwardingclassmapping': {},
'dscpforwardingclasstable_dscpforwardingclassmapping': {'_TYPE': 'child'},
'dscpforwardingclasstable': {},
'enterprise_dscpforwardingclasstable': {'_TYPE': 'child'},
'dscpremarkingpolicy': {},
'dscpremarkingpolicytable_dscpremarkingpolicy': {'_TYPE': 'child'},
'dscpremarkingpolicytable': {},
'enterprise_dscpremarkingpolicytable': {'_TYPE': 'child'},
'ducgroup': {},
'ducgroupbinding': {},
'nsggroup_ducgroupbinding': {'_TYPE': 'child'},
'eamconfig': {},
'egressaclentrytemplate': {},
'domain_egressaclentrytemplate': {'_TYPE': 'child'},
'egressacltemplate_egressaclentrytemplate': {'_TYPE': 'child'},
'l2domain_egressaclentrytemplate': {'_TYPE': 'child'},
'mirrordestination_egressaclentrytemplate': {'_TYPE': 'child'},
'vport_egressaclentrytemplate': {'_TYPE': 'child'},
'egressacltemplate': {},
'l2domaintemplate_egressacltemplate': {'_TYPE': 'child'},
'domain_egressacltemplate': {'_TYPE': 'child'},
'domaintemplate_egressacltemplate': {'_TYPE': 'child'},
'l2domain_egressacltemplate': {'_TYPE': 'child'},
'egressadvfwdentrytemplate': {},
'egressadvfwdtemplate_egressadvfwdentrytemplate': {'_TYPE': 'child'},
'egressadvfwdtemplate': {},
'l2domaintemplate_egressadvfwdtemplate': {'_TYPE': 'child'},
'domain_egressadvfwdtemplate': {'_TYPE': 'child'},
'domaintemplate_egressadvfwdtemplate': {'_TYPE': 'child'},
'l2domain_egressadvfwdtemplate': {'_TYPE': 'child'},
'egressdomainfloatingipaclentrytemplate': {},
'egressdomainfloatingipacltemplate_egressdomainfloatingipaclentrytemplate': {'_TYPE': 'child'},
'egressdomainfloatingipacltemplate': {},
'domain_egressdomainfloatingipacltemplate': {'_TYPE': 'child'},
'domaintemplate_egressdomainfloatingipacltemplate': {'_TYPE': 'child'},
'egressprofile': {},
'redundancygroup_egressprofile': {'_TYPE': 'child'},
'gateway_egressprofile': {'_TYPE': 'child'},
'egressqospolicy': {},
'enterprise_egressqospolicy': {'_TYPE': 'child'},
'enterprise': {},
'enterpriseprofile_enterprise': {'_TYPE': 'child'},
'enterprisenetwork': {},
'saasapplicationtype_enterprisenetwork': {'_TYPE': 'child'},
'networkmacrogroup_enterprisenetwork': {'_TYPE': 'member'},
'enterprise_enterprisenetwork': {'_TYPE': 'child'},
'enterprisepermission': {},
'subnet_enterprisepermission': {'_TYPE': 'child'},
'vsgredundantport_enterprisepermission': {'_TYPE': 'child'},
'nsgredundancygroup_enterprisepermission': {'_TYPE': 'child'},
'nsredundantport_enterprisepermission': {'_TYPE': 'child'},
'service_enterprisepermission': {'_TYPE': 'child'},
'patnatpool_enterprisepermission': {'_TYPE': 'child'},
'sharednetworkresource_enterprisepermission': {'_TYPE': 'child'},
'vlan_enterprisepermission': {'_TYPE': 'child'},
'gateway_enterprisepermission': {'_TYPE': 'child'},
'nsgateway_enterprisepermission': {'_TYPE': 'child'},
'redundancygroup_enterprisepermission': {'_TYPE': 'child'},
'nsport_enterprisepermission': {'_TYPE': 'child'},
'port_enterprisepermission': {'_TYPE': 'child'},
'enterpriseprofile': {},
'enterprisesecureddata': {},
'enterprisesecurity_enterprisesecureddata': {'_TYPE': 'child'},
'enterprisesecurity': {},
'enterprise_enterprisesecurity': {'_TYPE': 'child'},
'eventlog': {},
'containerinterface_eventlog': {'_TYPE': 'child'},
'vsp_eventlog': {'_TYPE': 'child'},
'domain_eventlog': {'_TYPE': 'child'},
'policygroup_eventlog': {'_TYPE': 'child'},
'qos_eventlog': {'_TYPE': 'child'},
'autodiscoveredgateway_eventlog': {'_TYPE': 'child'},
'l2domaintemplate_eventlog': {'_TYPE': 'child'},
'addressrange_eventlog': {'_TYPE': 'child'},
'domaintemplate_eventlog': {'_TYPE': 'child'},
'vm_eventlog': {'_TYPE': 'child'},
'virtualip_eventlog': {'_TYPE': 'child'},
'vsc_eventlog': {'_TYPE': 'child'},
'multicastchannelmap_eventlog': {'_TYPE': 'child'},
'enterprise_eventlog': {'_TYPE': 'child'},
'redundancygroup_eventlog': {'_TYPE': 'child'},
'vport_eventlog': {'_TYPE': 'child'},
'port_eventlog': {'_TYPE': 'child'},
'tca_eventlog': {'_TYPE': 'child'},
'subnet_eventlog': {'_TYPE': 'child'},
'container_eventlog': {'_TYPE': 'child'},
'publicnetwork_eventlog': {'_TYPE': 'child'},
'service_eventlog': {'_TYPE': 'child'},
'floatingip_eventlog': {'_TYPE': 'child'},
'egressacltemplate_eventlog': {'_TYPE': 'child'},
'gateway_eventlog': {'_TYPE': 'child'},
'nsgredundancygroup_eventlog': {'_TYPE': 'child'},
'hostinterface_eventlog': {'_TYPE': 'child'},
'metadata_eventlog': {'_TYPE': 'child'},
'vrs_eventlog': {'_TYPE': 'child'},
'hsc_eventlog': {'_TYPE': 'child'},
'enterprisenetwork_eventlog': {'_TYPE': 'child'},
'permission_eventlog': {'_TYPE': 'child'},
'l2domain_eventlog': {'_TYPE': 'child'},
'vlan_eventlog': {'_TYPE': 'child'},
'wirelessport_eventlog': {'_TYPE': 'child'},
'staticroute_eventlog': {'_TYPE': 'child'},
'ipreservation_eventlog': {'_TYPE': 'child'},
'vminterface_eventlog': {'_TYPE': 'child'},
'user_eventlog': {'_TYPE': 'child'},
'redirectiontargettemplate_eventlog': {'_TYPE': 'child'},
'subnettemplate_eventlog': {'_TYPE': 'child'},
'group_eventlog': {'_TYPE': 'child'},
'ssidconnection_eventlog': {'_TYPE': 'child'},
'redirectiontarget_eventlog': {'_TYPE': 'child'},
'zone_eventlog': {'_TYPE': 'child'},
'license_eventlog': {'_TYPE': 'child'},
'enterpriseprofile_eventlog': {'_TYPE': 'child'},
'nsport_eventlog': {'_TYPE': 'child'},
'dhcpoption_eventlog': {'_TYPE': 'child'},
'policygrouptemplate_eventlog': {'_TYPE': 'child'},
'bridgeinterface_eventlog': {'_TYPE': 'child'},
'nsgateway_eventlog': {'_TYPE': 'child'},
'zonetemplate_eventlog': {'_TYPE': 'child'},
'multicastrange_eventlog': {'_TYPE': 'child'},
'vsd_eventlog': {'_TYPE': 'child'},
'ingressacltemplate_eventlog': {'_TYPE': 'child'},
'firewallacl': {},
'domain_firewallacl': {'_TYPE': 'child'},
'enterprise_firewallacl': {'_TYPE': 'child'},
'firewallrule': {},
'enterprise_firewallrule': {'_TYPE': 'child'},
'firewallacl_firewallrule': {'_TYPE': 'child'},
'floatingip': {},
'domain_floatingip': {'_TYPE': 'child'},
'forwardingpathlist': {},
'domain_forwardingpathlist': {'_TYPE': 'child'},
'forwardingpathlistentry': {},
'forwardingpathlist_forwardingpathlistentry': {'_TYPE': 'child'},
'gateway': {},
'redundancygroup_gateway': {'_TYPE': 'child'},
'l2domain_gateway': {'_TYPE': 'child'},
'enterprise_gateway': {'_TYPE': 'child'},
'gatewayredundantport': {},
'redundancygroup_gatewayredundantport': {'_TYPE': 'child'},
'gatewaysecureddata': {},
'gatewaysecurity_gatewaysecureddata': {'_TYPE': 'child'},
'gatewaysecurity': {},
'nsgateway_gatewaysecurity': {'_TYPE': 'child'},
'gateway_gatewaysecurity': {'_TYPE': 'child'},
'gatewayslocation': {},
'enterprise_gatewayslocation': {'_TYPE': 'child'},
'gatewaytemplate': {},
'enterprise_gatewaytemplate': {'_TYPE': 'child'},
'globalmetadata': {},
'containerinterface_globalmetadata': {'_TYPE': 'child'},
'groupkeyencryptionprofile_globalmetadata': {'_TYPE': 'child'},
'ipfilterprofile_globalmetadata': {'_TYPE': 'child'},
'qos_globalmetadata': {'_TYPE': 'child'},
'infrastructureaccessprofile_globalmetadata': {'_TYPE': 'child'},
'bgppeer_globalmetadata': {'_TYPE': 'child'},
'sharednetworkresource_globalmetadata': {'_TYPE': 'child'},
'virtualip_globalmetadata': {'_TYPE': 'child'},
'pspatmap_globalmetadata': {'_TYPE': 'child'},
'egressadvfwdentrytemplate_globalmetadata': {'_TYPE': 'child'},
'dscpforwardingclasstable_globalmetadata': {'_TYPE': 'child'},
'multicastchannelmap_globalmetadata': {'_TYPE': 'child'},
'enterprise_globalmetadata': {'_TYPE': 'child'},
'redundancygroup_globalmetadata': {'_TYPE': 'child'},
'tca_globalmetadata': {'_TYPE': 'child'},
'sapegressqosprofile_globalmetadata': {'_TYPE': 'child'},
'macfilterprofile_globalmetadata': {'_TYPE': 'child'},
'group_globalmetadata': {'_TYPE': 'child'},
'autodiscoveredgateway_globalmetadata': {'_TYPE': 'child'},
'zone_globalmetadata': {'_TYPE': 'child'},
'application_globalmetadata': {'_TYPE': 'child'},
'ikegatewayprofile_globalmetadata': {'_TYPE': 'child'},
'overlaymirrordestination_globalmetadata': {'_TYPE': 'child'},
'infrastructuregatewayprofile_globalmetadata': {'_TYPE': 'child'},
'diskstat_globalmetadata': {'_TYPE': 'child'},
'vnfinterface_globalmetadata': {'_TYPE': 'child'},
'eamconfig_globalmetadata': {'_TYPE': 'child'},
'location_globalmetadata': {'_TYPE': 'child'},
'enterprisesecurity_globalmetadata': {'_TYPE': 'child'},
'vrsconfig_globalmetadata': {'_TYPE': 'child'},
'ducgroup_globalmetadata': {'_TYPE': 'child'},
'enterprisenetwork_globalmetadata': {'_TYPE': 'child'},
'infrastructurevscprofile_globalmetadata': {'_TYPE': 'child'},
'permission_globalmetadata': {'_TYPE': 'child'},
'cosremarkingpolicy_globalmetadata': {'_TYPE': 'child'},
'ipreservation_globalmetadata': {'_TYPE': 'child'},
'redirectiontargettemplate_globalmetadata': {'_TYPE': 'child'},
'subnettemplate_globalmetadata': {'_TYPE': 'child'},
'applicationbinding_globalmetadata': {'_TYPE': 'child'},
'vnfdomainmapping_globalmetadata': {'_TYPE': 'child'},
'ikegatewayconnection_globalmetadata': {'_TYPE': 'child'},
'redirectiontarget_globalmetadata': {'_TYPE': 'child'},
'firewallacl_globalmetadata': {'_TYPE': 'child'},
'gatewayredundantport_globalmetadata': {'_TYPE': 'child'},
'networklayout_globalmetadata': {'_TYPE': 'child'},
'vnfinterfacedescriptor_globalmetadata': {'_TYPE': 'child'},
'patch_globalmetadata': {'_TYPE': 'child'},
'avatar_globalmetadata': {'_TYPE': 'child'},
'infrastructureevdfprofile_globalmetadata': {'_TYPE': 'child'},
'bootstrapactivation_globalmetadata': {'_TYPE': 'child'},
'licensestatus_globalmetadata': {'_TYPE': 'child'},
'gatewaytemplate_globalmetadata': {'_TYPE': 'child'},
'cms_globalmetadata': {'_TYPE': 'child'},
'ipv6filterprofile_globalmetadata': {'_TYPE': 'child'},
'vsp_globalmetadata': {'_TYPE': 'child'},
'saasapplicationtype_globalmetadata': {'_TYPE': 'child'},
'domain_globalmetadata': {'_TYPE': 'child'},
'netconfsession_globalmetadata': {'_TYPE': 'child'},
'netconfprofile_globalmetadata': {'_TYPE': 'child'},
'nsredundantport_globalmetadata': {'_TYPE': 'child'},
'dscpforwardingclassmapping_globalmetadata': {'_TYPE': 'child'},
'egressaclentrytemplate_globalmetadata': {'_TYPE': 'child'},
'overlaypatnatentry_globalmetadata': {'_TYPE': 'child'},
'vsc_globalmetadata': {'_TYPE': 'child'},
'deploymentfailure_globalmetadata': {'_TYPE': 'child'},
'vport_globalmetadata': {'_TYPE': 'child'},
'vpnconnection_globalmetadata': {'_TYPE': 'child'},
'policystatement_globalmetadata': {'_TYPE': 'child'},
'statisticspolicy_globalmetadata': {'_TYPE': 'child'},
'policygroupcategory_globalmetadata': {'_TYPE': 'child'},
'subnet_globalmetadata': {'_TYPE': 'child'},
'container_globalmetadata': {'_TYPE': 'child'},
'ratelimiter_globalmetadata': {'_TYPE': 'child'},
'keyservermonitorencryptedseed_globalmetadata': {'_TYPE': 'child'},
'policyentry_globalmetadata': {'_TYPE': 'child'},
'allgateway_globalmetadata': {'_TYPE': 'child'},
'uplinkroutedistinguisher_globalmetadata': {'_TYPE': 'child'},
'keyservermonitorseed_globalmetadata': {'_TYPE': 'child'},
'lteinformation_globalmetadata': {'_TYPE': 'child'},
'vrs_globalmetadata': {'_TYPE': 'child'},
'hsc_globalmetadata': {'_TYPE': 'child'},
'overlaymirrordestinationtemplate_globalmetadata': {'_TYPE': 'child'},
'ldapconfiguration_globalmetadata': {'_TYPE': 'child'},
'component_globalmetadata': {'_TYPE': 'child'},
'zfbrequest_globalmetadata': {'_TYPE': 'child'},
'ingressadvfwdtemplate_globalmetadata': {'_TYPE': 'child'},
'staticroute_globalmetadata': {'_TYPE': 'child'},
'connectionendpoint_globalmetadata': {'_TYPE': 'child'},
'job_globalmetadata': {'_TYPE': 'child'},
'vminterface_globalmetadata': {'_TYPE': 'child'},
'gatewaysecureddata_globalmetadata': {'_TYPE': 'child'},
'trunk_globalmetadata': {'_TYPE': 'child'},
'vnfdescriptor_globalmetadata': {'_TYPE': 'child'},
'keyservermonitor_globalmetadata': {'_TYPE': 'child'},
'ducgroupbinding_globalmetadata': {'_TYPE': 'child'},
'eventlog_globalmetadata': {'_TYPE': 'child'},
'license_globalmetadata': {'_TYPE': 'child'},
'ingressprofile_globalmetadata': {'_TYPE': 'child'},
'enterpriseprofile_globalmetadata': {'_TYPE': 'child'},
'netconfmanager_globalmetadata': {'_TYPE': 'child'},
'bridgeinterface_globalmetadata': {'_TYPE': 'child'},
'infraconfig_globalmetadata': {'_TYPE': 'child'},
'dscpremarkingpolicy_globalmetadata': {'_TYPE': 'child'},
'egressprofile_globalmetadata': {'_TYPE': 'child'},
'networkmacrogroup_globalmetadata': {'_TYPE': 'child'},
'forwardingpathlist_globalmetadata': {'_TYPE': 'child'},
'ingressadvfwdentrytemplate_globalmetadata': {'_TYPE': 'child'},
'ikesubnet_globalmetadata': {'_TYPE': 'child'},
'destinationurl_globalmetadata': {'_TYPE': 'child'},
'vnfcatalog_globalmetadata': {'_TYPE': 'child'},
'vsgredundantport_globalmetadata': {'_TYPE': 'child'},
'forwardingpathlistentry_globalmetadata': {'_TYPE': 'child'},
'multicastlist_globalmetadata': {'_TYPE': 'child'},
'nexthop_globalmetadata': {'_TYPE': 'child'},
'mirrordestination_globalmetadata': {'_TYPE': 'child'},
'natmapentry_globalmetadata': {'_TYPE': 'child'},
'egressdomainfloatingipacltemplate_globalmetadata': {'_TYPE': 'child'},
'ospfinterface_globalmetadata': {'_TYPE': 'child'},
'addressmap_globalmetadata': {'_TYPE': 'child'},
'underlay_globalmetadata': {'_TYPE': 'child'},
'gateway_globalmetadata': {'_TYPE': 'child'},
'multinicvport_globalmetadata': {'_TYPE': 'child'},
'bfdsession_globalmetadata': {'_TYPE': 'child'},
'statistics_globalmetadata': {'_TYPE': 'child'},
'nsporttemplate_globalmetadata': {'_TYPE': 'child'},
'sshkey_globalmetadata': {'_TYPE': 'child'},
'service_globalmetadata': {'_TYPE': 'child'},
'vcenterdatacenter_globalmetadata': {'_TYPE': 'child'},
'customproperty_globalmetadata': {'_TYPE': 'child'},
'policyobjectgroup_globalmetadata': {'_TYPE': 'child'},
'virtualfirewallrule_globalmetadata': {'_TYPE': 'child'},
'virtualfirewallpolicy_globalmetadata': {'_TYPE': 'child'},
'dscpremarkingpolicytable_globalmetadata': {'_TYPE': 'child'},
'vnfthresholdpolicy_globalmetadata': {'_TYPE': 'child'},
'ptranslationmap_globalmetadata': {'_TYPE': 'child'},
'ikegateway_globalmetadata': {'_TYPE': 'child'},
'csnatpool_globalmetadata': {'_TYPE': 'child'},
'shuntlink_globalmetadata': {'_TYPE': 'child'},
'vcenter_globalmetadata': {'_TYPE': 'child'},
'bulkstatistics_globalmetadata': {'_TYPE': 'child'},
'ingressaclentrytemplate_globalmetadata': {'_TYPE': 'child'},
'ltestatistics_globalmetadata': {'_TYPE': 'child'},
'routingpolicy_globalmetadata': {'_TYPE': 'child'},
'multicastrange_globalmetadata': {'_TYPE': 'child'},
'networkperformancebinding_globalmetadata': {'_TYPE': 'child'},
'certificate_globalmetadata': {'_TYPE': 'child'},
'defaultgateway_globalmetadata': {'_TYPE': 'child'},
'saasapplicationgroup_globalmetadata': {'_TYPE': 'child'},
'l2domain_globalmetadata': {'_TYPE': 'child'},
'ikegatewayconfig_globalmetadata': {'_TYPE': 'child'},
'hostinterface_globalmetadata': {'_TYPE': 'child'},
'enterprisesecureddata_globalmetadata': {'_TYPE': 'child'},
'applicationperformancemanagement_globalmetadata': {'_TYPE': 'child'},
'qospolicer_globalmetadata': {'_TYPE': 'child'},
'ikecertificate_globalmetadata': {'_TYPE': 'child'},
'statisticscollector_globalmetadata': {'_TYPE': 'child'},
'ssidconnection_globalmetadata': {'_TYPE': 'child'},
'egressadvfwdtemplate_globalmetadata': {'_TYPE': 'child'},
'usercontext_globalmetadata': {'_TYPE': 'child'},
'l7applicationsignature_globalmetadata': {'_TYPE': 'child'},
'nsgatewayssummary_globalmetadata': {'_TYPE': 'child'},
'ospfinstance_globalmetadata': {'_TYPE': 'child'},
'dhcpoption_globalmetadata': {'_TYPE': 'child'},
'keyservermember_globalmetadata': {'_TYPE': 'child'},
'nsgateway_globalmetadata': {'_TYPE': 'child'},
'nsgatewaytemplate_globalmetadata': {'_TYPE': 'child'},
'zonetemplate_globalmetadata': {'_TYPE': 'child'},
'nsggroup_globalmetadata': {'_TYPE': 'child'},
'site_globalmetadata': {'_TYPE': 'child'},
'vsd_globalmetadata': {'_TYPE': 'child'},
'alarm_globalmetadata': {'_TYPE': 'child'},
'nsgroutingpolicybinding_globalmetadata': {'_TYPE': 'child'},
'nsgatewayscount_globalmetadata': {'_TYPE': 'child'},
'bootstrap_globalmetadata': {'_TYPE': 'child'},
'bgpprofile_globalmetadata': {'_TYPE': 'child'},
'egressqospolicy_globalmetadata': {'_TYPE': 'child'},
'publicnetwork_globalmetadata': {'_TYPE': 'child'},
'l2domaintemplate_globalmetadata': {'_TYPE': 'child'},
'ingressqospolicy_globalmetadata': {'_TYPE': 'child'},
'addressrange_globalmetadata': {'_TYPE': 'child'},
'domaintemplate_globalmetadata': {'_TYPE': 'child'},
'vm_globalmetadata': {'_TYPE': 'child'},
'enterprisepermission_globalmetadata': {'_TYPE': 'child'},
'egressdomainfloatingipaclentrytemplate_globalmetadata': {'_TYPE': 'child'},
'resync_globalmetadata': {'_TYPE': 'child'},
'gatewaysecurity_globalmetadata': {'_TYPE': 'child'},
'policydecision_globalmetadata': {'_TYPE': 'child'},
'applicationperformancemanagementbinding_globalmetadata': {'_TYPE': 'child'},
'spatsourcespool_globalmetadata': {'_TYPE': 'child'},
'floatingip_globalmetadata': {'_TYPE': 'child'},
'egressacltemplate_globalmetadata': {'_TYPE': 'child'},
'monitoringport_globalmetadata': {'_TYPE': 'child'},
'monitorscope_globalmetadata': {'_TYPE': 'child'},
'sapingressqosprofile_globalmetadata': {'_TYPE': 'child'},
'port_globalmetadata': {'_TYPE': 'child'},
'nsgredundancygroup_globalmetadata': {'_TYPE': 'child'},
'gatewayslocation_globalmetadata': {'_TYPE': 'child'},
'keyservermonitorsek_globalmetadata': {'_TYPE': 'child'},
'cosremarkingpolicytable_globalmetadata': {'_TYPE': 'child'},
'vportmirror_globalmetadata': {'_TYPE': 'child'},
'patnatpool_globalmetadata': {'_TYPE': 'child'},
'overlayaddresspool_globalmetadata': {'_TYPE': 'child'},
'psnatpool_globalmetadata': {'_TYPE': 'child'},
'bgpneighbor_globalmetadata': {'_TYPE': 'child'},
'containerresync_globalmetadata': {'_TYPE': 'child'},
'vnf_globalmetadata': {'_TYPE': 'child'},
'allredundancygroup_globalmetadata': {'_TYPE': 'child'},
'allalarm_globalmetadata': {'_TYPE': 'child'},
'vlan_globalmetadata': {'_TYPE': 'child'},
'wirelessport_globalmetadata': {'_TYPE': 'child'},
'systemconfig_globalmetadata': {'_TYPE': 'child'},
'ikeencryptionprofile_globalmetadata': {'_TYPE': 'child'},
'policygrouptemplate_globalmetadata': {'_TYPE': 'child'},
'vcentercluster_globalmetadata': {'_TYPE': 'child'},
'user_globalmetadata': {'_TYPE': 'child'},
'tier_globalmetadata': {'_TYPE': 'child'},
'policygroup_globalmetadata': {'_TYPE': 'child'},
'demarcationservice_globalmetadata': {'_TYPE': 'child'},
'brconnection_globalmetadata': {'_TYPE': 'child'},
'firewallrule_globalmetadata': {'_TYPE': 'child'},
'nsport_globalmetadata': {'_TYPE': 'child'},
'vrsaddressrange_globalmetadata': {'_TYPE': 'child'},
'porttemplate_globalmetadata': {'_TYPE': 'child'},
'vlantemplate_globalmetadata': {'_TYPE': 'child'},
'uplinkconnection_globalmetadata': {'_TYPE': 'child'},
'globalmetadata_globalmetadata': {'_TYPE': 'child'},
'networkperformancemeasurement_globalmetadata': {'_TYPE': 'child'},
'ospfarea_globalmetadata': {'_TYPE': 'child'},
'ikepsk_globalmetadata': {'_TYPE': 'child'},
'ctranslationmap_globalmetadata': {'_TYPE': 'child'},
'link_globalmetadata': {'_TYPE': 'child'},
'ingressacltemplate_globalmetadata': {'_TYPE': 'child'},
'vnfmetadata_globalmetadata': {'_TYPE': 'child'},
'vcenterhypervisor_globalmetadata': {'_TYPE': 'child'},
'group': {},
'domain_group': {'_TYPE': 'child'},
'zone_group': {'_TYPE': 'child'},
'l2domaintemplate_group': {'_TYPE': 'child'},
'l2domain_group': {'_TYPE': 'child'},
'domaintemplate_group': {'_TYPE': 'child'},
'user_group': {'_TYPE': 'child'},
'enterprise_group': {'_TYPE': 'child'},
'groupkeyencryptionprofile': {},
'enterprise_groupkeyencryptionprofile': {'_TYPE': 'child'},
'hostinterface': {},
'domain_hostinterface': {'_TYPE': 'child'},
'l2domain_hostinterface': {'_TYPE': 'child'},
'vport_hostinterface': {'_TYPE': 'child'},
'hsc': {},
'vsp_hsc': {'_TYPE': 'child'},
'vrs_hsc': {'_TYPE': 'child'},
'ikecertificate': {},
'enterprise_ikecertificate': {'_TYPE': 'child'},
'ikeencryptionprofile': {},
'enterprise_ikeencryptionprofile': {'_TYPE': 'child'},
'ikegateway': {},
'enterprise_ikegateway': {'_TYPE': 'child'},
'ikegatewayconfig': {},
'ikegateway_ikegatewayconfig': {'_TYPE': 'member'},
'ikegatewayconnection': {},
'subnet_ikegatewayconnection': {'_TYPE': 'member'},
'vlan_ikegatewayconnection': {'_TYPE': 'child'},
'ikegatewayprofile': {},
'enterprise_ikegatewayprofile': {'_TYPE': 'child'},
'ikepsk': {},
'enterprise_ikepsk': {'_TYPE': 'child'},
'ikesubnet': {},
'ikegateway_ikesubnet': {'_TYPE': 'child'},
'infraconfig': {},
'nsgateway_infraconfig': {'_TYPE': 'child'},
'gateway_infraconfig': {'_TYPE': 'child'},
'infrastructureaccessprofile': {},
'infrastructureevdfprofile': {},
'infrastructuregatewayprofile': {},
'infrastructurevscprofile': {},
'ingressaclentrytemplate': {},
'domain_ingressaclentrytemplate': {'_TYPE': 'child'},
'l2domain_ingressaclentrytemplate': {'_TYPE': 'child'},
'mirrordestination_ingressaclentrytemplate': {'_TYPE': 'child'},
'vport_ingressaclentrytemplate': {'_TYPE': 'child'},
'ingressacltemplate_ingressaclentrytemplate': {'_TYPE': 'child'},
'ingressacltemplate': {},
'l2domaintemplate_ingressacltemplate': {'_TYPE': 'child'},
'domain_ingressacltemplate': {'_TYPE': 'child'},
'domaintemplate_ingressacltemplate': {'_TYPE': 'child'},
'l2domain_ingressacltemplate': {'_TYPE': 'child'},
'ingressadvfwdentrytemplate': {},
'vport_ingressadvfwdentrytemplate': {'_TYPE': 'child'},
'ingressadvfwdtemplate_ingressadvfwdentrytemplate': {'_TYPE': 'child'},
'mirrordestination_ingressadvfwdentrytemplate': {'_TYPE': 'child'},
'ingressadvfwdtemplate': {},
'l2domaintemplate_ingressadvfwdtemplate': {'_TYPE': 'child'},
'domain_ingressadvfwdtemplate': {'_TYPE': 'child'},
'domaintemplate_ingressadvfwdtemplate': {'_TYPE': 'child'},
'l2domain_ingressadvfwdtemplate': {'_TYPE': 'child'},
'ingressprofile': {},
'redundancygroup_ingressprofile': {'_TYPE': 'child'},
'gateway_ingressprofile': {'_TYPE': 'child'},
'ingressqospolicy': {},
'enterprise_ingressqospolicy': {'_TYPE': 'child'},
'ipfilterprofile': {},
'redundancygroup_ipfilterprofile': {'_TYPE': 'child'},
'gateway_ipfilterprofile': {'_TYPE': 'child'},
'ipreservation': {},
'subnet_ipreservation': {'_TYPE': 'child'},
'ipv6filterprofile': {},
'redundancygroup_ipv6filterprofile': {'_TYPE': 'child'},
'gateway_ipv6filterprofile': {'_TYPE': 'child'},
'job': {},
'domain_job': {'_TYPE': 'child'},
'vnf_job': {'_TYPE': 'child'},
'l2domaintemplate_job': {'_TYPE': 'child'},
'domaintemplate_job': {'_TYPE': 'child'},
'vsc_job': {'_TYPE': 'child'},
'vsd_job': {'_TYPE': 'child'},
'redundancygroup_job': {'_TYPE': 'child'},
'vport_job': {'_TYPE': 'child'},
'gateway_job': {'_TYPE': 'child'},
'egressacltemplate_job': {'_TYPE': 'child'},
'vcenter_job': {'_TYPE': 'child'},
'vrs_job': {'_TYPE': 'child'},
'hsc_job': {'_TYPE': 'child'},
'l2domain_job': {'_TYPE': 'child'},
'zfbrequest_job': {'_TYPE': 'child'},
'ingressadvfwdtemplate_job': {'_TYPE': 'child'},
'vcentercluster_job': {'_TYPE': 'child'},
'vcenterhypervisor_job': {'_TYPE': 'child'},
'nsgateway_job': {'_TYPE': 'child'},
'enterprise_job': {'_TYPE': 'child'},
'ingressacltemplate_job': {'_TYPE': 'child'},
'keyservermember': {},
'keyservermonitor': {},
'enterprise_keyservermonitor': {'_TYPE': 'child'},
'keyservermonitorencryptedseed': {},
'keyservermonitorseed_keyservermonitorencryptedseed': {'_TYPE': 'child'},
'keyservermonitor_keyservermonitorencryptedseed': {'_TYPE': 'child'},
'keyservermonitorseed': {},
'keyservermonitor_keyservermonitorseed': {'_TYPE': 'child'},
'keyservermonitorsek': {},
'keyservermonitor_keyservermonitorsek': {'_TYPE': 'child'},
'l2domain': {},
'redundancygroup_l2domain': {'_TYPE': 'child'},
'l2domaintemplate_l2domain': {'_TYPE': 'child'},
'gateway_l2domain': {'_TYPE': 'child'},
'enterprise_l2domain': {'_TYPE': 'child'},
'l2domaintemplate': {},
'enterprise_l2domaintemplate': {'_TYPE': 'child'},
'l4service': {},
'l4servicegroup_l4service': {'_TYPE': 'member'},
'enterprise_l4service': {'_TYPE': 'child'},
'l4servicegroup': {},
'l4service_l4servicegroup': {'_TYPE': 'member'},
'enterprise_l4servicegroup': {'_TYPE': 'child'},
'l7applicationsignature': {},
'enterprise_l7applicationsignature': {'_TYPE': 'child'},
'ldapconfiguration': {},
'enterprise_ldapconfiguration': {'_TYPE': 'child'},
'license': {},
'licensestatus': {},
'link': {},
'domain_link': {'_TYPE': 'child'},
'location': {},
'nsgateway_location': {'_TYPE': 'child'},
'gateway_location': {'_TYPE': 'child'},
'lteinformation': {},
'nsport_lteinformation': {'_TYPE': 'child'},
'ltestatistics': {},
'vlan_ltestatistics': {'_TYPE': 'child'},
'macfilterprofile': {},
'redundancygroup_macfilterprofile': {'_TYPE': 'child'},
'gateway_macfilterprofile': {'_TYPE': 'child'},
'me': {},
'metadata': {},
'containerinterface_metadata': {'_TYPE': 'child'},
'groupkeyencryptionprofile_metadata': {'_TYPE': 'child'},
'ipfilterprofile_metadata': {'_TYPE': 'child'},
'qos_metadata': {'_TYPE': 'child'},
'infrastructureaccessprofile_metadata': {'_TYPE': 'child'},
'bgppeer_metadata': {'_TYPE': 'child'},
'sharednetworkresource_metadata': {'_TYPE': 'child'},
'virtualip_metadata': {'_TYPE': 'child'},
'pspatmap_metadata': {'_TYPE': 'child'},
'egressadvfwdentrytemplate_metadata': {'_TYPE': 'child'},
'dscpforwardingclasstable_metadata': {'_TYPE': 'child'},
'multicastchannelmap_metadata': {'_TYPE': 'child'},
'enterprise_metadata': {'_TYPE': 'child'},
'redundancygroup_metadata': {'_TYPE': 'child'},
'tca_metadata': {'_TYPE': 'child'},
'sapegressqosprofile_metadata': {'_TYPE': 'child'},
'macfilterprofile_metadata': {'_TYPE': 'child'},
'group_metadata': {'_TYPE': 'child'},
'autodiscoveredgateway_metadata': {'_TYPE': 'child'},
'zone_metadata': {'_TYPE': 'child'},
'application_metadata': {'_TYPE': 'child'},
'ikegatewayprofile_metadata': {'_TYPE': 'child'},
'overlaymirrordestination_metadata': {'_TYPE': 'child'},
'infrastructuregatewayprofile_metadata': {'_TYPE': 'child'},
'diskstat_metadata': {'_TYPE': 'child'},
'vnfinterface_metadata': {'_TYPE': 'child'},
'eamconfig_metadata': {'_TYPE': 'child'},
'location_metadata': {'_TYPE': 'child'},
'enterprisesecurity_metadata': {'_TYPE': 'child'},
'vrsconfig_metadata': {'_TYPE': 'child'},
'ducgroup_metadata': {'_TYPE': 'child'},
'enterprisenetwork_metadata': {'_TYPE': 'child'},
'infrastructurevscprofile_metadata': {'_TYPE': 'child'},
'permission_metadata': {'_TYPE': 'child'},
'cosremarkingpolicy_metadata': {'_TYPE': 'child'},
'ipreservation_metadata': {'_TYPE': 'child'},
'redirectiontargettemplate_metadata': {'_TYPE': 'child'},
'subnettemplate_metadata': {'_TYPE': 'child'},
'applicationbinding_metadata': {'_TYPE': 'child'},
'vnfdomainmapping_metadata': {'_TYPE': 'child'},
'ikegatewayconnection_metadata': {'_TYPE': 'child'},
'redirectiontarget_metadata': {'_TYPE': 'child'},
'firewallacl_metadata': {'_TYPE': 'child'},
'gatewayredundantport_metadata': {'_TYPE': 'child'},
'networklayout_metadata': {'_TYPE': 'child'},
'vnfinterfacedescriptor_metadata': {'_TYPE': 'child'},
'patch_metadata': {'_TYPE': 'child'},
'avatar_metadata': {'_TYPE': 'child'},
'infrastructureevdfprofile_metadata': {'_TYPE': 'child'},
'bootstrapactivation_metadata': {'_TYPE': 'child'},
'licensestatus_metadata': {'_TYPE': 'child'},
'gatewaytemplate_metadata': {'_TYPE': 'child'},
'cms_metadata': {'_TYPE': 'child'},
'ipv6filterprofile_metadata': {'_TYPE': 'child'},
'vsp_metadata': {'_TYPE': 'child'},
'saasapplicationtype_metadata': {'_TYPE': 'child'},
'domain_metadata': {'_TYPE': 'child'},
'netconfsession_metadata': {'_TYPE': 'child'},
'netconfprofile_metadata': {'_TYPE': 'child'},
'nsredundantport_metadata': {'_TYPE': 'child'},
'dscpforwardingclassmapping_metadata': {'_TYPE': 'child'},
'egressaclentrytemplate_metadata': {'_TYPE': 'child'},
'overlaypatnatentry_metadata': {'_TYPE': 'child'},
'vsc_metadata': {'_TYPE': 'child'},
'deploymentfailure_metadata': {'_TYPE': 'child'},
'vport_metadata': {'_TYPE': 'child'},
'vpnconnection_metadata': {'_TYPE': 'child'},
'policystatement_metadata': {'_TYPE': 'child'},
'statisticspolicy_metadata': {'_TYPE': 'child'},
'policygroupcategory_metadata': {'_TYPE': 'child'},
'subnet_metadata': {'_TYPE': 'child'},
'container_metadata': {'_TYPE': 'child'},
'ratelimiter_metadata': {'_TYPE': 'child'},
'keyservermonitorencryptedseed_metadata': {'_TYPE': 'child'},
'policyentry_metadata': {'_TYPE': 'child'},
'allgateway_metadata': {'_TYPE': 'child'},
'uplinkroutedistinguisher_metadata': {'_TYPE': 'child'},
'keyservermonitorseed_metadata': {'_TYPE': 'child'},
'lteinformation_metadata': {'_TYPE': 'child'},
'vrs_metadata': {'_TYPE': 'child'},
'hsc_metadata': {'_TYPE': 'child'},
'overlaymirrordestinationtemplate_metadata': {'_TYPE': 'child'},
'ldapconfiguration_metadata': {'_TYPE': 'child'},
'component_metadata': {'_TYPE': 'child'},
'zfbrequest_metadata': {'_TYPE': 'child'},
'ingressadvfwdtemplate_metadata': {'_TYPE': 'child'},
'staticroute_metadata': {'_TYPE': 'child'},
'connectionendpoint_metadata': {'_TYPE': 'child'},
'job_metadata': {'_TYPE': 'child'},
'vminterface_metadata': {'_TYPE': 'child'},
'gatewaysecureddata_metadata': {'_TYPE': 'child'},
'trunk_metadata': {'_TYPE': 'child'},
'vnfdescriptor_metadata': {'_TYPE': 'child'},
'keyservermonitor_metadata': {'_TYPE': 'child'},
'ducgroupbinding_metadata': {'_TYPE': 'child'},
'eventlog_metadata': {'_TYPE': 'child'},
'license_metadata': {'_TYPE': 'child'},
'ingressprofile_metadata': {'_TYPE': 'child'},
'enterpriseprofile_metadata': {'_TYPE': 'child'},
'netconfmanager_metadata': {'_TYPE': 'child'},
'bridgeinterface_metadata': {'_TYPE': 'child'},
'infraconfig_metadata': {'_TYPE': 'child'},
'dscpremarkingpolicy_metadata': {'_TYPE': 'child'},
'egressprofile_metadata': {'_TYPE': 'child'},
'networkmacrogroup_metadata': {'_TYPE': 'child'},
'forwardingpathlist_metadata': {'_TYPE': 'child'},
'ingressadvfwdentrytemplate_metadata': {'_TYPE': 'child'},
'ikesubnet_metadata': {'_TYPE': 'child'},
'destinationurl_metadata': {'_TYPE': 'child'},
'vnfcatalog_metadata': {'_TYPE': 'child'},
'vsgredundantport_metadata': {'_TYPE': 'child'},
'forwardingpathlistentry_metadata': {'_TYPE': 'child'},
'multicastlist_metadata': {'_TYPE': 'child'},
'nexthop_metadata': {'_TYPE': 'child'},
'mirrordestination_metadata': {'_TYPE': 'child'},
'natmapentry_metadata': {'_TYPE': 'child'},
'egressdomainfloatingipacltemplate_metadata': {'_TYPE': 'child'},
'ospfinterface_metadata': {'_TYPE': 'child'},
'addressmap_metadata': {'_TYPE': 'child'},
'underlay_metadata': {'_TYPE': 'child'},
'gateway_metadata': {'_TYPE': 'child'},
'multinicvport_metadata': {'_TYPE': 'child'},
'bfdsession_metadata': {'_TYPE': 'child'},
'statistics_metadata': {'_TYPE': 'child'},
'nsporttemplate_metadata': {'_TYPE': 'child'},
'sshkey_metadata': {'_TYPE': 'child'},
'service_metadata': {'_TYPE': 'child'},
'vcenterdatacenter_metadata': {'_TYPE': 'child'},
'customproperty_metadata': {'_TYPE': 'child'},
'policyobjectgroup_metadata': {'_TYPE': 'child'},
'virtualfirewallrule_metadata': {'_TYPE': 'child'},
'virtualfirewallpolicy_metadata': {'_TYPE': 'child'},
'dscpremarkingpolicytable_metadata': {'_TYPE': 'child'},
'vnfthresholdpolicy_metadata': {'_TYPE': 'child'},
'ptranslationmap_metadata': {'_TYPE': 'child'},
'ikegateway_metadata': {'_TYPE': 'child'},
'csnatpool_metadata': {'_TYPE': 'child'},
'shuntlink_metadata': {'_TYPE': 'child'},
'vcenter_metadata': {'_TYPE': 'child'},
'bulkstatistics_metadata': {'_TYPE': 'child'},
'ingressaclentrytemplate_metadata': {'_TYPE': 'child'},
'ltestatistics_metadata': {'_TYPE': 'child'},
'routingpolicy_metadata': {'_TYPE': 'child'},
'multicastrange_metadata': {'_TYPE': 'child'},
'networkperformancebinding_metadata': {'_TYPE': 'child'},
'certificate_metadata': {'_TYPE': 'child'},
'defaultgateway_metadata': {'_TYPE': 'child'},
'saasapplicationgroup_metadata': {'_TYPE': 'child'},
'l2domain_metadata': {'_TYPE': 'child'},
'ikegatewayconfig_metadata': {'_TYPE': 'child'},
'hostinterface_metadata': {'_TYPE': 'child'},
'enterprisesecureddata_metadata': {'_TYPE': 'child'},
'applicationperformancemanagement_metadata': {'_TYPE': 'child'},
'qospolicer_metadata': {'_TYPE': 'child'},
'ikecertificate_metadata': {'_TYPE': 'child'},
'statisticscollector_metadata': {'_TYPE': 'child'},
'ssidconnection_metadata': {'_TYPE': 'child'},
'egressadvfwdtemplate_metadata': {'_TYPE': 'child'},
'usercontext_metadata': {'_TYPE': 'child'},
'l7applicationsignature_metadata': {'_TYPE': 'child'},
'nsgatewayssummary_metadata': {'_TYPE': 'child'},
'ospfinstance_metadata': {'_TYPE': 'child'},
'dhcpoption_metadata': {'_TYPE': 'child'},
'keyservermember_metadata': {'_TYPE': 'child'},
'nsgateway_metadata': {'_TYPE': 'child'},
'nsgatewaytemplate_metadata': {'_TYPE': 'child'},
'zonetemplate_metadata': {'_TYPE': 'child'},
'nsggroup_metadata': {'_TYPE': 'child'},
'site_metadata': {'_TYPE': 'child'},
'vsd_metadata': {'_TYPE': 'child'},
'alarm_metadata': {'_TYPE': 'child'},
'nsgroutingpolicybinding_metadata': {'_TYPE': 'child'},
'nsgatewayscount_metadata': {'_TYPE': 'child'},
'bootstrap_metadata': {'_TYPE': 'child'},
'bgpprofile_metadata': {'_TYPE': 'child'},
'egressqospolicy_metadata': {'_TYPE': 'child'},
'publicnetwork_metadata': {'_TYPE': 'child'},
'l2domaintemplate_metadata': {'_TYPE': 'child'},
'ingressqospolicy_metadata': {'_TYPE': 'child'},
'addressrange_metadata': {'_TYPE': 'child'},
'domaintemplate_metadata': {'_TYPE': 'child'},
'vm_metadata': {'_TYPE': 'child'},
'enterprisepermission_metadata': {'_TYPE': 'child'},
'egressdomainfloatingipaclentrytemplate_metadata': {'_TYPE': 'child'},
'resync_metadata': {'_TYPE': 'child'},
'gatewaysecurity_metadata': {'_TYPE': 'child'},
'policydecision_metadata': {'_TYPE': 'child'},
'applicationperformancemanagementbinding_metadata': {'_TYPE': 'child'},
'spatsourcespool_metadata': {'_TYPE': 'child'},
'floatingip_metadata': {'_TYPE': 'child'},
'egressacltemplate_metadata': {'_TYPE': 'child'},
'monitoringport_metadata': {'_TYPE': 'child'},
'monitorscope_metadata': {'_TYPE': 'child'},
'sapingressqosprofile_metadata': {'_TYPE': 'child'},
'port_metadata': {'_TYPE': 'child'},
'nsgredundancygroup_metadata': {'_TYPE': 'child'},
'gatewayslocation_metadata': {'_TYPE': 'child'},
'keyservermonitorsek_metadata': {'_TYPE': 'child'},
'cosremarkingpolicytable_metadata': {'_TYPE': 'child'},
'vportmirror_metadata': {'_TYPE': 'child'},
'patnatpool_metadata': {'_TYPE': 'child'},
'overlayaddresspool_metadata': {'_TYPE': 'child'},
'psnatpool_metadata': {'_TYPE': 'child'},
'bgpneighbor_metadata': {'_TYPE': 'child'},
'containerresync_metadata': {'_TYPE': 'child'},
'vnf_metadata': {'_TYPE': 'child'},
'allredundancygroup_metadata': {'_TYPE': 'child'},
'allalarm_metadata': {'_TYPE': 'child'},
'vlan_metadata': {'_TYPE': 'child'},
'wirelessport_metadata': {'_TYPE': 'child'},
'systemconfig_metadata': {'_TYPE': 'child'},
'ikeencryptionprofile_metadata': {'_TYPE': 'child'},
'policygrouptemplate_metadata': {'_TYPE': 'child'},
'vcentercluster_metadata': {'_TYPE': 'child'},
'user_metadata': {'_TYPE': 'child'},
'tier_metadata': {'_TYPE': 'child'},
'policygroup_metadata': {'_TYPE': 'child'},
'demarcationservice_metadata': {'_TYPE': 'child'},
'brconnection_metadata': {'_TYPE': 'child'},
'firewallrule_metadata': {'_TYPE': 'child'},
'nsport_metadata': {'_TYPE': 'child'},
'vrsaddressrange_metadata': {'_TYPE': 'child'},
'porttemplate_metadata': {'_TYPE': 'child'},
'vlantemplate_metadata': {'_TYPE': 'child'},
'uplinkconnection_metadata': {'_TYPE': 'child'},
'globalmetadata_metadata': {'_TYPE': 'child'},
'networkperformancemeasurement_metadata': {'_TYPE': 'child'},
'ospfarea_metadata': {'_TYPE': 'child'},
'ikepsk_metadata': {'_TYPE': 'child'},
'ctranslationmap_metadata': {'_TYPE': 'child'},
'link_metadata': {'_TYPE': 'child'},
'ingressacltemplate_metadata': {'_TYPE': 'child'},
'vnfmetadata_metadata': {'_TYPE': 'child'},
'vcenterhypervisor_metadata': {'_TYPE': 'child'},
'mirrordestination': {},
'monitoringport': {},
'vrs_monitoringport': {'_TYPE': 'child'},
'hsc_monitoringport': {'_TYPE': 'child'},
'vsc_monitoringport': {'_TYPE': 'child'},
'monitorscope': {},
'application_monitorscope': {'_TYPE': 'child'},
'networkperformancemeasurement_monitorscope': {'_TYPE': 'child'},
'multicastchannelmap': {},
'containerinterface_multicastchannelmap': {'_TYPE': 'child'},
'vminterface_multicastchannelmap': {'_TYPE': 'child'},
'hostinterface_multicastchannelmap': {'_TYPE': 'child'},
'multicastlist_multicastchannelmap': {'_TYPE': 'member'},
'multicastlist': {},
'enterpriseprofile_multicastlist': {'_TYPE': 'child'},
'enterprise_multicastlist': {'_TYPE': 'child'},
'multicastrange': {},
'multicastchannelmap_multicastrange': {'_TYPE': 'child'},
'multinicvport': {},
'vrs_multinicvport': {'_TYPE': 'child'},
'natmapentry': {},
'patnatpool_natmapentry': {'_TYPE': 'child'},
'netconfmanager': {},
'vsp_netconfmanager': {'_TYPE': 'child'},
'netconfprofile': {},
'enterprise_netconfprofile': {'_TYPE': 'child'},
'netconfsession': {},
'netconfmanager_netconfsession': {'_TYPE': 'child'},
'networklayout': {},
'networkmacrogroup': {},
'enterprisenetwork_networkmacrogroup': {'_TYPE': 'member'},
'enterprise_networkmacrogroup': {'_TYPE': 'child'},
'networkperformancebinding': {},
'networkperformancemeasurement_networkperformancebinding': {'_TYPE': 'member'},
'domain_networkperformancebinding': {'_TYPE': 'child'},
'l2domain_networkperformancebinding': {'_TYPE': 'child'},
'networkperformancemeasurement': {},
'enterprise_networkperformancemeasurement': {'_TYPE': 'child'},
'nexthop': {},
'link_nexthop': {'_TYPE': 'child'},
'nsgateway': {},
'ducgroup_nsgateway': {'_TYPE': 'member'},
'policyobjectgroup_nsgateway': {'_TYPE': 'member'},
'nsgredundancygroup_nsgateway': {'_TYPE': 'child'},
'nsggroup_nsgateway': {'_TYPE': 'member'},
'enterprise_nsgateway': {'_TYPE': 'child'},
'nsgatewayscount': {},
'enterprise_nsgatewayscount': {'_TYPE': 'child'},
'nsgatewayssummary': {},
'nsgateway_nsgatewayssummary': {'_TYPE': 'child'},
'domain_nsgatewayssummary': {'_TYPE': 'child'},
'l2domain_nsgatewayssummary': {'_TYPE': 'child'},
'enterprise_nsgatewayssummary': {'_TYPE': 'child'},
'nsgatewaytemplate': {},
'infrastructureaccessprofile_nsgatewaytemplate': {'_TYPE': 'child'},
'enterprise_nsgatewaytemplate': {'_TYPE': 'child'},
'nsggroup': {},
'enterprise_nsggroup': {'_TYPE': 'child'},
'nsginfo': {},
'nsgateway_nsginfo': {'_TYPE': 'child'},
'nsgpatchprofile': {},
'nsgredundancygroup': {},
'enterprise_nsgredundancygroup': {'_TYPE': 'child'},
'nsgroutingpolicybinding': {},
'domain_nsgroutingpolicybinding': {'_TYPE': 'child'},
'nsgupgradeprofile': {},
'nsport': {},
'nsgateway_nsport': {'_TYPE': 'child'},
'autodiscoveredgateway_nsport': {'_TYPE': 'child'},
'nsredundantport_nsport': {'_TYPE': 'child'},
'nsporttemplate': {},
'nsgatewaytemplate_nsporttemplate': {'_TYPE': 'child'},
'nsredundantport': {},
'nsgredundancygroup_nsredundantport': {'_TYPE': 'child'},
'ospfarea': {},
'ospfinstance_ospfarea': {'_TYPE': 'child'},
'ospfinstance': {},
'domain_ospfinstance': {'_TYPE': 'child'},
'ospfinterface': {},
'ospfarea_ospfinterface': {'_TYPE': 'child'},
'overlayaddresspool': {},
'link_overlayaddresspool': {'_TYPE': 'child'},
'overlaymirrordestination': {},
'l2domain_overlaymirrordestination': {'_TYPE': 'child'},
'overlaymirrordestinationtemplate': {},
'l2domaintemplate_overlaymirrordestinationtemplate': {'_TYPE': 'child'},
'overlaypatnatentry': {},
'overlayaddresspool_overlaypatnatentry': {'_TYPE': 'child'},
'patch': {},
'nsgateway_patch': {'_TYPE': 'child'},
'patipentry': {},
'subnet_patipentry': {'_TYPE': 'child'},
'sharednetworkresource_patipentry': {'_TYPE': 'child'},
'patmapper': {},
'patnatpool': {},
'nsgateway_patnatpool': {'_TYPE': 'member'},
'vlan_patnatpool': {'_TYPE': 'member'},
'gateway_patnatpool': {'_TYPE': 'member'},
'enterprise_patnatpool': {'_TYPE': 'child'},
'performancemonitor': {},
'ikegatewayconnection_performancemonitor': {'_TYPE': 'member'},
'enterprise_performancemonitor': {'_TYPE': 'child'},
'permission': {},
'nsport_permission': {'_TYPE': 'child'},
'domain_permission': {'_TYPE': 'child'},
'nsgredundancygroup_permission': {'_TYPE': 'child'},
'nsredundantport_permission': {'_TYPE': 'child'},
'service_permission': {'_TYPE': 'child'},
'l2domaintemplate_permission': {'_TYPE': 'child'},
'l2domain_permission': {'_TYPE': 'child'},
'vlan_permission': {'_TYPE': 'child'},
'domaintemplate_permission': {'_TYPE': 'child'},
'gateway_permission': {'_TYPE': 'child'},
'nsgateway_permission': {'_TYPE': 'child'},
'vsgredundantport_permission': {'_TYPE': 'child'},
'redundancygroup_permission': {'_TYPE': 'child'},
'zone_permission': {'_TYPE': 'child'},
'port_permission': {'_TYPE': 'child'},
'pgexpression': {},
'domain_pgexpression': {'_TYPE': 'child'},
'l2domain_pgexpression': {'_TYPE': 'child'},
'pgexpressiontemplate': {},
'l2domaintemplate_pgexpressiontemplate': {'_TYPE': 'child'},
'domaintemplate_pgexpressiontemplate': {'_TYPE': 'child'},
'policydecision': {},
'containerinterface_policydecision': {'_TYPE': 'child'},
'vminterface_policydecision': {'_TYPE': 'child'},
'hostinterface_policydecision': {'_TYPE': 'child'},
'bridgeinterface_policydecision': {'_TYPE': 'child'},
'policyentry': {},
'policystatement_policyentry': {'_TYPE': 'child'},
'policygroup': {},
'containerinterface_policygroup': {'_TYPE': 'child'},
'domain_policygroup': {'_TYPE': 'child'},
'l2domain_policygroup': {'_TYPE': 'child'},
'policygroupcategory_policygroup': {'_TYPE': 'member'},
'hostinterface_policygroup': {'_TYPE': 'child'},
'bridgeinterface_policygroup': {'_TYPE': 'child'},
'vminterface_policygroup': {'_TYPE': 'child'},
'vport_policygroup': {'_TYPE': 'member'},
'policygroupcategory': {},
'policygroup_policygroupcategory': {'_TYPE': 'member'},
'enterprise_policygroupcategory': {'_TYPE': 'child'},
'policygrouptemplate': {},
'l2domaintemplate_policygrouptemplate': {'_TYPE': 'child'},
'domaintemplate_policygrouptemplate': {'_TYPE': 'child'},
'policyobjectgroup': {},
'enterprise_policyobjectgroup': {'_TYPE': 'child'},
'policystatement': {},
'link_policystatement': {'_TYPE': 'child'},
'port': {},
'redundancygroup_port': {'_TYPE': 'child'},
'gateway_port': {'_TYPE': 'child'},
'autodiscoveredgateway_port': {'_TYPE': 'child'},
'portmapping': {},
'vport_portmapping': {'_TYPE': 'child'},
'porttemplate': {},
'gatewaytemplate_porttemplate': {'_TYPE': 'child'},
'proxyarpfilter': {},
'subnet_proxyarpfilter': {'_TYPE': 'child'},
'l2domain_proxyarpfilter': {'_TYPE': 'child'},
'psnatpool': {},
'link_psnatpool': {'_TYPE': 'child'},
'pspatmap': {},
'psnatpool_pspatmap': {'_TYPE': 'child'},
'ptranslationmap': {},
'psnatpool_ptranslationmap': {'_TYPE': 'child'},
'publicnetwork': {},
'enterprise_publicnetwork': {'_TYPE': 'child'},
'qos': {},
'subnet_qos': {'_TYPE': 'child'},
'domain_qos': {'_TYPE': 'child'},
'zone_qos': {'_TYPE': 'child'},
'l2domaintemplate_qos': {'_TYPE': 'child'},
'l2domain_qos': {'_TYPE': 'child'},
'domaintemplate_qos': {'_TYPE': 'child'},
'hostinterface_qos': {'_TYPE': 'child'},
'bridgeinterface_qos': {'_TYPE': 'child'},
'zonetemplate_qos': {'_TYPE': 'child'},
'subnettemplate_qos': {'_TYPE': 'child'},
'vport_qos': {'_TYPE': 'child'},
'policydecision_qos': {'_TYPE': 'child'},
'qospolicer': {},
'ratelimiter': {},
'enterprise_ratelimiter': {'_TYPE': 'child'},
'redirectiontarget': {},
'containerinterface_redirectiontarget': {'_TYPE': 'child'},
'domain_redirectiontarget': {'_TYPE': 'child'},
'l2domain_redirectiontarget': {'_TYPE': 'child'},
'hostinterface_redirectiontarget': {'_TYPE': 'child'},
'bridgeinterface_redirectiontarget': {'_TYPE': 'child'},
'vminterface_redirectiontarget': {'_TYPE': 'child'},
'vport_redirectiontarget': {'_TYPE': 'member'},
'redirectiontargettemplate': {},
'l2domaintemplate_redirectiontargettemplate': {'_TYPE': 'child'},
'domaintemplate_redirectiontargettemplate': {'_TYPE': 'child'},
'redundancygroup': {},
'l2domain_redundancygroup': {'_TYPE': 'child'},
'enterprise_redundancygroup': {'_TYPE': 'child'},
'resync': {},
'subnet_resync': {'_TYPE': 'child'},
'vm_resync': {'_TYPE': 'child'},
'routingpolicy': {},
'domain_routingpolicy': {'_TYPE': 'child'},
'enterprise_routingpolicy': {'_TYPE': 'child'},
'saasapplicationgroup': {},
'enterprise_saasapplicationgroup': {'_TYPE': 'child'},
'saasapplicationtype': {},
'enterprise_saasapplicationtype': {'_TYPE': 'child'},
'saasapplicationgroup_saasapplicationtype': {'_TYPE': 'member'},
'sapegressqosprofile': {},
'redundancygroup_sapegressqosprofile': {'_TYPE': 'child'},
'gateway_sapegressqosprofile': {'_TYPE': 'child'},
'sapingressqosprofile': {},
'redundancygroup_sapingressqosprofile': {'_TYPE': 'child'},
'gateway_sapingressqosprofile': {'_TYPE': 'child'},
'service': {},
'redundancygroup_service': {'_TYPE': 'child'},
'gateway_service': {'_TYPE': 'child'},
'autodiscoveredgateway_service': {'_TYPE': 'child'},
'sharednetworkresource': {},
'patmapper_sharednetworkresource': {'_TYPE': 'member'},
'enterprise_sharednetworkresource': {'_TYPE': 'child'},
'shuntlink': {},
'nsgredundancygroup_shuntlink': {'_TYPE': 'child'},
'site': {},
'spatsourcespool': {},
'domain_spatsourcespool': {'_TYPE': 'child'},
'sshkey': {},
'infrastructureaccessprofile_sshkey': {'_TYPE': 'child'},
'ssidconnection': {},
'wirelessport_ssidconnection': {'_TYPE': 'child'},
'staticroute': {},
'containerinterface_staticroute': {'_TYPE': 'child'},
'domain_staticroute': {'_TYPE': 'child'},
'sharednetworkresource_staticroute': {'_TYPE': 'child'},
'hostinterface_staticroute': {'_TYPE': 'child'},
'vminterface_staticroute': {'_TYPE': 'child'},
'l2domain_staticroute': {'_TYPE': 'child'},
'statistics': {},
'containerinterface_statistics': {'_TYPE': 'child'},
'ingressadvfwdentrytemplate_statistics': {'_TYPE': 'child'},
'domain_statistics': {'_TYPE': 'child'},
'zone_statistics': {'_TYPE': 'child'},
'subnet_statistics': {'_TYPE': 'child'},
'patnatpool_statistics': {'_TYPE': 'child'},
'l2domain_statistics': {'_TYPE': 'child'},
'vlan_statistics': {'_TYPE': 'child'},
'vrs_statistics': {'_TYPE': 'child'},
'egressaclentrytemplate_statistics': {'_TYPE': 'child'},
'hostinterface_statistics': {'_TYPE': 'child'},
'bridgeinterface_statistics': {'_TYPE': 'child'},
'vminterface_statistics': {'_TYPE': 'child'},
'vsc_statistics': {'_TYPE': 'child'},
'addressmap_statistics': {'_TYPE': 'child'},
'vport_statistics': {'_TYPE': 'child'},
'ingressaclentrytemplate_statistics': {'_TYPE': 'child'},
'nsport_statistics': {'_TYPE': 'child'},
'statisticscollector': {},
'statisticspolicy': {},
'subnet_statisticspolicy': {'_TYPE': 'child'},
'domain_statisticspolicy': {'_TYPE': 'child'},
'zone_statisticspolicy': {'_TYPE': 'child'},
'l2domain_statisticspolicy': {'_TYPE': 'child'},
'addressmap_statisticspolicy': {'_TYPE': 'child'},
'vport_statisticspolicy': {'_TYPE': 'child'},
'patnatpool_statisticspolicy': {'_TYPE': 'child'},
'nsport_statisticspolicy': {'_TYPE': 'child'},
'subnet': {},
'ikegatewayconnection_subnet': {'_TYPE': 'member'},
'domain_subnet': {'_TYPE': 'child'},
'zone_subnet': {'_TYPE': 'child'},
'nsgateway_subnet': {'_TYPE': 'child'},
'subnettemplate_subnet': {'_TYPE': 'child'},
'patmapper_subnet': {'_TYPE': 'child'},
'subnettemplate': {},
'domaintemplate_subnettemplate': {'_TYPE': 'child'},
'zonetemplate_subnettemplate': {'_TYPE': 'child'},
'systemconfig': {},
'tca': {},
'containerinterface_tca': {'_TYPE': 'child'},
'domain_tca': {'_TYPE': 'child'},
'zone_tca': {'_TYPE': 'child'},
'subnet_tca': {'_TYPE': 'child'},
'l2domain_tca': {'_TYPE': 'child'},
'hostinterface_tca': {'_TYPE': 'child'},
'bridgeinterface_tca': {'_TYPE': 'child'},
'vminterface_tca': {'_TYPE': 'child'},
'vport_tca': {'_TYPE': 'child'},
'tier': {},
'performancemonitor_tier': {'_TYPE': 'child'},
'trunk': {},
'vport_trunk': {'_TYPE': 'child'},
'enterprise_trunk': {'_TYPE': 'child'},
'underlay': {},
'uplinkconnection': {},
'nsgateway_uplinkconnection': {'_TYPE': 'child'},
'vlan_uplinkconnection': {'_TYPE': 'child'},
'vlantemplate_uplinkconnection': {'_TYPE': 'child'},
'uplinkroutedistinguisher': {},
'domain_uplinkroutedistinguisher': {'_TYPE': 'child'},
'l2domain_uplinkroutedistinguisher': {'_TYPE': 'child'},
'user': {},
'group_user': {'_TYPE': 'member'},
'enterprise_user': {'_TYPE': 'child'},
'usercontext': {},
'vcenter': {},
'vcentercluster': {},
'vcenterdatacenter_vcentercluster': {'_TYPE': 'child'},
'vcenterdatacenter': {},
'vcenter_vcenterdatacenter': {'_TYPE': 'child'},
'vcenterhypervisor': {},
'vcentercluster_vcenterhypervisor': {'_TYPE': 'child'},
'vcenterdatacenter_vcenterhypervisor': {'_TYPE': 'child'},
'virtualfirewallpolicy': {},
'l2domaintemplate_virtualfirewallpolicy': {'_TYPE': 'child'},
'domain_virtualfirewallpolicy': {'_TYPE': 'child'},
'domaintemplate_virtualfirewallpolicy': {'_TYPE': 'child'},
'l2domain_virtualfirewallpolicy': {'_TYPE': 'child'},
'virtualfirewallrule': {},
'domain_virtualfirewallrule': {'_TYPE': 'child'},
'l2domain_virtualfirewallrule': {'_TYPE': 'child'},
'virtualfirewallpolicy_virtualfirewallrule': {'_TYPE': 'child'},
'virtualip': {},
'subnet_virtualip': {'_TYPE': 'child'},
'vport_virtualip': {'_TYPE': 'child'},
'redirectiontarget_virtualip': {'_TYPE': 'child'},
'vlan': {},
'gatewayredundantport_vlan': {'_TYPE': 'child'},
'nsport_vlan': {'_TYPE': 'child'},
'nsredundantport_vlan': {'_TYPE': 'child'},
'vsgredundantport_vlan': {'_TYPE': 'child'},
'port_vlan': {'_TYPE': 'child'},
'vlantemplate': {},
'nsporttemplate_vlantemplate': {'_TYPE': 'child'},
'porttemplate_vlantemplate': {'_TYPE': 'child'},
'vm': {},
'subnet_vm': {'_TYPE': 'child'},
'domain_vm': {'_TYPE': 'child'},
'qos_vm': {'_TYPE': 'child'},
'zone_vm': {'_TYPE': 'child'},
'egressacltemplate_vm': {'_TYPE': 'child'},
'l2domain_vm': {'_TYPE': 'child'},
'vrs_vm': {'_TYPE': 'child'},
'user_vm': {'_TYPE': 'child'},
'enterprise_vm': {'_TYPE': 'child'},
'vport_vm': {'_TYPE': 'child'},
'ingressacltemplate_vm': {'_TYPE': 'child'},
'vminterface': {},
'subnet_vminterface': {'_TYPE': 'child'},
'domain_vminterface': {'_TYPE': 'child'},
'zone_vminterface': {'_TYPE': 'child'},
'l2domain_vminterface': {'_TYPE': 'child'},
'vm_vminterface': {'_TYPE': 'child'},
'vport_vminterface': {'_TYPE': 'child'},
'vnf': {},
'nsgateway_vnf': {'_TYPE': 'child'},
'enterprise_vnf': {'_TYPE': 'child'},
'vnfcatalog': {},
'vnfdescriptor': {},
'vnfcatalog_vnfdescriptor': {'_TYPE': 'child'},
'vnfdomainmapping': {},
'domain_vnfdomainmapping': {'_TYPE': 'child'},
'vnfinterface': {},
'vnf_vnfinterface': {'_TYPE': 'child'},
'vport_vnfinterface': {'_TYPE': 'child'},
'vnfinterfacedescriptor': {},
'vnfdescriptor_vnfinterfacedescriptor': {'_TYPE': 'child'},
'vnfmetadata': {},
'vnf_vnfmetadata': {'_TYPE': 'child'},
'enterprise_vnfmetadata': {'_TYPE': 'child'},
'vnfthresholdpolicy': {},
'vnf_vnfthresholdpolicy': {'_TYPE': 'child'},
'enterprise_vnfthresholdpolicy': {'_TYPE': 'child'},
'vpnconnection': {},
'sharednetworkresource_vpnconnection': {'_TYPE': 'child'},
'l2domain_vpnconnection': {'_TYPE': 'child'},
'domain_vpnconnection': {'_TYPE': 'child'},
'vport': {},
'vrs_vport': {'_TYPE': 'child'},
'subnet_vport': {'_TYPE': 'child'},
'domain_vport': {'_TYPE': 'child'},
'redirectiontarget_vport': {'_TYPE': 'member'},
'zone_vport': {'_TYPE': 'child'},
'floatingip_vport': {'_TYPE': 'child'},
'l2domain_vport': {'_TYPE': 'child'},
'overlaymirrordestination_vport': {'_TYPE': 'member'},
'ingressprofile_vport': {'_TYPE': 'child'},
'trunk_vport': {'_TYPE': 'child'},
'policygroup_vport': {'_TYPE': 'member'},
'egressprofile_vport': {'_TYPE': 'child'},
'multinicvport_vport': {'_TYPE': 'child'},
'vportmirror': {},
'vport_vportmirror': {'_TYPE': 'child'},
'mirrordestination_vportmirror': {'_TYPE': 'child'},
'vrs': {},
'hsc_vrs': {'_TYPE': 'child'},
'container_vrs': {'_TYPE': 'child'},
'vm_vrs': {'_TYPE': 'child'},
'vsc_vrs': {'_TYPE': 'child'},
'vport_vrs': {'_TYPE': 'child'},
'vrsaddressrange': {},
'vrsconfig_vrsaddressrange': {'_TYPE': 'child'},
'vcenter_vrsaddressrange': {'_TYPE': 'child'},
'vcentercluster_vrsaddressrange': {'_TYPE': 'child'},
'vcenterhypervisor_vrsaddressrange': {'_TYPE': 'child'},
'vcenterdatacenter_vrsaddressrange': {'_TYPE': 'child'},
'vrsconfig': {},
'vrsmetrics': {},
'vcenterhypervisor_vrsmetrics': {'_TYPE': 'child'},
'vrsredeploymentpolicy': {},
'vrsconfig_vrsredeploymentpolicy': {'_TYPE': 'child'},
'vcenter_vrsredeploymentpolicy': {'_TYPE': 'child'},
'vcentercluster_vrsredeploymentpolicy': {'_TYPE': 'child'},
'vcenterhypervisor_vrsredeploymentpolicy': {'_TYPE': 'child'},
'vcenterdatacenter_vrsredeploymentpolicy': {'_TYPE': 'child'},
'vsc': {},
'vsp_vsc': {'_TYPE': 'child'},
'vrs_vsc': {'_TYPE': 'child'},
'vsd': {},
'vsgredundantport': {},
'redundancygroup_vsgredundantport': {'_TYPE': 'child'},
'vsp': {},
'wirelessport': {},
'nsgateway_wirelessport': {'_TYPE': 'child'},
'autodiscoveredgateway_wirelessport': {'_TYPE': 'child'},
'zfbautoassignment': {},
'zfbrequest': {},
'enterprise_zfbrequest': {'_TYPE': 'child'},
'zone': {},
'domain_zone': {'_TYPE': 'child'},
'zonetemplate': {},
'domaintemplate_zonetemplate': {'_TYPE': 'child'},
}
INVARIANT_RESOURCES = [
'brconnections',
'cms',
'licensestatus',
'ltestatistics',
'qos',
'statistics',
'vrsmetrics'
]
def parse_config(config_file):
"""
Parses configuration file
"""
cfg = ConfigParser.ConfigParser()
cfg.read(config_file)
# Checking the LOG options
if not cfg.has_option('LOG', 'directory') or \
not cfg.has_option('LOG', 'file') or \
not cfg.has_option('LOG', 'level'):
print 'Missing options in the LOG section of configuration file {0:s}, please check the sample configuration'.format(config_file)
sys.exit(1)
return cfg
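# A minimal configuration sketch for the LOG section that parse_config() requires.
# The directory and file values below are illustrative placeholders, not shipped defaults:
#
#   [LOG]
#   directory = /var/log/nuage-api-sim
#   file = api-sim.log
#   level = INFO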
def configure_logging(level, path):
"""
Configures the logging environment
"""
logging.basicConfig(filename=path, format='%(asctime)s %(levelname)s %(message)s', level=level)
logger = logging.getLogger(__name__)
return logger
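# Example wiring of the two helpers above (a sketch; the 'config.ini' path is assumed):
#
#   cfg = parse_config('config.ini')
#   log_path = os.path.join(cfg.get('LOG', 'directory'), cfg.get('LOG', 'file'))
#   logger = configure_logging(cfg.get('LOG', 'level'), log_path)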
def init_base_entities():
"""
Sets up basic entities for use
"""
global NUAGE_API_DATA
csproot = vsdk.NUUser(
id=str(uuid.uuid1()),
user_name='csproot',
password='csproot',
first_name='csproot',
last_name='csproot',
email='csproot@CSP.com',
parent_type='ENTERPRISE'
)
csp = vsdk.NUEnterprise(
id=str(uuid.uuid1()),
name='CSP',
description='Enterprise that contains all the CSP users',
allowed_forwarding_classes=['E', 'F', 'G', 'H'],
allow_gateway_management=True,
allow_advanced_qos_configuration=True,
allow_trusted_forwarding_class=True,
bgp_enabled=True,
creation_date=1383734246000,
customer_id=10002,
dictionary_version=2,
enable_application_performance_management=False,
entity_scope='ENTERPRISE',
floating_ips_quota=0,
floating_ips_used=0,
ldap_authorization_enabled=False,
ldap_enabled=False,
last_updated_by=csproot.id,
last_updated_date=1499101329000
)
csproot.parent_id = csp.id
NUAGE_API_DATA['enterprise_user'][csp.id] = {csproot.id: csproot}
NUAGE_API_DATA['ROOT_UUIDS']['csp_enterprise'] = csp.id
NUAGE_API_DATA['enterprise'][csp.id] = csp
NUAGE_API_DATA['ROOT_UUIDS']['csproot_user'] = csproot.id
NUAGE_API_DATA['user'][csproot.id] = csproot
logging.info('Created base entities')
logging.debug(NUAGE_API_DATA)
def _string_clean(string):
rep = {
"IPID": "IpID",
"VCenter": "Vcenter",
"vCenter": "Vcenter",
"VPort": "Vport",
"IPv6": "Ipv6",
"IPv4": "Ipv4"
}
rep = dict((re.escape(k), v) for k, v in rep.items())
pattern = re.compile("|".join(list(rep.keys())))
return pattern.sub(lambda m: rep[re.escape(m.group(0))], string)
def get_idiomatic_name(name):
first_cap_re = re.compile("(.)([A-Z](?!s([A-Z])*)[a-z]+)")
all_cap_re = re.compile("([a-z0-9])([A-Z])")
s1 = first_cap_re.sub(r"\1_\2", _string_clean(name))
return all_cap_re.sub(r"\1_\2", s1).lower()
def get_singular_name(plural_name):
if plural_name in INVARIANT_RESOURCES:
return plural_name
if plural_name[-3:] == 'ies':
return plural_name[:-3] + 'y'
    if plural_name[-1:] == 's':
        return plural_name[:-1]
    # Fall back to the name unchanged when no plural suffix matches
    return plural_name
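# Illustrative behaviour of the naming helpers above (not executed at import time):
#
#   get_idiomatic_name('associatedVPortID')  ->  'associated_vport_id'
#   get_singular_name('subnets')             ->  'subnet'
#   get_singular_name('policies')            ->  'policy'
#   get_singular_name('qos')                 ->  'qos'   (listed in INVARIANT_RESOURCES)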
avg_line_length: 45.701375 | max_line_length: 137 | alphanum_fraction: 0.647852

hexsha: 140c1fa38ee008f7735f2f13fe8ecc4837780c2e | size: 416 | ext: py | lang: Python
repo_path: contact_form/helpers.py | repo_name: joebos/django-cbv-contact-form | repo_head_hexsha: a4905de7081906af7bb1ceac24a9c3243e07e536 | licenses: ["BSD-3-Clause"] (same repo fields for stars/issues/forks)
max_stars_count: 1 | stars_events: 2015-10-05T01:25:36.000Z to 2015-10-05T01:25:36.000Z | issues/forks counts and event dates: null
try:
from ipware.ip import get_ip, get_real_ip
except ImportError:
    raise ImportError('django-cbv-contact-form requires the django-ipware package')
def get_user_ip(request):
"""Return user ip
:param request: Django request object
:return: user ip
"""
ip = get_real_ip(request)
if ip is None:
ip = get_ip(request)
if ip is None:
ip = '127.0.0.1'
return ip
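# Usage sketch (assumes Django's RequestFactory; purely illustrative):
#
#   from django.test import RequestFactory
#   request = RequestFactory().get('/', REMOTE_ADDR='203.0.113.7')
#   get_user_ip(request)   # -> '203.0.113.7'; falls back to '127.0.0.1' if no IP is found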
avg_line_length: 21.894737 | max_line_length: 78 | alphanum_fraction: 0.637019

hexsha: 072f969dce494cdbabc660adcf5753fe41342c6f | size: 2019 | ext: py | lang: Python
repo_path: Individual.py | repo_name: thanhdeku/MFLTGA | repo_head_hexsha: 03517683d910f1d671db0eb6e352ecf47610cb1f | licenses: ["Apache-2.0"] (same repo fields for stars/issues/forks)
stars/issues/forks counts and event dates: null
import random
import sys
from Utils import random_Prufer
class Individual(object):
def __init__(self, genes, fitness, connector):
self.genes = genes
self.fitness = fitness
self.connector = connector
self.unchange = 0
    def updateSkillFactor(self, skillFactor):
        self.skillFactor = skillFactor
        for i in range(len(self.fitness)):
            # Compare integer indices by value, not identity
            if i != skillFactor:
                self.fitness[i] = sys.maxsize
    def updateUnchange(self):
        self.unchange += 1
def mutation(self, info, problems, lstDecodePos):
self.genes, self.connector = random_Prufer(info)
self.fitness = []
for j in range(len(problems)):
            if j == self.skillFactor:  # value comparison, not identity
self.fitness.append(problems[j].computeFitness(self.genes, self.connector, lstDecodePos[j]))
else:
self.fitness.append(sys.maxsize)
self.unchange = 0
def getDivNumber(self,index,info):
n = 0
max = 0
for i in range(len(info)+1):
            if i != len(info):  # value comparison, not identity
max += info[i]-2
if index < max:
n = i
else:
n = len(info)
return n
def __cmp__(self, other):
if self.fitness > other.fitness:
return 1
elif self.fitness < other.fitness:
return -1
else:
return 0
def __str__(self):
'''
Converts the individual into a string representation useful for
displaying an individual.
'''
return "%s" % (str(self.fitness))
def __int__(self):
'''
Converts a binary individual's genes into a single integer. Useful
for uniqueness checking.
'''
return int("".join(map(str, self.genes)), 2)
def __hash__(self):
'''
        Returns the value of ``__int__``, used when determining sets of unique
individuals.
'''
return int(self)
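# Worked example of the binary-genome conversion behind __int__/__hash__ (the
# constructor arguments here are placeholders, not values from this project):
#
#   ind = Individual(genes=[1, 0, 1, 1], fitness=[0], connector=None)
#   int(ind)    # int("1011", 2) == 11
#   hash(ind)   # 11, so individuals with identical genes collide in a set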
avg_line_length: 25.556962 | max_line_length: 108 | alphanum_fraction: 0.551758

hexsha: c89e83a8f3f1a6209ee1139928ceeb2fbe41d6ff | size: 573 | ext: py | lang: Python
repo_path: anondolok_library/admin_panel/migrations/0019_auto_20210808_1412.py | repo_name: Horraira/anondolokLibrary | repo_head_hexsha: f80a9f1b620235136bec0794a67882f4cb420d50 | licenses: ["MIT"] (same repo fields for stars/issues/forks)
stars/issues/forks counts and event dates: null
# Generated by Django 3.2.1 on 2021-08-08 08:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('admin_panel', '0018_alter_wish_list_unique_together'),
]
operations = [
migrations.AddField(
model_name='books',
name='accession_number',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='books',
name='price',
field=models.BigIntegerField(null=True),
),
]
avg_line_length: 23.875 | max_line_length: 64 | alphanum_fraction: 0.593368

hexsha: 1daea4367da6c0a19efe178a449bf1c2b0a9a1a4 | size: 395 | ext: py | lang: Python
repo_path: organizer/organizer/wsgi.py | repo_name: balusio/task-organizer | repo_head_hexsha: 23e294d9c4116412dcbc58d76cffdef1fbc0ed16 | licenses: ["MIT"] (same repo fields for stars/issues/forks)
stars/issues/forks counts and event dates: null
"""
WSGI config for organizer project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'organizer.settings')
application = get_wsgi_application()
avg_line_length: 23.235294 | max_line_length: 78 | alphanum_fraction: 0.787342

hexsha: 00888b38c6e9744d0cd4e9a60913138b132e33a1 | size: 10528 | ext: py | lang: Python
stars:  repo_path: superset/dashboards/schemas.py | repo_name: dylanli073/superset | repo_head_hexsha: 171514360e8ba821b2343706e74c9a537b76047c | licenses: ["Apache-2.0"] | max_stars_count: 2 | stars_events: 2020-08-13T15:01:37.000Z to 2021-04-08T20:27:09.000Z
issues: repo_path: superset/dashboards/schemas.py | repo_name: dylanli073/superset | repo_head_hexsha: 171514360e8ba821b2343706e74c9a537b76047c | licenses: ["Apache-2.0"] | max_issues_count: 62 | issues_events: 2020-05-06T22:51:53.000Z to 2022-03-28T20:49:17.000Z
forks:  repo_path: superset/dashboards/schemas.py | repo_name: paulo-amaral/incubator-superset | repo_head_hexsha: 5e64d65a8b8202b57b7a98275d4d50416211d49b | licenses: ["Apache-2.0"] | max_forks_count: null | forks_events: null
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import re
from typing import Any, Dict, Union
from marshmallow import fields, post_load, Schema
from marshmallow.validate import Length, ValidationError
from superset.exceptions import SupersetException
from superset.utils import core as utils
get_delete_ids_schema = {"type": "array", "items": {"type": "integer"}}
get_export_ids_schema = {"type": "array", "items": {"type": "integer"}}
get_fav_star_ids_schema = {"type": "array", "items": {"type": "integer"}}
thumbnail_query_schema = {
"type": "object",
"properties": {"force": {"type": "boolean"}},
}
dashboard_title_description = "A title for the dashboard."
slug_description = "Unique identifying part for the web address of the dashboard."
owners_description = (
"Owner are users ids allowed to delete or change this dashboard. "
"If left empty you will be one of the owners of the dashboard."
)
roles_description = (
"Roles is a list which defines access to the dashboard. "
"These roles are always applied in addition to restrictions on dataset "
"level access. "
"If no roles defined then the dashboard is available to all roles."
)
position_json_description = (
"This json object describes the positioning of the widgets "
"in the dashboard. It is dynamically generated when "
"adjusting the widgets size and positions by using "
"drag & drop in the dashboard view"
)
css_description = "Override CSS for the dashboard."
json_metadata_description = (
"This JSON object is generated dynamically when clicking "
"the save or overwrite button in the dashboard view. "
"It is exposed here for reference and for power users who may want to alter "
" specific parameters."
)
published_description = (
"Determines whether or not this dashboard is visible in "
"the list of all dashboards."
)
charts_description = (
"The names of the dashboard's charts. Names are used for legacy reasons."
)
openapi_spec_methods_override = {
"get": {"get": {"description": "Get a dashboard detail information."}},
"get_list": {
"get": {
"description": "Get a list of dashboards, use Rison or JSON query "
"parameters for filtering, sorting, pagination and "
" for selecting specific columns and metadata.",
}
},
"info": {
"get": {
"description": "Several metadata information about dashboard API "
"endpoints.",
}
},
"related": {
"get": {"description": "Get a list of all possible owners for a dashboard."}
},
}
def validate_json(value: Union[bytes, bytearray, str]) -> None:
try:
utils.validate_json(value)
except SupersetException:
raise ValidationError("JSON not valid")
def validate_json_metadata(value: Union[bytes, bytearray, str]) -> None:
if not value:
return
try:
value_obj = json.loads(value)
except json.decoder.JSONDecodeError:
raise ValidationError("JSON not valid")
errors = DashboardJSONMetadataSchema().validate(value_obj, partial=False)
if errors:
raise ValidationError(errors)
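# Usage sketch for the two validators above (hypothetical payloads):
#
#   validate_json('{"refresh_frequency": 30}')            # passes
#   validate_json_metadata('{"refresh_frequency": 30}')   # passes schema validation
#   validate_json_metadata('{not json}')                  # raises ValidationError("JSON not valid")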
class DashboardJSONMetadataSchema(Schema):
show_native_filters = fields.Boolean()
# native_filter_configuration is for dashboard-native filters
native_filter_configuration = fields.List(fields.Dict(), allow_none=True)
# chart_configuration for now keeps data about cross-filter scoping for charts
chart_configuration = fields.Dict()
# filter_sets_configuration is for dashboard-native filters
filter_sets_configuration = fields.List(fields.Dict(), allow_none=True)
timed_refresh_immune_slices = fields.List(fields.Integer())
# deprecated wrt dashboard-native filters
filter_scopes = fields.Dict()
expanded_slices = fields.Dict()
refresh_frequency = fields.Integer()
# deprecated wrt dashboard-native filters
default_filters = fields.Str()
stagger_refresh = fields.Boolean()
stagger_time = fields.Integer()
color_scheme = fields.Str(allow_none=True)
label_colors = fields.Dict()
# used for v0 import/export
import_time = fields.Integer()
remote_id = fields.Integer()
class UserSchema(Schema):
id = fields.Int()
username = fields.String()
first_name = fields.String()
last_name = fields.String()
class RolesSchema(Schema):
id = fields.Int()
name = fields.String()
class DashboardGetResponseSchema(Schema):
id = fields.Int()
slug = fields.String()
url = fields.String()
dashboard_title = fields.String(description=dashboard_title_description)
thumbnail_url = fields.String()
published = fields.Boolean()
css = fields.String(description=css_description)
json_metadata = fields.String(description=json_metadata_description)
position_json = fields.String(description=position_json_description)
changed_by_name = fields.String()
changed_by_url = fields.String()
changed_by = fields.Nested(UserSchema)
changed_on = fields.DateTime()
charts = fields.List(fields.String(description=charts_description))
owners = fields.List(fields.Nested(UserSchema))
roles = fields.List(fields.Nested(RolesSchema))
changed_on_humanized = fields.String(data_key="changed_on_delta_humanized")
class DatabaseSchema(Schema):
id = fields.Int()
name = fields.String()
backend = fields.String()
allow_multi_schema_metadata_fetch = fields.Bool() # pylint: disable=invalid-name
allows_subquery = fields.Bool()
allows_cost_estimate = fields.Bool()
allows_virtual_table_explore = fields.Bool()
explore_database_id = fields.Int()
class DashboardDatasetSchema(Schema):
id = fields.Int()
uid = fields.Str()
column_formats = fields.Dict()
database = fields.Nested(DatabaseSchema)
default_endpoint = fields.String()
filter_select = fields.Bool()
filter_select_enabled = fields.Bool()
is_sqllab_view = fields.Bool()
name = fields.Str()
datasource_name = fields.Str()
table_name = fields.Str()
type = fields.Str()
schema = fields.Str()
offset = fields.Int()
cache_timeout = fields.Int()
params = fields.Str()
perm = fields.Str()
edit_url = fields.Str()
sql = fields.Str()
select_star = fields.Str()
main_dttm_col = fields.Str()
health_check_message = fields.Str()
fetch_values_predicate = fields.Str()
template_params = fields.Str()
owners = fields.List(fields.Int())
columns = fields.List(fields.Dict())
column_types = fields.List(fields.Int())
metrics = fields.List(fields.Dict())
order_by_choices = fields.List(fields.List(fields.Str()))
verbose_map = fields.Dict(fields.Str(), fields.Str())
time_grain_sqla = fields.List(fields.List(fields.Str()))
granularity_sqla = fields.List(fields.List(fields.Str()))
class BaseDashboardSchema(Schema):
# pylint: disable=no-self-use,unused-argument
@post_load
def post_load(self, data: Dict[str, Any], **kwargs: Any) -> Dict[str, Any]:
if data.get("slug"):
data["slug"] = data["slug"].strip()
data["slug"] = data["slug"].replace(" ", "-")
data["slug"] = re.sub(r"[^\w\-]+", "", data["slug"])
return data
# pylint: disable=no-self-use,unused-argument
class DashboardPostSchema(BaseDashboardSchema):
dashboard_title = fields.String(
description=dashboard_title_description,
allow_none=True,
validate=Length(0, 500),
)
slug = fields.String(
description=slug_description, allow_none=True, validate=[Length(1, 255)]
)
owners = fields.List(fields.Integer(description=owners_description))
roles = fields.List(fields.Integer(description=roles_description))
position_json = fields.String(
description=position_json_description, validate=validate_json
)
css = fields.String()
json_metadata = fields.String(
description=json_metadata_description, validate=validate_json_metadata,
)
published = fields.Boolean(description=published_description)
class DashboardPutSchema(BaseDashboardSchema):
dashboard_title = fields.String(
description=dashboard_title_description,
allow_none=True,
validate=Length(0, 500),
)
slug = fields.String(
description=slug_description, allow_none=True, validate=Length(0, 255)
)
owners = fields.List(
fields.Integer(description=owners_description, allow_none=True)
)
roles = fields.List(fields.Integer(description=roles_description, allow_none=True))
position_json = fields.String(
description=position_json_description, allow_none=True, validate=validate_json
)
css = fields.String(description=css_description, allow_none=True)
json_metadata = fields.String(
description=json_metadata_description,
allow_none=True,
validate=validate_json_metadata,
)
published = fields.Boolean(description=published_description, allow_none=True)
class ChartFavStarResponseResult(Schema):
id = fields.Integer(description="The Chart id")
value = fields.Boolean(description="The FaveStar value")
class GetFavStarIdsSchema(Schema):
result = fields.List(
fields.Nested(ChartFavStarResponseResult),
description="A list of results for each corresponding chart in the request",
)
class ImportV1DashboardSchema(Schema):
dashboard_title = fields.String(required=True)
description = fields.String(allow_none=True)
css = fields.String(allow_none=True)
slug = fields.String(allow_none=True)
uuid = fields.UUID(required=True)
position = fields.Dict()
metadata = fields.Dict()
version = fields.String(required=True)
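# --- Illustrative sketch (added; not part of the original module) ---
# Assuming standard marshmallow semantics, DashboardPostSchema().load() validates a
# payload and the inherited BaseDashboardSchema.post_load hook normalises the slug:
#
#     data = DashboardPostSchema().load(
#         {"dashboard_title": "Sales KPIs", "slug": "  Sales KPIs "}
#     )
#     # data["slug"] == "Sales-KPIs"  (stripped, spaces -> "-", non [\w-] chars removed)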
| 36.303448
| 87
| 0.707067
|
dee7bc6218af7f5188683be4b140ff471267587d
| 722
|
py
|
Python
|
stubs/micropython-v1_13-266-esp32/urandom.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
stubs/micropython-v1_13-266-esp32/urandom.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
stubs/micropython-v1_13-266-esp32/urandom.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
"""
Module: 'urandom' on micropython-v1.13-266-esp32
"""
# MCU: {'ver': 'v1.13-266', 'port': 'esp32', 'arch': 'xtensawin', 'sysname': 'esp32', 'release': '1.13.0', 'name': 'micropython', 'mpy': 10757, 'version': '1.13.0', 'machine': 'ESP32 module (spiram) with ESP32', 'build': '266', 'nodename': 'esp32', 'platform': 'esp32', 'family': 'micropython'}
# Stubber: 1.5.4
from typing import Any
def choice(*args, **kwargs) -> Any:
...
def getrandbits(*args, **kwargs) -> Any:
...
def randint(*args, **kwargs) -> Any:
...
def random(*args, **kwargs) -> Any:
...
def randrange(*args, **kwargs) -> Any:
...
def seed(*args, **kwargs) -> Any:
...
def uniform(*args, **kwargs) -> Any:
...
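# --- Illustrative sketch (added; not part of the stub) ---
# The stub above only records signatures for tooling; on an actual MicroPython
# board the module behaves like a subset of CPython's `random`, e.g.:
#
#     import urandom
#     urandom.seed(1234)                 # optional, for reproducible sequences
#     word = urandom.getrandbits(32)     # 32-bit random integer
#     die = urandom.randint(1, 6)        # inclusive bounds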
| 20.628571
| 294
| 0.560942
|
dbbee67cae76d2f853ba4c18a1085ccc7acd864d
| 393
|
py
|
Python
|
LuoguCodes/P2293.py
|
Anguei/OI-Codes
|
0ef271e9af0619d4c236e314cd6d8708d356536a
|
[
"MIT"
] | null | null | null |
LuoguCodes/P2293.py
|
Anguei/OI-Codes
|
0ef271e9af0619d4c236e314cd6d8708d356536a
|
[
"MIT"
] | null | null | null |
LuoguCodes/P2293.py
|
Anguei/OI-Codes
|
0ef271e9af0619d4c236e314cd6d8708d356536a
|
[
"MIT"
] | null | null | null |
import math
a=int(input())
b=int(input())
l, r = 0, 1
while r ** a <= b:
#while math.pow(r, a) <= b:
l = r
r *= 2
while l + 1 < r:
mid = (l + r) // 2
if mid ** a <= b:
#if math.pow(mid, a) + 0.0000000001 <= b:
l = mid
else:
r = mid
if l ** a <= b:
#if math.pow(l, a) <= b:
print(l)
else:
print(r)
#print(math.floor(math.pow(b,1/a)+0.000000001))
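# --- Explanatory note (added; not part of the submission) ---
# The script computes floor(b ** (1 / a)) with integer arithmetic only: it first
# doubles r until r**a exceeds b, then binary-searches the largest l with l**a <= b,
# avoiding the float precision issues hinted at by the commented-out math.pow lines.
# Example: a = 2, b = 10 narrows to l = 3, since 3**2 = 9 <= 10 < 16 = 4**2.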
| 17.863636
| 47
| 0.468193
|
6d2c4f5587de860a7b8ebc00b2f002f99e85b2c3
| 63,748
|
py
|
Python
|
Lib/subprocess.py
|
odeke-em/cpython
|
9ceea101c15b499824dc82dbcc5aa5c676898881
|
[
"PSF-2.0"
] | null | null | null |
Lib/subprocess.py
|
odeke-em/cpython
|
9ceea101c15b499824dc82dbcc5aa5c676898881
|
[
"PSF-2.0"
] | null | null | null |
Lib/subprocess.py
|
odeke-em/cpython
|
9ceea101c15b499824dc82dbcc5aa5c676898881
|
[
"PSF-2.0"
] | null | null | null |
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several older modules and functions:
os.system
os.spawn*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=-1, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=True, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False, pass_fds=()):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On POSIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On POSIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize will be supplied as the corresponding argument to the io.open()
function when creating the stdin/stdout/stderr pipe file objects:
0 means unbuffered (read & write are one system call and can return short),
1 means line buffered, any other positive value means use a buffer of
approximately that size. A negative bufsize, the default, means the system
default of io.DEFAULT_BUFFER_SIZE will be used.
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
On POSIX, if preexec_fn is set to a callable object, this object will be
called in the child process just before the child is executed. The use
of preexec_fn is not thread safe, using it in the presence of threads
could lead to a deadlock in the child process before the new executable
is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed. The default for close_fds
varies by platform: Always true on POSIX. True when stdin/stdout/stderr
are None on Windows, false otherwise.
pass_fds is an optional sequence of file descriptors to keep open between the
parent and child. Providing any pass_fds implicitly sets close_fds to true.
If shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
On POSIX, if restore_signals is True all signals that Python sets to
SIG_IGN are restored to SIG_DFL in the child process before the exec.
Currently this includes the SIGPIPE, SIGXFZ and SIGXFSZ signals. This
parameter does nothing on Windows.
On POSIX, if start_new_session is True, the setsid() system call will be made
in the child process prior to executing the command.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is False, the file objects stdin, stdout and stderr
are opened as binary files, and no line ending conversion is done.
If universal_newlines is True, the file objects stdout and stderr are
opened as a text file, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the old Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Also, the newlines attribute
of the file objects stdout, stdin and stderr are not updated by the
communicate() method.
In either case, the process being communicated with should start up
expecting to receive bytes on its standard input and decode them with
the same encoding they are sent in.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines some shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> retcode = subprocess.call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> subprocess.check_call(["ls", "-l"])
0
getstatusoutput(cmd):
Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with 'check_output' and
return a 2-tuple (status, output). Universal newlines mode is used,
    meaning that the result will be decoded to a string.
A trailing newline is stripped from the output.
The exit status for the command can be interpreted
according to the rules for the function 'wait'. Example:
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
getoutput(cmd):
Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
check_output(*popenargs, **kwargs):
Run command with arguments and return its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> output = subprocess.check_output(["ls", "-l", "/dev/null"])
There is an additional optional argument, "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument.
If universal_newlines is set to True, the "input" argument must
be a string rather than bytes, and the return value will be a string.
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
Exceptions defined within this module inherit from SubprocessError.
check_call() and check_output() will raise CalledProcessError if the
called process returns a non-zero return code. TimeoutExpired
will be raised if a timeout was specified and expired.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be data to be
sent to the child process, or None, if no data should be sent to
the child. If the Popen instance was constructed with universal_newlines
set to True, the input argument should be a string and will be encoded
using the preferred system encoding (see locale.getpreferredencoding);
if universal_newlines is False, the input argument should be a
byte string.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
    If the stderr argument is PIPE, this attribute is a file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (POSIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
else:
print("Child returned", retcode, file=sys.stderr)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
"""
import sys
mswindows = (sys.platform == "win32")
import io
import os
import time
import signal
import builtins
import warnings
import errno
from time import monotonic as _time
# Exception classes used by this module.
class SubprocessError(Exception): pass
class CalledProcessError(SubprocessError):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
class TimeoutExpired(SubprocessError):
"""This exception is raised when the timeout expires while waiting for a
child process.
"""
def __init__(self, cmd, timeout, output=None):
self.cmd = cmd
self.timeout = timeout
self.output = output
def __str__(self):
return ("Command '%s' timed out after %s seconds" %
(self.cmd, self.timeout))
if mswindows:
import threading
import msvcrt
import _winapi
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
else:
import _posixsubprocess
import select
import selectors
try:
import threading
except ImportError:
import dummy_threading as threading
# When select or poll has indicated that the file is writable,
# we can write up to _PIPE_BUF bytes without risk of blocking.
# POSIX defines PIPE_BUF as >= 512.
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512)
# poll/select have the advantage of not requiring any extra file
# descriptor, contrarily to epoll/kqueue (also, they require a single
# syscall).
if hasattr(selectors, 'PollSelector'):
_PopenSelector = selectors.PollSelector
else:
_PopenSelector = selectors.SelectSelector
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput",
"getoutput", "check_output", "CalledProcessError", "DEVNULL"]
if mswindows:
from _winapi import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP,
STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
STD_ERROR_HANDLE, SW_HIDE,
STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW)
__all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
"STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
"STD_ERROR_HANDLE", "SW_HIDE",
"STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"])
class Handle(int):
closed = False
def Close(self, CloseHandle=_winapi.CloseHandle):
if not self.closed:
self.closed = True
CloseHandle(self)
def Detach(self):
if not self.closed:
self.closed = True
return int(self)
raise ValueError("already closed")
def __repr__(self):
return "%s(%d)" % (self.__class__.__name__, int(self))
__del__ = Close
__str__ = __repr__
# This list holds Popen instances for which the underlying process had not
# exited at the time its __del__ method got called: those processes are wait()ed
# for synchronously from _cleanup() when a new Popen object is created, to avoid
# zombie processes.
_active = []
def _cleanup():
for inst in _active[:]:
res = inst._internal_poll(_deadstate=sys.maxsize)
if res is not None:
try:
_active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
PIPE = -1
STDOUT = -2
DEVNULL = -3
# XXX This function is only used by multiprocessing and the test suite,
# but it's here so that it can be imported when Python is compiled without
# threads.
def _args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions."""
flag_opt_map = {
'debug': 'd',
# 'inspect': 'i',
# 'interactive': 'i',
'optimize': 'O',
'dont_write_bytecode': 'B',
'no_user_site': 's',
'no_site': 'S',
'ignore_environment': 'E',
'verbose': 'v',
'bytes_warning': 'b',
'quiet': 'q',
'hash_randomization': 'R',
}
args = []
for flag, opt in flag_opt_map.items():
v = getattr(sys.flags, flag)
if v > 0:
if flag == 'hash_randomization':
v = 1 # Handle specification of an exact seed
args.append('-' + opt * v)
for opt in sys.warnoptions:
args.append('-W' + opt)
return args
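# --- Example (added for illustration; not part of the module) ---
# For an interpreter started as `python -B -O -W error`, the list built above would
# contain '-B', '-O' and '-Werror' (ordering follows flag_opt_map and sys.warnoptions).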
def call(*popenargs, timeout=None, **kwargs):
"""Run command with arguments. Wait for command to complete or
timeout, then return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
with Popen(*popenargs, **kwargs) as p:
try:
return p.wait(timeout=timeout)
except:
p.kill()
p.wait()
raise
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the call function. Example:
check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return 0
def check_output(*popenargs, timeout=None, **kwargs):
r"""Run command with arguments and return its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
b'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
b'ls: non_existent_file: No such file or directory\n'
There is an additional optional argument, "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it too will be used internally. Example:
>>> check_output(["sed", "-e", "s/foo/bar/"],
... input=b"when in the course of fooman events\n")
b'when in the course of barman events\n'
If universal_newlines=True is passed, the "input" argument must be a
string and the return value will be a string rather than bytes.
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
if 'input' in kwargs:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
inputdata = kwargs['input']
del kwargs['input']
kwargs['stdin'] = PIPE
else:
inputdata = None
with Popen(*popenargs, stdout=PIPE, **kwargs) as process:
try:
output, unused_err = process.communicate(inputdata, timeout=timeout)
except TimeoutExpired:
process.kill()
output, unused_err = process.communicate()
raise TimeoutExpired(process.args, timeout, output=output)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if retcode:
raise CalledProcessError(retcode, process.args, output=output)
return output
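# --- Usage sketch (added for illustration; not part of the module) ---
# check_output() accepts the same timeout= keyword as call(); on expiry the child is
# killed and TimeoutExpired is re-raised with the partial output attached:
#
#     try:
#         out = check_output(["sleep", "10"], timeout=1)
#     except TimeoutExpired as exc:
#         partial = exc.output                      # bytes read before the timeout
#     except CalledProcessError as exc:
#         code, out = exc.returncode, exc.output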
def list2cmdline(seq):
"""
Translate a sequence of arguments into a command line
string, using the same rules as the MS C runtime:
1) Arguments are delimited by white space, which is either a
space or a tab.
2) A string surrounded by double quotation marks is
interpreted as a single argument, regardless of white space
contained within. A quoted string can be embedded in an
argument.
3) A double quotation mark preceded by a backslash is
interpreted as a literal double quotation mark.
4) Backslashes are interpreted literally, unless they
immediately precede a double quotation mark.
5) If backslashes immediately precede a double quotation mark,
every pair of backslashes is interpreted as a literal
backslash. If the number of backslashes is odd, the last
backslash escapes the next double quotation mark as
described in rule 3.
"""
# See
# http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
# or search http://msdn.microsoft.com for
# "Parsing C++ Command-Line Arguments"
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
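# --- Worked example (added for illustration; not part of the module) ---
# Following the MS C runtime rules above, only arguments containing whitespace (or
# empty arguments) are quoted:
#
#     list2cmdline(["echo", "hello world"])  ->  'echo "hello world"'
#     list2cmdline(["a", ""])                ->  'a ""'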
# Various tools for executing commands and looking at their output and status.
#
def getstatusoutput(cmd):
""" Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with 'check_output' and
return a 2-tuple (status, output). Universal newlines mode is used,
    meaning that the result will be decoded to a string.
A trailing newline is stripped from the output.
The exit status for the command can be interpreted
according to the rules for the function 'wait'. Example:
>>> import subprocess
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
"""
try:
data = check_output(cmd, shell=True, universal_newlines=True, stderr=STDOUT)
status = 0
except CalledProcessError as ex:
data = ex.output
status = ex.returncode
if data[-1:] == '\n':
data = data[:-1]
return status, data
def getoutput(cmd):
"""Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> import subprocess
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
"""
return getstatusoutput(cmd)[1]
_PLATFORM_DEFAULT_CLOSE_FDS = object()
class Popen(object):
_child_created = False # Set here since __del__ checks it
def __init__(self, args, bufsize=-1, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS,
shell=False, cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False,
pass_fds=()):
"""Create new Popen instance."""
_cleanup()
# Held while anything is calling waitpid before returncode has been
# updated to prevent clobbering returncode if wait() or poll() are
# called from multiple threads at once. After acquiring the lock,
# code must re-check self.returncode to see if another thread just
# finished a waitpid() call.
self._waitpid_lock = threading.Lock()
self._input = None
self._communication_started = False
if bufsize is None:
bufsize = -1 # Restore default
if not isinstance(bufsize, int):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
any_stdio_set = (stdin is not None or stdout is not None or
stderr is not None)
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
if any_stdio_set:
close_fds = False
else:
close_fds = True
elif close_fds and any_stdio_set:
raise ValueError(
"close_fds is not supported on Windows platforms"
" if you redirect stdin/stdout/stderr")
else:
# POSIX
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
close_fds = True
if pass_fds and not close_fds:
warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
close_fds = True
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.args = args
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are -1 when not using PIPEs. The child objects are -1
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
# We wrap OS handles *before* launching the child, otherwise a
# quickly terminating child could make our fds unwrappable
# (see #8458).
if mswindows:
if p2cwrite != -1:
p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
if c2pread != -1:
c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
if errread != -1:
errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite != -1:
self.stdin = io.open(p2cwrite, 'wb', bufsize)
if universal_newlines:
self.stdin = io.TextIOWrapper(self.stdin, write_through=True,
line_buffering=(bufsize == 1))
if c2pread != -1:
self.stdout = io.open(c2pread, 'rb', bufsize)
if universal_newlines:
self.stdout = io.TextIOWrapper(self.stdout)
if errread != -1:
self.stderr = io.open(errread, 'rb', bufsize)
if universal_newlines:
self.stderr = io.TextIOWrapper(self.stderr)
self._closed_child_pipe_fds = False
try:
self._execute_child(args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session)
except:
# Cleanup if the child failed starting.
for f in filter(None, (self.stdin, self.stdout, self.stderr)):
try:
f.close()
except OSError:
pass # Ignore EBADF or other errors.
if not self._closed_child_pipe_fds:
to_close = []
if stdin == PIPE:
to_close.append(p2cread)
if stdout == PIPE:
to_close.append(c2pwrite)
if stderr == PIPE:
to_close.append(errwrite)
if hasattr(self, '_devnull'):
to_close.append(self._devnull)
for fd in to_close:
try:
os.close(fd)
except OSError:
pass
raise
def _translate_newlines(self, data, encoding):
data = data.decode(encoding)
return data.replace("\r\n", "\n").replace("\r", "\n")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
try: # Flushing a BufferedWriter may raise an error
if self.stdin:
self.stdin.close()
finally:
# Wait for the process to terminate, to avoid zombies.
self.wait()
def __del__(self, _maxsize=sys.maxsize):
if not self._child_created:
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self._internal_poll(_deadstate=_maxsize)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def _get_devnull(self):
if not hasattr(self, '_devnull'):
self._devnull = os.open(os.devnull, os.O_RDWR)
return self._devnull
def _stdin_write(self, input):
if input:
try:
self.stdin.write(input)
except BrokenPipeError:
# communicate() must ignore broken pipe error
pass
except OSError as e:
if e.errno == errno.EINVAL and self.poll() is not None:
# Issue #19612: On Windows, stdin.write() fails with EINVAL
# if the process already exited before the write
pass
else:
raise
self.stdin.close()
def communicate(self, input=None, timeout=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate.
The optional "input" argument should be data to be sent to the
child process (if self.universal_newlines is True, this should
be a string; if it is False, "input" should be bytes), or
None, if no data should be sent to the child.
communicate() returns a tuple (stdout, stderr). These will be
bytes or, if self.universal_newlines was True, a string.
"""
if self._communication_started and input:
raise ValueError("Cannot send input after starting communication")
# Optimization: If we are not worried about timeouts, we haven't
# started communicating, and we have one or zero pipes, using select()
# or threads is unnecessary.
if (timeout is None and not self._communication_started and
[self.stdin, self.stdout, self.stderr].count(None) >= 2):
stdout = None
stderr = None
if self.stdin:
self._stdin_write(input)
elif self.stdout:
stdout = self.stdout.read()
self.stdout.close()
elif self.stderr:
stderr = self.stderr.read()
self.stderr.close()
self.wait()
else:
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
try:
stdout, stderr = self._communicate(input, endtime, timeout)
finally:
self._communication_started = True
sts = self.wait(timeout=self._remaining_time(endtime))
return (stdout, stderr)
def poll(self):
return self._internal_poll()
def _remaining_time(self, endtime):
"""Convenience for _communicate when computing timeouts."""
if endtime is None:
return None
else:
return endtime - _time()
def _check_timeout(self, endtime, orig_timeout):
"""Convenience for checking if a timeout has expired."""
if endtime is None:
return
if _time() > endtime:
raise TimeoutExpired(self.args, orig_timeout)
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (-1, -1, -1, -1, -1, -1)
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _winapi.CreatePipe(None, 0)
p2cread = Handle(p2cread)
_winapi.CloseHandle(_)
elif stdin == PIPE:
p2cread, p2cwrite = _winapi.CreatePipe(None, 0)
p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite)
elif stdin == DEVNULL:
p2cread = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _winapi.CreatePipe(None, 0)
c2pwrite = Handle(c2pwrite)
_winapi.CloseHandle(_)
elif stdout == PIPE:
c2pread, c2pwrite = _winapi.CreatePipe(None, 0)
c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite)
elif stdout == DEVNULL:
c2pwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _winapi.CreatePipe(None, 0)
errwrite = Handle(errwrite)
_winapi.CloseHandle(_)
elif stderr == PIPE:
errread, errwrite = _winapi.CreatePipe(None, 0)
errread, errwrite = Handle(errread), Handle(errwrite)
elif stderr == STDOUT:
errwrite = c2pwrite
elif stderr == DEVNULL:
errwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
h = _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(), handle,
_winapi.GetCurrentProcess(), 0, 1,
_winapi.DUPLICATE_SAME_ACCESS)
return Handle(h)
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
unused_restore_signals, unused_start_new_session):
"""Execute program (MS Windows version)"""
assert not pass_fds, "pass_fds not supported on Windows."
if not isinstance(args, str):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if -1 not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _winapi.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = '{} /c "{}"'.format (comspec, args)
# Start the process
try:
hp, ht, pid, tid = _winapi.CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
finally:
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread != -1:
p2cread.Close()
if c2pwrite != -1:
c2pwrite.Close()
if errwrite != -1:
errwrite.Close()
if hasattr(self, '_devnull'):
os.close(self._devnull)
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = Handle(hp)
self.pid = pid
_winapi.CloseHandle(ht)
def _internal_poll(self, _deadstate=None,
_WaitForSingleObject=_winapi.WaitForSingleObject,
_WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0,
_GetExitCodeProcess=_winapi.GetExitCodeProcess):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it can only refer to objects
in its local scope.
"""
if self.returncode is None:
if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
self.returncode = _GetExitCodeProcess(self._handle)
return self.returncode
def wait(self, timeout=None, endtime=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
if endtime is not None:
timeout = self._remaining_time(endtime)
if timeout is None:
timeout_millis = _winapi.INFINITE
else:
timeout_millis = int(timeout * 1000)
if self.returncode is None:
result = _winapi.WaitForSingleObject(self._handle,
timeout_millis)
if result == _winapi.WAIT_TIMEOUT:
raise TimeoutExpired(self.args, timeout)
self.returncode = _winapi.GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
fh.close()
def _communicate(self, input, endtime, orig_timeout):
# Start reader threads feeding into a list hanging off of this
# object, unless they've already been started.
if self.stdout and not hasattr(self, "_stdout_buff"):
self._stdout_buff = []
self.stdout_thread = \
threading.Thread(target=self._readerthread,
args=(self.stdout, self._stdout_buff))
self.stdout_thread.daemon = True
self.stdout_thread.start()
if self.stderr and not hasattr(self, "_stderr_buff"):
self._stderr_buff = []
self.stderr_thread = \
threading.Thread(target=self._readerthread,
args=(self.stderr, self._stderr_buff))
self.stderr_thread.daemon = True
self.stderr_thread.start()
if self.stdin:
self._stdin_write(input)
# Wait for the reader threads, or time out. If we time out, the
# threads remain reading and the fds left open in case the user
# calls communicate again.
if self.stdout is not None:
self.stdout_thread.join(self._remaining_time(endtime))
if self.stdout_thread.is_alive():
raise TimeoutExpired(self.args, orig_timeout)
if self.stderr is not None:
self.stderr_thread.join(self._remaining_time(endtime))
if self.stderr_thread.is_alive():
raise TimeoutExpired(self.args, orig_timeout)
# Collect the output from and close both pipes, now that we know
# both have been read successfully.
stdout = None
stderr = None
if self.stdout:
stdout = self._stdout_buff
self.stdout.close()
if self.stderr:
stderr = self._stderr_buff
self.stderr.close()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
"""Terminates the process
"""
try:
_winapi.TerminateProcess(self._handle, 1)
except PermissionError:
# ERROR_ACCESS_DENIED (winerror 5) is received when the
# process already died.
rc = _winapi.GetExitCodeProcess(self._handle)
if rc == _winapi.STILL_ACTIVE:
raise
self.returncode = rc
kill = terminate
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = os.pipe()
elif stdin == DEVNULL:
p2cread = self._get_devnull()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = os.pipe()
elif stdout == DEVNULL:
c2pwrite = self._get_devnull()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = os.pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif stderr == DEVNULL:
errwrite = self._get_devnull()
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session):
"""Execute program (POSIX version)"""
if isinstance(args, (str, bytes)):
args = [args]
else:
args = list(args)
if shell:
args = ["/bin/sh", "-c"] + args
if executable:
args[0] = executable
if executable is None:
executable = args[0]
orig_executable = executable
# For transferring possible exec failure from child to parent.
# Data format: "exception name:hex errno:description"
# Pickle is not used; it is complex and involves memory allocation.
errpipe_read, errpipe_write = os.pipe()
# errpipe_write must not be in the standard io 0, 1, or 2 fd range.
low_fds_to_close = []
while errpipe_write < 3:
low_fds_to_close.append(errpipe_write)
errpipe_write = os.dup(errpipe_write)
for low_fd in low_fds_to_close:
os.close(low_fd)
try:
try:
# We must avoid complex work that could involve
# malloc or free in the child process to avoid
# potential deadlocks, thus we do all this here.
# and pass it to fork_exec()
if env is not None:
env_list = [os.fsencode(k) + b'=' + os.fsencode(v)
for k, v in env.items()]
else:
env_list = None # Use execv instead of execve.
executable = os.fsencode(executable)
if os.path.dirname(executable):
executable_list = (executable,)
else:
# This matches the behavior of os._execvpe().
executable_list = tuple(
os.path.join(os.fsencode(dir), executable)
for dir in os.get_exec_path(env))
fds_to_keep = set(pass_fds)
fds_to_keep.add(errpipe_write)
self.pid = _posixsubprocess.fork_exec(
args, executable_list,
close_fds, sorted(fds_to_keep), cwd, env_list,
p2cread, p2cwrite, c2pread, c2pwrite,
errread, errwrite,
errpipe_read, errpipe_write,
restore_signals, start_new_session, preexec_fn)
self._child_created = True
finally:
# be sure the FD is closed no matter what
os.close(errpipe_write)
# self._devnull is not always defined.
devnull_fd = getattr(self, '_devnull', None)
if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd:
os.close(p2cread)
if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd:
os.close(c2pwrite)
if errwrite != -1 and errread != -1 and errwrite != devnull_fd:
os.close(errwrite)
if devnull_fd is not None:
os.close(devnull_fd)
# Prevent a double close of these fds from __init__ on error.
self._closed_child_pipe_fds = True
# Wait for exec to fail or succeed; possibly raising an
# exception (limited in size)
errpipe_data = bytearray()
while True:
part = os.read(errpipe_read, 50000)
errpipe_data += part
if not part or len(errpipe_data) > 50000:
break
finally:
# be sure the FD is closed no matter what
os.close(errpipe_read)
if errpipe_data:
try:
os.waitpid(self.pid, 0)
except ChildProcessError:
pass
try:
exception_name, hex_errno, err_msg = (
errpipe_data.split(b':', 2))
except ValueError:
exception_name = b'SubprocessError'
hex_errno = b'0'
err_msg = (b'Bad exception data from child: ' +
repr(errpipe_data))
child_exception_type = getattr(
builtins, exception_name.decode('ascii'),
SubprocessError)
err_msg = err_msg.decode(errors="surrogatepass")
if issubclass(child_exception_type, OSError) and hex_errno:
errno_num = int(hex_errno, 16)
child_exec_never_called = (err_msg == "noexec")
if child_exec_never_called:
err_msg = ""
if errno_num != 0:
err_msg = os.strerror(errno_num)
if errno_num == errno.ENOENT:
if child_exec_never_called:
# The error must be from chdir(cwd).
err_msg += ': ' + repr(cwd)
else:
err_msg += ': ' + repr(orig_executable)
raise child_exception_type(errno_num, err_msg)
raise child_exception_type(err_msg)
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
"""All callers to this function MUST hold self._waitpid_lock."""
# This method is called (indirectly) by __del__, so it cannot
# refer to anything outside of its local scope.
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
else:
# Should never happen
raise SubprocessError("Unknown child exit status!")
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
_WNOHANG=os.WNOHANG, _ECHILD=errno.ECHILD):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it cannot reference anything
outside of the local scope (nor can any methods it calls).
"""
if self.returncode is None:
if not self._waitpid_lock.acquire(False):
# Something else is busy calling waitpid. Don't allow two
# at once. We know nothing yet.
return None
try:
if self.returncode is not None:
return self.returncode # Another thread waited.
pid, sts = _waitpid(self.pid, _WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except OSError as e:
if _deadstate is not None:
self.returncode = _deadstate
elif e.errno == _ECHILD:
# This happens if SIGCLD is set to be ignored or
# waiting for child processes has otherwise been
# disabled for our process. This child is dead, we
# can't get the status.
# http://bugs.python.org/issue15756
self.returncode = 0
finally:
self._waitpid_lock.release()
return self.returncode
def _try_wait(self, wait_flags):
"""All callers to this function MUST hold self._waitpid_lock."""
try:
(pid, sts) = os.waitpid(self.pid, wait_flags)
except ChildProcessError:
# This happens if SIGCLD is set to be ignored or waiting
# for child processes has otherwise been disabled for our
# process. This child is dead, we can't get the status.
pid = self.pid
sts = 0
return (pid, sts)
def wait(self, timeout=None, endtime=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is not None:
return self.returncode
# endtime is preferred to timeout. timeout is only used for
# printing.
if endtime is not None or timeout is not None:
if endtime is None:
endtime = _time() + timeout
elif timeout is None:
timeout = self._remaining_time(endtime)
if endtime is not None:
# Enter a busy loop if we have a timeout. This busy loop was
# cribbed from Lib/threading.py in Thread.wait() at r71065.
delay = 0.0005 # 500 us -> initial delay of 1 ms
while True:
if self._waitpid_lock.acquire(False):
try:
if self.returncode is not None:
break # Another thread waited.
(pid, sts) = self._try_wait(os.WNOHANG)
assert pid == self.pid or pid == 0
if pid == self.pid:
self._handle_exitstatus(sts)
break
finally:
self._waitpid_lock.release()
remaining = self._remaining_time(endtime)
if remaining <= 0:
raise TimeoutExpired(self.args, timeout)
delay = min(delay * 2, remaining, .05)
time.sleep(delay)
else:
while self.returncode is None:
with self._waitpid_lock:
if self.returncode is not None:
break # Another thread waited.
(pid, sts) = self._try_wait(0)
# Check the pid and loop as waitpid has been known to
# return 0 even without WNOHANG in odd situations.
# http://bugs.python.org/issue14396.
if pid == self.pid:
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input, endtime, orig_timeout):
if self.stdin and not self._communication_started:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if not input:
self.stdin.close()
stdout = None
stderr = None
# Only create this mapping if we haven't already.
if not self._communication_started:
self._fileobj2output = {}
if self.stdout:
self._fileobj2output[self.stdout] = []
if self.stderr:
self._fileobj2output[self.stderr] = []
if self.stdout:
stdout = self._fileobj2output[self.stdout]
if self.stderr:
stderr = self._fileobj2output[self.stderr]
self._save_input(input)
if self._input:
input_view = memoryview(self._input)
with _PopenSelector() as selector:
if self.stdin and input:
selector.register(self.stdin, selectors.EVENT_WRITE)
if self.stdout:
selector.register(self.stdout, selectors.EVENT_READ)
if self.stderr:
selector.register(self.stderr, selectors.EVENT_READ)
while selector.get_map():
timeout = self._remaining_time(endtime)
if timeout is not None and timeout < 0:
raise TimeoutExpired(self.args, orig_timeout)
ready = selector.select(timeout)
self._check_timeout(endtime, orig_timeout)
# XXX Rewrite these to use non-blocking I/O on the file
# objects; they are no longer using C stdio!
for key, events in ready:
if key.fileobj is self.stdin:
chunk = input_view[self._input_offset :
self._input_offset + _PIPE_BUF]
try:
self._input_offset += os.write(key.fd, chunk)
except BrokenPipeError:
selector.unregister(key.fileobj)
key.fileobj.close()
else:
if self._input_offset >= len(self._input):
selector.unregister(key.fileobj)
key.fileobj.close()
elif key.fileobj in (self.stdout, self.stderr):
data = os.read(key.fd, 32768)
if not data:
selector.unregister(key.fileobj)
key.fileobj.close()
self._fileobj2output[key.fileobj].append(data)
self.wait(timeout=self._remaining_time(endtime))
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = b''.join(stdout)
if stderr is not None:
stderr = b''.join(stderr)
# Translate newlines, if requested.
# This also turns bytes into strings.
if self.universal_newlines:
if stdout is not None:
stdout = self._translate_newlines(stdout,
self.stdout.encoding)
if stderr is not None:
stderr = self._translate_newlines(stderr,
self.stderr.encoding)
return (stdout, stderr)
def _save_input(self, input):
# This method is called from the _communicate_with_*() methods
# so that if we time out while communicating, we can continue
# sending input if we retry.
if self.stdin and self._input is None:
self._input_offset = 0
self._input = input
if self.universal_newlines and input is not None:
self._input = self._input.encode(self.stdin.encoding)
def send_signal(self, sig):
"""Send a signal to the process
"""
os.kill(self.pid, sig)
def terminate(self):
"""Terminate the process with SIGTERM
"""
self.send_signal(signal.SIGTERM)
def kill(self):
"""Kill the process with SIGKILL
"""
self.send_signal(signal.SIGKILL)
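# --- End-of-module usage sketch (added for illustration; not part of the module) ---
# Popen is a context manager (see __exit__ above): on exit the pipes are closed and
# the child is wait()ed, which avoids leaving zombies behind:
#
#     with Popen(["ls", "-l"], stdout=PIPE) as proc:
#         out, _ = proc.communicate(timeout=5)
#     print(out.decode())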
| 38.058507
| 92
| 0.575265
|
a47045e09d9ec5a2138981d63e5a6287e6b49cee
| 3,825
|
py
|
Python
|
python/cuxfilter/dataframe.py
|
sean-frye/cuxfilter
|
e7291b819b01907da142f585112da66f7231d888
|
[
"Apache-2.0"
] | null | null | null |
python/cuxfilter/dataframe.py
|
sean-frye/cuxfilter
|
e7291b819b01907da142f585112da66f7231d888
|
[
"Apache-2.0"
] | null | null | null |
python/cuxfilter/dataframe.py
|
sean-frye/cuxfilter
|
e7291b819b01907da142f585112da66f7231d888
|
[
"Apache-2.0"
] | null | null | null |
import cudf
import pyarrow as pa
from typing import Type
from .dashboard import DashBoard
from .layouts import single_feature
from .themes import light
from .assets import notebook_assets
def read_arrow(source):
# print('reading arrow file as arrow table from disk')
reader = pa.RecordBatchStreamReader(source)
pa_df = reader.read_all()
return pa_df
# class DataFrame:
class DataFrame:
"""
A cuxfilter GPU DataFrame object
"""
data: Type[cudf.DataFrame] = None
@classmethod
def from_arrow(cls, dataframe_location):
"""
read an arrow file from disk as cuxfilter.DataFrame
Parameters
----------
dataframe_location: str or arrow in-memory table
Returns
-------
cuxfilter.DataFrame object
Examples
--------
Read dataframe as an arrow file from disk
>>> import cuxfilter
>>> cux_df = cuxfilter.DataFrame.from_arrow(
'./location/of/dataframe.arrow'
)
"""
if type(dataframe_location) == str:
df = cudf.DataFrame.from_arrow(read_arrow(dataframe_location))
else:
df = cudf.DataFrame.from_arrow(dataframe_location)
return DataFrame(df)
@classmethod
def from_dataframe(cls, dataframe):
"""
create a cuxfilter.DataFrame from cudf.DataFrame/dask_cudf.DataFrame
(zero-copy reference)
Parameters
----------
        dataframe: cudf.DataFrame or dask_cudf.DataFrame
Returns
-------
cuxfilter.DataFrame object
Examples
--------
Read dataframe from a cudf.DataFrame/dask_cudf.DataFrame
>>> import cuxfilter
>>> import cudf
>>> cudf_df = cudf.DataFrame(
>>> {
>>> 'key': [0, 1, 2, 3, 4],
>>> 'val':[float(i + 10) for i in range(5)]
>>> }
>>> )
>>> cux_df = cuxfilter.DataFrame.from_dataframe(cudf_df)
"""
return DataFrame(dataframe)
def __init__(self, data):
# pn.extension()
self.backup = data
self.data = data
def dashboard(
self,
charts: list,
layout=single_feature,
theme=light,
title="Dashboard",
data_size_widget=True,
warnings=False,
):
"""
Creates a cuxfilter.DashBoard object
Parameters
----------
charts: list
list of cuxfilter.charts
layout: cuxfilter.layouts
title: str
title of the dashboard, default "Dashboard"
data_size_widget: boolean
            flag to determine whether to display the current datapoints
selected in the dashboard, default True
warnings: boolean
flag to disable or enable runtime warnings related to layouts,
default False
Examples
--------
>>> import cudf
>>> import cuxfilter
>>> from cuxfilter.charts import bokeh
>>> df = cudf.DataFrame(
>>> {
>>> 'key': [0, 1, 2, 3, 4],
>>> 'val':[float(i + 10) for i in range(5)]
>>> }
>>> )
>>> cux_df = cuxfilter.DataFrame.from_dataframe(df)
>>> line_chart_1 = bokeh.line(
>>> 'key', 'val', data_points=5, add_interaction=False
>>> )
>>> # create a dashboard object
>>> d = cux_df.dashboard([line_chart_1])
Returns
-------
cuxfilter.DashBoard object
"""
if notebook_assets.pn.config.js_files == {}:
notebook_assets.load_notebook_assets()
return DashBoard(
charts, self.data, layout, theme, title, data_size_widget, warnings
)
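# --- Usage sketch (added for illustration; not part of the module) ---
# Typical flow, assuming a GPU environment with cudf and the bokeh charts available:
#
#     import cudf, cuxfilter
#     from cuxfilter.charts import bokeh
#
#     gdf = cudf.DataFrame({'key': [0, 1, 2, 3, 4],
#                           'val': [float(i + 10) for i in range(5)]})
#     cux_df = cuxfilter.DataFrame.from_dataframe(gdf)
#     d = cux_df.dashboard([bokeh.line('key', 'val')], title='Demo')
#     # `d` is the DashBoard object returned above, ready to be served or embedded.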
| 24.837662
| 79
| 0.55268
|
fa114d5e628becd1999d91a5e97a9eb9f982fb8e
| 658
|
py
|
Python
|
detection/configs/faster_rcnn_r50_fpn_1x_dota15.py
|
chandlerbing65nm/PVT
|
e171519b2a1a44e36ebdf0732f274a190b50ce29
|
[
"Apache-2.0"
] | null | null | null |
detection/configs/faster_rcnn_r50_fpn_1x_dota15.py
|
chandlerbing65nm/PVT
|
e171519b2a1a44e36ebdf0732f274a190b50ce29
|
[
"Apache-2.0"
] | null | null | null |
detection/configs/faster_rcnn_r50_fpn_1x_dota15.py
|
chandlerbing65nm/PVT
|
e171519b2a1a44e36ebdf0732f274a190b50ce29
|
[
"Apache-2.0"
] | null | null | null |
_base_ = [
'_base_/models/faster_rcnn_r50_fpn_dota15.py',
'_base_/datasets/dota15_detection.py',
'_base_/schedules/schedule_1x.py',
'_base_/default_runtime.py'
]
model = dict(
# pretrained='pretrained/pvt_small.pth',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_input',
num_outs=5))
# optimizer
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
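# Note (assumption based on the common MMDetection linear-scaling convention, not stated in
# this config): the stock 1x schedule uses lr=0.02 for 8 GPUs x 2 images per GPU, so
# lr=0.0025 above corresponds to a total batch size of 2, e.g. a single GPU with 2 images.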
| 28.608696
| 74
| 0.650456
|
a812fd000c19a99ab3c5cb8b7905ff933eeb5e10
| 3,297
|
py
|
Python
|
solid_test_case.py
|
piotrjk/solid_test_framework
|
9aef87cc41aef20881c25c0ddb4bdfccd1cd77e5
|
[
"MIT"
] | null | null | null |
solid_test_case.py
|
piotrjk/solid_test_framework
|
9aef87cc41aef20881c25c0ddb4bdfccd1cd77e5
|
[
"MIT"
] | null | null | null |
solid_test_case.py
|
piotrjk/solid_test_framework
|
9aef87cc41aef20881c25c0ddb4bdfccd1cd77e5
|
[
"MIT"
] | null | null | null |
import sys
import os
import unittest
from solid_test_suite import SolidTestResult
class SolidTestCase(unittest.TestCase):
def get_name(self):
return self._testMethodName
def get_class(self, full=False):
if full:
return self.__class__.__name__
return self.__class__.__name__.split('.')[-1]
def get_module(self):
return os.path.dirname(os.path.abspath(__file__)).split(os.sep)[-1]
def run(self, result=None):
if not isinstance(result, SolidTestResult):
super(SolidTestCase, self).run(result)
else:
if result is None:
raise Exception('No valid test result passed as an argument')
test_method = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(test_method, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(test_method, '__unittest_skip_why__', ''))
result.add_skip(self, skip_why)
finally:
return
success = False
try:
self.setUp()
except unittest.SkipTest as e:
result.add_skip(self, unicode(e))
except KeyboardInterrupt:
raise
except:
result.add_error(self, sys.exc_info())
else:
try:
test_method()
except KeyboardInterrupt:
raise
except self.failureException:
result.add_failure(self, sys.exc_info())
except unittest.case._ExpectedFailure as e:
addExpectedFailure = getattr(result, 'addExpectedFailure', None)
if addExpectedFailure is not None:
addExpectedFailure(self, e.exc_info)
else:
result.addSuccess(self)
except unittest.case._UnexpectedSuccess:
addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self)
else:
result.addFailure(self, sys.exc_info())
except unittest.SkipTest as e:
result.add_skip(self, unicode(e))
except:
result.addError(self, sys.exc_info())
else:
success = True
try:
self.tearDown()
except KeyboardInterrupt:
raise
except:
result.add_error(self, sys.exc_info())
success = False
try:
clean_up_success = self.doCleanups()
except:
clean_up_success = False
success = success and clean_up_success
if success:
result.add_success(self)
| 38.788235
| 89
| 0.503488
|
fc11ea8b217151b0ee15e0c69b3265a3c0f600b7
| 6,305
|
py
|
Python
|
models/FlowNetS_3x3_Q.py
|
kairx772/FlowNetPytorch
|
604bffdf564331b9921efe81640a2a7aa3438a67
|
[
"MIT"
] | 2
|
2020-12-16T14:44:02.000Z
|
2021-01-29T06:23:57.000Z
|
models/FlowNetS_3x3_Q.py
|
kairx772/FlowNetPytorch
|
604bffdf564331b9921efe81640a2a7aa3438a67
|
[
"MIT"
] | null | null | null |
models/FlowNetS_3x3_Q.py
|
kairx772/FlowNetPytorch
|
604bffdf564331b9921efe81640a2a7aa3438a67
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch.nn.init import kaiming_normal_, constant_
from .util_lsq import conv, predict_flow, deconv, crop_like, conv_Q, predict_flow_Q, deconv_Q, ACT_Q
from .util_lsq import QuantConvTranspose2d as ConvTrans2d_Q
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
__all__ = [
'flownets33q', 'flownets33q_bn'
]
class FlowNetS33_Q(nn.Module):
expansion = 1
def __init__(self,batchNorm=True, bitW=32, bitA=32, bias=True):
super(FlowNetS33_Q,self).__init__()
self.bitW = bitW
self.bitA = bitA
print ('bitW = ', bitW)
print ('bitA = ' , bitA)
self.batchNorm = batchNorm
self.conv1 = conv_Q(self.batchNorm, 6, 64, bias=bias, bitW=bitW, bitA=bitA) # 7x7 origin
self.conv1_1 = conv_Q(self.batchNorm, 64, 64, bias=bias, bitW=bitW, bitA=bitA)
self.conv1_2 = conv_Q(self.batchNorm, 64, 64, bias=bias, bitW=bitW, bitA=bitA, stride=2)
self.conv2 = conv_Q(self.batchNorm, 64, 128, bias=bias, bitW=bitW, bitA=bitA) # 5x5 origin
self.conv2_1 = conv_Q(self.batchNorm, 128, 128, bias=bias, bitW=bitW, bitA=bitA, stride=2)
self.conv3 = conv_Q(self.batchNorm, 128, 256, bias=bias, bitW=bitW, bitA=bitA) # 5x5 origin
self.conv3_0 = conv_Q(self.batchNorm, 256, 256, bias=bias, bitW=bitW, bitA=bitA, stride=2)
self.conv3_1 = conv_Q(self.batchNorm, 256, 256, bias=bias, bitW=bitW, bitA=bitA)
self.conv4 = conv_Q(self.batchNorm, 256, 512, bias=bias, bitW=bitW, bitA=bitA, stride=2)
self.conv4_1 = conv_Q(self.batchNorm, 512, 512, bias=bias, bitW=bitW, bitA=bitA)
self.conv5 = conv_Q(self.batchNorm, 512, 512, bias=bias, bitW=bitW, bitA=bitA, stride=2)
self.conv5_1 = conv_Q(self.batchNorm, 512, 512, bias=bias, bitW=bitW, bitA=bitA)
self.conv6 = conv_Q(self.batchNorm, 512, 1024, bias=bias, bitW=bitW, bitA=bitA, stride=2)
self.conv6_1 = conv_Q(self.batchNorm,1024, 1024, bias=bias, bitW=bitW, bitA=bitA)
self.deconv5 = deconv_Q(1024,512, bitW=bitW, bitA=bitA)
self.deconv4 = deconv_Q(1026,256, bitW=bitW, bitA=bitA)
self.deconv3 = deconv_Q(770,128, bitW=bitW, bitA=bitA)
self.deconv2 = deconv_Q(386,64, bitW=bitW, bitA=bitA)
self.predict_flow6 = predict_flow_Q(1024, bitW=bitW)
self.predict_flow5 = predict_flow_Q(1026, bitW=bitW)
self.predict_flow4 = predict_flow_Q(770, bitW=bitW)
self.predict_flow3 = predict_flow_Q(386, bitW=bitW)
self.predict_flow2 = predict_flow_Q(194, bitW=bitW)
self.upsampled_flow6_to_5 = ConvTrans2d_Q(2, 2, 4, 2, 1, bias=False, bit=bitW)
self.upsampled_flow5_to_4 = ConvTrans2d_Q(2, 2, 4, 2, 1, bias=False, bit=bitW)
self.upsampled_flow4_to_3 = ConvTrans2d_Q(2, 2, 4, 2, 1, bias=False, bit=bitW)
self.upsampled_flow3_to_2 = ConvTrans2d_Q(2, 2, 4, 2, 1, bias=False, bit=bitW)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
kaiming_normal_(m.weight, 0.1)
if m.bias is not None:
constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
constant_(m.weight, 1)
constant_(m.bias, 0)
def forward(self, x):
out_conv2 = self.conv2_1(self.conv2(self.conv1_2(self.conv1_1(self.conv1(x)))))
out_conv3 = self.conv3_1(self.conv3_0(self.conv3(out_conv2)))
out_conv4 = self.conv4_1(self.conv4(out_conv3))
out_conv5 = self.conv5_1(self.conv5(out_conv4))
out_conv6 = self.conv6_1(self.conv6(out_conv5))
flow6 = self.predict_flow6(out_conv6)
flow6_up = crop_like(self.upsampled_flow6_to_5(flow6), out_conv5)
out_deconv5 = crop_like(self.deconv5(out_conv6), out_conv5)
concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1)
flow5 = self.predict_flow5(concat5)
flow5_up = crop_like(self.upsampled_flow5_to_4(flow5), out_conv4)
out_deconv4 = crop_like(self.deconv4(concat5), out_conv4)
concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1)
flow4 = self.predict_flow4(concat4)
flow4_up = crop_like(self.upsampled_flow4_to_3(flow4), out_conv3)
out_deconv3 = crop_like(self.deconv3(concat4), out_conv3)
concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1)
flow3 = self.predict_flow3(concat3)
flow3_up = crop_like(self.upsampled_flow3_to_2(flow3), out_conv2)
out_deconv2 = crop_like(self.deconv2(concat3), out_conv2)
concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1)
flow2 = self.predict_flow2(concat2)
if self.training:
return flow2,flow3,flow4,flow5,flow6
else:
return flow2
def weight_parameters(self):
return [param for name, param in self.named_parameters() if 'weight' in name]
def bias_parameters(self):
return [param for name, param in self.named_parameters() if 'bias' in name]
def assign_alphabit(self, alphabit):
for m in self.modules():
if isinstance(m, ACT_Q):
m.alpha_bit = alphabit
return
def flownets33q(data=None, args=None):
"""FlowNetS model architecture from the
"Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852)
Args:
data : pretrained weights of the network. will create a new one if not set
"""
model = FlowNetS33_Q(batchNorm=False, bias=args.conv_no_bias, bitW=args.qw, bitA=args.qa)
if data is not None:
model.load_state_dict(data['state_dict'], strict=False)
if args.alphabit is not None:
model.assign_alphabit(args.alphabit)
return model
def flownets33q_bn(data=None, bitW=32, bitA=32):
"""FlowNetS model architecture from the
"Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852)
Args:
data : pretrained weights of the network. will create a new one if not set
"""
model = FlowNetS33_Q(batchNorm=True, bitW=bitW, bitA=bitA)
if data is not None:
model.load_state_dict(data['state_dict'], strict=False)
return model
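# Illustrative construction sketch (not part of the original file; the Namespace fields are
# placeholders that mirror the attributes flownets33q() reads from `args` above):
# >>> from argparse import Namespace
# >>> args = Namespace(qw=8, qa=8, conv_no_bias=False, alphabit=None)
# >>> model = flownets33q(data=None, args=args).to(device)
# >>> x = torch.randn(1, 6, 384, 512, device=device)  # two RGB frames stacked channel-wise
# >>> flow2, flow3, flow4, flow5, flow6 = model(x)    # multi-scale flows while model.training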
| 45.035714
| 102
| 0.659635
|
a5c0bde9514ad09eb51d8ec780baaef66fe56b95
| 6,505
|
py
|
Python
|
kmeans_for_anchors.py
|
SekiroRong/YOLOP
|
e59628925dfaadfa549790cd0cf1c8a7e1139a2c
|
[
"MIT"
] | 1
|
2022-02-22T04:17:17.000Z
|
2022-02-22T04:17:17.000Z
|
kmeans_for_anchors.py
|
SekiroRong/YOLOP
|
e59628925dfaadfa549790cd0cf1c8a7e1139a2c
|
[
"MIT"
] | null | null | null |
kmeans_for_anchors.py
|
SekiroRong/YOLOP
|
e59628925dfaadfa549790cd0cf1c8a7e1139a2c
|
[
"MIT"
] | null | null | null |
#-------------------------------------------------------------------------------------------------#
# Although k-means clusters the boxes in the dataset, many datasets contain boxes of similar
# sizes, so the nine clustered anchors end up very close to each other, which actually hurts
# training: different feature levels suit different anchor sizes, and shallower levels suit larger anchors.
# The original network's anchors are already split across small/medium/large scales, so skipping
# the clustering can still give very good results.
#-------------------------------------------------------------------------------------------------#
import glob
import xml.etree.ElementTree as ET
import numpy as np
def bbox3Dtowh(bboxs,data):
bbox2d = []
for bbox in bboxs:
bbox_class = bbox[0]
bbox3d = bbox[1]
x, y, x2, y2 = bbox3d[8][0], bbox3d[8][1], bbox3d[9][0], bbox3d[9][1]
if x == y == x2 == y2 == 0:
continue
# b2d = [min(bbox3d[0][0], bbox3d[1][0], bbox3d[2][0], bbox3d[3][0]),
# min(bbox3d[4][1], bbox3d[5][1], bbox3d[6][1], bbox3d[7][1]),
# max(bbox3d[4][0], bbox3d[5][0], bbox3d[6][0], bbox3d[7][0]),
# max(bbox3d[0][1], bbox3d[1][1], bbox3d[2][1], bbox3d[4][1])]
data.append([x2-x, y2-y])
# print(bbox2d)
# return np.array(bbox2d)
def parse3Dbbox(path,data):
with open(path, 'r') as f:
labels = []
label = []
point = []
lines = f.readlines()
for line in lines:
line = line.strip('\n')
line = line.split(' ')
label.append(line[0])
for i in range(int(len(line) / 2) - 1):
point.append((int(line[2 * i + 1]), int(line[2 * i + 2])))
label.append(point)
point = []
labels.append(label)
label = []
# print(labels)
bbox3Dtowh(labels,data)
def cas_iou(box,cluster):
x = np.minimum(cluster[:,0],box[0])
y = np.minimum(cluster[:,1],box[1])
intersection = x * y
area1 = box[0] * box[1]
area2 = cluster[:,0] * cluster[:,1]
iou = intersection / (area1 + area2 -intersection)
return iou
def avg_iou(box,cluster):
return np.mean([np.max(cas_iou(box[i],cluster)) for i in range(box.shape[0])])
def kmeans(box,k):
#-------------------------------------------------------------#
# total number of boxes
#-------------------------------------------------------------#
row = box.shape[0]
#-------------------------------------------------------------#
# distance (1 - IoU) from every box to each of the k cluster centers
#-------------------------------------------------------------#
distance = np.empty((row,k))
#-------------------------------------------------------------#
# cluster assignment from the previous iteration
#-------------------------------------------------------------#
last_clu = np.zeros((row,))
np.random.seed()
#-------------------------------------------------------------#
# randomly pick k boxes as the initial cluster centers
#-------------------------------------------------------------#
cluster = box[np.random.choice(row,k,replace = False)]
while True:
#-------------------------------------------------------------#
# compute 1 - IoU between every box and each cluster center
#-------------------------------------------------------------#
for i in range(row):
distance[i] = 1 - cas_iou(box[i],cluster)
#-------------------------------------------------------------#
# assign each box to its nearest cluster center
#-------------------------------------------------------------#
near = np.argmin(distance,axis=1)
if (last_clu == near).all():
break
#-------------------------------------------------------------#
# update each cluster center to the median of its members
#-------------------------------------------------------------#
for j in range(k):
cluster[j] = np.median(
box[near == j],axis=0)
last_clu = near
return cluster
def load_data(path):
data = []
for filename in (glob.glob(path + '/*.txt')):
parse3Dbbox(filename,data)
#-------------------------------------------------------------#
# collect the boxes from every annotation file
#-------------------------------------------------------------#
# for xml_file in glob.glob('{}/*xml'.format(path)):
# tree = ET.parse(xml_file)
# height = int(tree.findtext('./size/height'))
# width = int(tree.findtext('./size/width'))
# if height<=0 or width<=0:
# continue
#
# #-------------------------------------------------------------#
# # get the width and height of every object
# #-------------------------------------------------------------#
# for obj in tree.iter('object'):
# xmin = int(float(obj.findtext('bndbox/xmin'))) / width
# ymin = int(float(obj.findtext('bndbox/ymin'))) / height
# xmax = int(float(obj.findtext('bndbox/xmax'))) / width
# ymax = int(float(obj.findtext('bndbox/ymax'))) / height
#
# xmin = np.float64(xmin)
# ymin = np.float64(ymin)
# xmax = np.float64(xmax)
# ymax = np.float64(ymax)
# # keep the width and height
# data.append([xmax-xmin,ymax-ymin])
return np.array(data)
if __name__ == '__main__':
#-------------------------------------------------------------#
# running this script clusters the boxes found in the annotation files
# (e.g. the xml under './VOCdevkit/VOC2007/Annotations') and writes yolo_anchors.txt
#-------------------------------------------------------------#
SIZE_x = 640
SIZE_y = 480
anchors_num = 9
#-------------------------------------------------------------#
# load the dataset; VOC-style xml annotations can also be used
#-------------------------------------------------------------#
path = r'G:\Carla_Dataset\3Dbbox\train'
#-------------------------------------------------------------#
# load all annotation files
# each entry stores the box width and height (as ratios when using the VOC xml path)
#-------------------------------------------------------------#
data = load_data(path)
#-------------------------------------------------------------#
# run the k-means clustering
#-------------------------------------------------------------#
out = kmeans(data,anchors_num)
out = out[np.argsort(out[:,0])]
print('acc:{:.2f}%'.format(avg_iou(data,out) * 100))
print(out*SIZE_x)
data = out
f = open("yolo_anchors.txt", 'w')
row = np.shape(data)[0]
for i in range(row):
if i == 0:
x_y = "%d,%d" % (data[i][0], data[i][1])
else:
x_y = ", %d,%d" % (data[i][0], data[i][1])
f.write(x_y)
f.close()
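# Hypothetical follow-up check (not in the original script): read the generated anchors back
# and split them into 3 scales of 3 anchors each, as YOLO-style heads usually expect.
# >>> with open("yolo_anchors.txt") as fh:
# ...     values = [int(v) for v in fh.read().split(',')]
# >>> anchors = [tuple(values[i:i + 2]) for i in range(0, len(values), 2)]
# >>> scales = [anchors[0:3], anchors[3:6], anchors[6:9]]  # small / medium / large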
| 35.939227
| 100
| 0.347425
|
9c86da69d38f39667ca3b27e9646b9a4aa3cb10f
| 17,902
|
py
|
Python
|
google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py
|
geraint0923/python-aiplatform
|
f40f32289e1fbeb93b35e4b66f65d15528a6481c
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py
|
geraint0923/python-aiplatform
|
f40f32289e1fbeb93b35e4b66f65d15528a6481c
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py
|
geraint0923/python-aiplatform
|
f40f32289e1fbeb93b35e4b66f65d15528a6481c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.aiplatform_v1.types import pipeline_service
from google.cloud.aiplatform_v1.types import training_pipeline
from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO
class PipelineServiceGrpcTransport(PipelineServiceTransport):
"""gRPC backend transport for PipelineService.
A service for creating and managing AI Platform's pipelines.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes)
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
**self_signed_jwt_kwargs,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def create_training_pipeline(
self,
) -> Callable[
[pipeline_service.CreateTrainingPipelineRequest],
gca_training_pipeline.TrainingPipeline,
]:
r"""Return a callable for the create training pipeline method over gRPC.
Creates a TrainingPipeline. A newly created
TrainingPipeline will be attempted to run right away.
Returns:
Callable[[~.CreateTrainingPipelineRequest],
~.TrainingPipeline]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_training_pipeline" not in self._stubs:
self._stubs["create_training_pipeline"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.PipelineService/CreateTrainingPipeline",
request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize,
response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize,
)
return self._stubs["create_training_pipeline"]
@property
def get_training_pipeline(
self,
) -> Callable[
[pipeline_service.GetTrainingPipelineRequest],
training_pipeline.TrainingPipeline,
]:
r"""Return a callable for the get training pipeline method over gRPC.
Gets a TrainingPipeline.
Returns:
Callable[[~.GetTrainingPipelineRequest],
~.TrainingPipeline]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_training_pipeline" not in self._stubs:
self._stubs["get_training_pipeline"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.PipelineService/GetTrainingPipeline",
request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize,
response_deserializer=training_pipeline.TrainingPipeline.deserialize,
)
return self._stubs["get_training_pipeline"]
@property
def list_training_pipelines(
self,
) -> Callable[
[pipeline_service.ListTrainingPipelinesRequest],
pipeline_service.ListTrainingPipelinesResponse,
]:
r"""Return a callable for the list training pipelines method over gRPC.
Lists TrainingPipelines in a Location.
Returns:
Callable[[~.ListTrainingPipelinesRequest],
~.ListTrainingPipelinesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_training_pipelines" not in self._stubs:
self._stubs["list_training_pipelines"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.PipelineService/ListTrainingPipelines",
request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize,
response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize,
)
return self._stubs["list_training_pipelines"]
@property
def delete_training_pipeline(
self,
) -> Callable[
[pipeline_service.DeleteTrainingPipelineRequest], operations_pb2.Operation
]:
r"""Return a callable for the delete training pipeline method over gRPC.
Deletes a TrainingPipeline.
Returns:
Callable[[~.DeleteTrainingPipelineRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_training_pipeline" not in self._stubs:
self._stubs["delete_training_pipeline"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.PipelineService/DeleteTrainingPipeline",
request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_training_pipeline"]
@property
def cancel_training_pipeline(
self,
) -> Callable[[pipeline_service.CancelTrainingPipelineRequest], empty_pb2.Empty]:
r"""Return a callable for the cancel training pipeline method over gRPC.
Cancels a TrainingPipeline. Starts asynchronous cancellation on
the TrainingPipeline. The server makes a best effort to cancel
the pipeline, but success is not guaranteed. Clients can use
[PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]
or other methods to check whether the cancellation succeeded or
whether the pipeline completed despite cancellation. On
successful cancellation, the TrainingPipeline is not deleted;
instead it becomes a pipeline with a
[TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error]
value with a [google.rpc.Status.code][google.rpc.Status.code] of
1, corresponding to ``Code.CANCELLED``, and
[TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state]
is set to ``CANCELLED``.
Returns:
Callable[[~.CancelTrainingPipelineRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_training_pipeline" not in self._stubs:
self._stubs["cancel_training_pipeline"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.PipelineService/CancelTrainingPipeline",
request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["cancel_training_pipeline"]
__all__ = ("PipelineServiceGrpcTransport",)
| 44.755
| 109
| 0.652218
|
dcb393ab9a1e1f0e6f5bedceb17a26cc8bd34f68
| 332
|
py
|
Python
|
app/event/migrations/0029_merge_20160819_1158.py
|
Sovol2018/sovolo
|
54250e42b4af3391d2f99690f45b93ab240563c2
|
[
"MIT"
] | 2
|
2017-06-06T11:34:49.000Z
|
2017-10-24T13:09:50.000Z
|
app/event/migrations/0029_merge_20160819_1158.py
|
Sovol2018/sovolo
|
54250e42b4af3391d2f99690f45b93ab240563c2
|
[
"MIT"
] | 346
|
2016-08-09T20:50:57.000Z
|
2018-08-28T06:52:17.000Z
|
app/event/migrations/0029_merge_20160819_1158.py
|
hejob/sovolo
|
8b73253d7bf0427c7ae0ebb6d8e3d70e118e8427
|
[
"MIT"
] | 3
|
2017-11-27T14:07:57.000Z
|
2018-08-13T15:51:01.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-19 02:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('event', '0025_auto_20160819_1109'),
('event', '0028_auto_20160819_1009'),
]
operations = [
]
| 19.529412
| 46
| 0.659639
|
7b662b0468ae5c36429890cf66f5f9060f905ad0
| 4,083
|
py
|
Python
|
benchmark/startCirq1409.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startCirq1409.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startCirq1409.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=55
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=3
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=45
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=52
c.append(cirq.Z.on(input_qubit[2])) # number=53
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=54
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=47
c.append(cirq.H.on(input_qubit[1])) # number=4
c.append(cirq.rx(2.664070570244145).on(input_qubit[1])) # number=39
c.append(cirq.H.on(input_qubit[2])) # number=5
c.append(cirq.H.on(input_qubit[3])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=49
c.append(cirq.CZ.on(input_qubit[3],input_qubit[2])) # number=50
c.append(cirq.H.on(input_qubit[2])) # number=51
c.append(cirq.H.on(input_qubit[4])) # number=21
for i in range(2):
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[3])) # number=40
c.append(cirq.Y.on(input_qubit[4])) # number=35
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.H.on(input_qubit[1])) # number=18
c.append(cirq.H.on(input_qubit[2])) # number=19
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.H.on(input_qubit[0])) # number=25
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=26
c.append(cirq.H.on(input_qubit[0])) # number=27
c.append(cirq.H.on(input_qubit[0])) # number=36
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=37
c.append(cirq.H.on(input_qubit[0])) # number=38
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=41
c.append(cirq.X.on(input_qubit[0])) # number=42
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=43
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=34
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=24
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=29
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[3])) # number=44
c.append(cirq.X.on(input_qubit[1])) # number=30
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=31
c.append(cirq.X.on(input_qubit[2])) # number=11
c.append(cirq.X.on(input_qubit[3])) # number=12
c.append(cirq.X.on(input_qubit[0])) # number=13
c.append(cirq.X.on(input_qubit[1])) # number=14
c.append(cirq.X.on(input_qubit[2])) # number=15
c.append(cirq.X.on(input_qubit[3])) # number=16
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 5
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq1409.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| 39.259615
| 77
| 0.654666
|
0b401573e9077f3c6783b633593f84277e6cb597
| 88
|
py
|
Python
|
basic.py
|
technotekr/pywebview_example-1
|
4d04ea1b10aec422e82b8ae4fed022b9f54dd611
|
[
"MIT"
] | null | null | null |
basic.py
|
technotekr/pywebview_example-1
|
4d04ea1b10aec422e82b8ae4fed022b9f54dd611
|
[
"MIT"
] | null | null | null |
basic.py
|
technotekr/pywebview_example-1
|
4d04ea1b10aec422e82b8ae4fed022b9f54dd611
|
[
"MIT"
] | null | null | null |
import webview
webview.create_window('TechNote', 'https://technote.kr')
webview.start()
| 22
| 56
| 0.772727
|
4ceb102ea4786bfcf77fee3fb4cac0544db403b8
| 2,333
|
py
|
Python
|
analysis/extraction.py
|
nverhaaren/istanbul-game
|
9e5051346dda8d8143201eb4548b2800fc3eb67a
|
[
"MIT"
] | null | null | null |
analysis/extraction.py
|
nverhaaren/istanbul-game
|
9e5051346dda8d8143201eb4548b2800fc3eb67a
|
[
"MIT"
] | null | null | null |
analysis/extraction.py
|
nverhaaren/istanbul-game
|
9e5051346dda8d8143201eb4548b2800fc3eb67a
|
[
"MIT"
] | null | null | null |
import itertools
import typing
from lib.utils import extract_from_dict
def diff_dicts(d1, d2):
if not isinstance(d1, dict) and not isinstance(d2, dict):
return d2, set()
removed_keys = set(d1) - set(d2)
updates = {k: v for k, v in d2.items() if k not in d1}
for k in set(d1) & set(d2):
child_updates, child_removed = diff_dicts(d1[k], d2[k])
if child_updates != {} and child_updates != d1[k]:
updates[k] = child_updates
removed_keys.update({f'{k}.{child_k}' for child_k in child_removed})
return updates, removed_keys
def extract_player_state_series(states: typing.Iterator[dict], player: str, key: str):
initial = next(states)
player_count: int = initial['immutable']['player_count']
idx_of_player = initial['immutable']['players'].index(player)
previous = extract_from_dict(key, initial['mutable']['player_states'][idx_of_player])
yield {'snapshot': previous, 'update': previous, 'removed_keys': [], 'when': ['initial']}
before_source, after_source = itertools.tee(states, 2)
before_states = (extract_from_dict(key, state['mutable']['player_states'][idx_of_player])
for idx, state in enumerate(before_source)
if (idx + 1) % player_count == idx_of_player)
after_states = (extract_from_dict(key, state['mutable']['player_states'][idx_of_player])
for idx, state in enumerate(after_source)
if idx % player_count == idx_of_player)
if idx_of_player == 0:
before_states = itertools.chain([previous], before_states)
for idx, (before, after) in enumerate(zip(before_states, after_states)):
if previous != before:
update_between, removed_between = diff_dicts(previous, before)
yield {
'snapshot': before,
'update': update_between,
'removed_keys': list(removed_between),
'when': ['before', idx + 1],
}
if before != after:
update_during, removed_during = diff_dicts(before, after)
yield {
'snapshot': after,
'update': update_during,
'removed_keys': list(removed_during),
'when': ['after', idx + 1],
}
previous = after
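# Quick illustrative check of diff_dicts (not part of the original module):
# >>> before = {'gold': 3, 'goods': {'fabric': 1, 'spice': 2}}
# >>> after = {'gold': 5, 'goods': {'fabric': 1}}
# >>> diff_dicts(before, after)
# ({'gold': 5}, {'goods.spice'})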
| 42.418182
| 93
| 0.609516
|
068617097e0052b49882a36148a948e56ddcdcfb
| 1,772
|
py
|
Python
|
mobify/sources/misc.py
|
macbre/mobify
|
69d1d8754fc30f7cbb2626beb890261b41964852
|
[
"MIT"
] | 5
|
2015-11-29T00:18:36.000Z
|
2022-03-28T22:23:31.000Z
|
mobify/sources/misc.py
|
macbre/mobify
|
69d1d8754fc30f7cbb2626beb890261b41964852
|
[
"MIT"
] | 29
|
2015-10-09T19:07:35.000Z
|
2022-01-27T13:02:53.000Z
|
mobify/sources/misc.py
|
macbre/mobify
|
69d1d8754fc30f7cbb2626beb890261b41964852
|
[
"MIT"
] | 1
|
2017-11-20T04:40:04.000Z
|
2017-11-20T04:40:04.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from os import getenv
from mobify.source import MobifySource
class MiscSource(MobifySource):
"""
This source needs to be forced via --source=MiscSource
and configured via env variables
MOBIFY_SOURCE_TITLE=//h2
MOBIFY_SOURCE_CONTENT=//article
"""
@staticmethod
def is_my_url(url):
return False
def get_html(self):
content_xpath = getenv('MOBIFY_SOURCE_CONTENT', '//article')
self._logger.info("Content XPath [MOBIFY_SOURCE_CONTENT]: %s", content_xpath)
article = self.xpath(content_xpath)
# clean up the HTML
xpaths = [
'*//h1', # post title
'*//img', # images
'*//figcaption', # images
'*//figure', # images
'*//hr', # lines
]
article = self.remove_nodes(article, xpaths)
html = self.get_node_html(article)
# remove HTML tags attributes
html = re.sub(r'<(\w+)[^>]*>', lambda match: '<{}>'.format(match.group(1)), html)
# promote headings to the second level
html = html.replace('h4>', 'h2>')
# cleanup of tags
html = re.sub('</?(a|div|section)>', '', html).strip()
# add a title and a footer
return '<h1>{}</h1>{}'.format(self.get_title(), html).strip()
def get_title(self):
title_xpath = getenv('MOBIFY_SOURCE_TITLE', '//h1')
self._logger.info("Title XPath [MOBIFY_SOURCE_TITLE]: %s", title_xpath)
return self.get_node(title_xpath).strip()
def get_author(self):
return ''
def get_language(self):
"""
:rtype str
"""
return self.get_node('//html', 'lang') or 'en'
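# Illustrative invocation (assumptions: the CLI accepts a URL plus --source as the class
# docstring implies; the XPaths and URL are placeholders):
# $ MOBIFY_SOURCE_TITLE='//h1[@class="entry-title"]' \
#   MOBIFY_SOURCE_CONTENT='//div[@class="entry-content"]' \
#   mobify --source=MiscSource https://example.com/post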
| 26.447761
| 89
| 0.577878
|
a9b7d899827355d9d4c08ef5f6914dca52995143
| 3,639
|
py
|
Python
|
venv/Lib/site-packages/caffe2/python/test_util.py
|
Westlanderz/AI-Plat1
|
1187c22819e5135e8e8189c99b86a93a0d66b8d8
|
[
"MIT"
] | 1
|
2022-01-08T12:30:44.000Z
|
2022-01-08T12:30:44.000Z
|
venv/Lib/site-packages/caffe2/python/test_util.py
|
Westlanderz/AI-Plat1
|
1187c22819e5135e8e8189c99b86a93a0d66b8d8
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/caffe2/python/test_util.py
|
Westlanderz/AI-Plat1
|
1187c22819e5135e8e8189c99b86a93a0d66b8d8
|
[
"MIT"
] | null | null | null |
## @package test_util
# Module caffe2.python.test_util
import numpy as np
from caffe2.python import core, workspace
import os
import pathlib
import shutil
import tempfile
import unittest
from typing import Any, Callable, Tuple, Type
from types import TracebackType
def rand_array(*dims):
# np.random.rand() returns a float instead of a 0-dim array, which is why we need
# this small workaround
return np.array(np.random.rand(*dims) - 0.5).astype(np.float32)
def randBlob(name, type, *dims, **kwargs):
offset = kwargs['offset'] if 'offset' in kwargs else 0.0
workspace.FeedBlob(name, np.random.rand(*dims).astype(type) + offset)
def randBlobFloat32(name, *dims, **kwargs):
randBlob(name, np.float32, *dims, **kwargs)
def randBlobsFloat32(names, *dims, **kwargs):
for name in names:
randBlobFloat32(name, *dims, **kwargs)
def numOps(net):
return len(net.Proto().op)
def str_compare(a, b, encoding="utf8"):
if isinstance(a, bytes):
a = a.decode(encoding)
if isinstance(b, bytes):
b = b.decode(encoding)
return a == b
def get_default_test_flags():
return [
'caffe2',
'--caffe2_log_level=0',
'--caffe2_cpu_allocator_do_zero_fill=0',
'--caffe2_cpu_allocator_do_junk_fill=1',
]
def caffe2_flaky(test_method):
# This decorator is used to mark a test method as flaky.
# This is used in conjunction with the environment variable
# CAFFE2_RUN_FLAKY_TESTS that specifies "flaky tests" mode
# If flaky tests mode are on, only flaky tests are run
# If flaky tests mode are off, only non-flaky tests are run
# NOTE: the decorator should be applied as the top-level decorator
# in a test method.
test_method.__caffe2_flaky__ = True
return test_method
def is_flaky_test_mode():
return os.getenv('CAFFE2_RUN_FLAKY_TESTS', '0') == '1'
class TestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
workspace.GlobalInit(get_default_test_flags())
# clear the default engine settings to separate out their
# effect from the ops tests
core.SetEnginePref({}, {})
def setUp(self):
# Skip tests based on whether we're in flaky test mode and
# the test is decorated as a flaky test.
test_method = getattr(self, self._testMethodName)
is_flaky_test = getattr(test_method, "__caffe2_flaky__", False)
if (is_flaky_test_mode() and not is_flaky_test):
raise unittest.SkipTest("Non-flaky tests are skipped in flaky test mode")
elif (not is_flaky_test_mode() and is_flaky_test):
raise unittest.SkipTest("Flaky tests are skipped in regular test mode")
self.ws = workspace.C.Workspace()
workspace.ResetWorkspace()
def tearDown(self):
workspace.ResetWorkspace()
def make_tempdir(self) -> pathlib.Path:
tmp_folder = pathlib.Path(tempfile.mkdtemp(prefix="caffe2_test."))
self.addCleanup(self._remove_tempdir, tmp_folder)
return tmp_folder
def _remove_tempdir(self, path: pathlib.Path) -> None:
def _onerror(
fn: Callable[..., Any],
path: str,
exc_info: Tuple[Type[BaseException], BaseException, TracebackType],
) -> None:
# Ignore FileNotFoundError, but re-raise anything else
if not isinstance(exc_info[1], FileNotFoundError):
raise exc_info[1].with_traceback(exc_info[2])
shutil.rmtree(str(path), onerror=_onerror)
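# Sketch of how the helpers above combine (illustrative; MyOpTest is not part of caffe2).
# The flaky test below only runs when CAFFE2_RUN_FLAKY_TESTS=1, because TestCase.setUp()
# skips non-flaky tests in flaky mode and flaky tests in regular mode.
# >>> class MyOpTest(TestCase):
# ...     @caffe2_flaky
# ...     def test_sometimes_fails(self):
# ...         workspace.FeedBlob("x", rand_array(4, 4))
# ...         self.assertEqual(workspace.FetchBlob("x").shape, (4, 4))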
| 31.37069
| 86
| 0.651003
|
1cf78a9ed564c6bdb0c67a592030ee456a4dca31
| 282
|
py
|
Python
|
atividades/ex99.py
|
Fleen66/Python_exercises
|
fd05fdf1181da833a1a1bc9f4a476afc8f467977
|
[
"MIT"
] | null | null | null |
atividades/ex99.py
|
Fleen66/Python_exercises
|
fd05fdf1181da833a1a1bc9f4a476afc8f467977
|
[
"MIT"
] | null | null | null |
atividades/ex99.py
|
Fleen66/Python_exercises
|
fd05fdf1181da833a1a1bc9f4a476afc8f467977
|
[
"MIT"
] | null | null | null |
from time import sleep
def maior(*num):
m = max(num)
me = min(num)
print(f'Há {len(num)} numeros')
print(f'{num}: o maior numero é {m} e o menor é {me}')
sleep(1)
print()
maior(3, 5 ,18, 11, 6, 7, 9,1 )
maior(2,5,10,9)
maior(5,9,8,10,3)
maior(9,7,6,1,3)
| 16.588235
| 58
| 0.553191
|
9e715ac2c4a44b7b0ee1a6b6a41ecaa9acbd6243
| 676
|
py
|
Python
|
test/test_basic.py
|
sumannam/DEVS-Python
|
b47f51e3ab7d981356d8bc18f521f9a42f5e46ea
|
[
"Apache-2.0"
] | 1
|
2021-09-28T23:35:39.000Z
|
2021-09-28T23:35:39.000Z
|
test/test_basic.py
|
sumannam/DEVS-Python
|
b47f51e3ab7d981356d8bc18f521f9a42f5e46ea
|
[
"Apache-2.0"
] | 10
|
2019-11-04T08:47:56.000Z
|
2022-01-31T09:35:10.000Z
|
test/test_basic.py
|
sumannam/DEVS-Python
|
b47f51e3ab7d981356d8bc18f521f9a42f5e46ea
|
[
"Apache-2.0"
] | 1
|
2019-11-04T08:19:18.000Z
|
2019-11-04T08:19:18.000Z
|
import sys
import unittest
sys.path.append('D:/Git/DEVS-Python')
from testPModelTest import testPModelTest
from testEF_P import testEF_P
from testROOT_CO_ORDINATORS import testROOT_CO_ORDINATORS
if __name__ == '__main__':
test_p = unittest.TestLoader().loadTestsFromTestCase(testPModelTest)
test_efp = unittest.TestLoader().loadTestsFromTestCase(testEF_P)
test_root_coordinators = unittest.TestLoader().loadTestsFromTestCase(testROOT_CO_ORDINATORS)
allTests = unittest.TestSuite()
allTests.addTest(test_p)
allTests.addTest(test_efp)
allTests.addTest(test_root_coordinators)
unittest.TextTestRunner(verbosity=2, failfast=True).run(allTests)
| 33.8
| 96
| 0.804734
|
dfb6be047b610667d735edfe1c08ee136018ef22
| 385
|
py
|
Python
|
python/Blog/Blog/wsgi.py
|
egemeric/Dj-Blog
|
c0771cb461a3b147683f313295a9577b493cbcda
|
[
"MIT"
] | 1
|
2020-07-23T19:36:30.000Z
|
2020-07-23T19:36:30.000Z
|
python/Blog/Blog/wsgi.py
|
egemeric/Dj-Blog
|
c0771cb461a3b147683f313295a9577b493cbcda
|
[
"MIT"
] | 7
|
2021-03-30T14:02:05.000Z
|
2022-03-12T00:42:51.000Z
|
python/Blog/Blog/wsgi.py
|
egemeric/Dj-Blog
|
c0771cb461a3b147683f313295a9577b493cbcda
|
[
"MIT"
] | null | null | null |
"""
WSGI config for the Blog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Blog.settings')
application = get_wsgi_application()
| 22.647059
| 78
| 0.781818
|
d57efb5664c75799d4e8c1c74fdc49842e025c12
| 17,695
|
py
|
Python
|
doudizhu/engine.py
|
LaudateCorpus1/doudizhu
|
26ada27dc2fc17ba973ebb7b57828a9f57bd7fa5
|
[
"MIT"
] | 210
|
2018-04-06T16:33:47.000Z
|
2022-03-31T15:57:34.000Z
|
doudizhu/engine.py
|
LaudateCorpus1/doudizhu
|
26ada27dc2fc17ba973ebb7b57828a9f57bd7fa5
|
[
"MIT"
] | 5
|
2018-04-20T12:25:06.000Z
|
2020-07-29T07:35:43.000Z
|
doudizhu/engine.py
|
LaudateCorpus1/doudizhu
|
26ada27dc2fc17ba973ebb7b57828a9f57bd7fa5
|
[
"MIT"
] | 44
|
2018-04-08T05:42:38.000Z
|
2022-02-21T05:27:31.000Z
|
# -*- coding: utf-8 -*-
"""
Dou Dizhu (Fight the Landlord) rule checker and comparator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Enumerates all 37 card patterns and builds a suit-independent, order-independent dictionary,
so a play can be validated in O(1) time and two plays can be compared in O(1) time.
"""
import itertools
import logging
from doudizhu.compat import is_py3, cmp_to_key
logging.basicConfig(level=logging.INFO)
CARDS = '3-4-5-6-7-8-9-10-J-Q-K-A-2-BJ-CJ'.split('-')
CARD_IDX = {c: w for w, c in enumerate(CARDS)}
CARDS_NO_JOKERS = CARDS[:-2]
CARD_PAIR = [[c] * 2 for c in CARDS_NO_JOKERS]
CARD_TRIO = [[c] * 3 for c in CARDS_NO_JOKERS]
CARD_FOUR = [[c] * 4 for c in CARDS_NO_JOKERS]
def cards2str(cards):
return '-'.join(cards)
def str2cards(string):
return string.split('-')
def str2cardmap(string):
cards = str2cards(string)
cardmap = {}
for c in cards:
if c in cardmap:
cardmap[c] += 1
else:
cardmap[c] = 1
return cardmap
def sort_cards(cards):
if is_py3:
return sorted(cards, key=cmp_to_key(
lambda x, y: CARD_IDX[x] - CARD_IDX[y]))
return sorted(cards, cmp=lambda x, y: CARD_IDX[x] - CARD_IDX[y])
def order_repeat(cards, n):
"""按序重复每一个元素n次
([a,b,c], 3)
-->[a,a,a,b,b,b,c,c,c]
"""
tmp = []
for c in cards:
tmp += [c] * n
return tmp
def put_sorted_cards(result, cards, weight):
"""cards is a list of card
sort, 2str, append
"""
result.append((cards2str(sort_cards(cards)), weight))
def enum_solo():
"""枚举所有单牌,并附加权重,以便比较大小"""
return [(c, w) for w, c in enumerate(CARDS)]
def enum_solo_chain(length):
"""枚举所有单连顺,并附加权重
length 是连顺的张数,如5连
"""
if length < 5 or length > 12:
raise ValueError('chain length is in [5,12]')
chain_range = [(5, 8), (12, 1)]
count = chain_range[0][1] - (length - chain_range[0][0])
def solo_chain_x():
return [(cards2str(CARDS[i:i + length]), i) for i in range(count)]
return solo_chain_x
def enum_pair():
"""枚举所有的对子"""
return [(cards2str(pair), w) for w, pair in enumerate(CARD_PAIR)]
def enum_pair_chain(length):
"""枚举所有的连对"""
if length < 3 or length > 10:
raise ValueError('chain length is in [3,10]')
chain_range = [(3, 10), (10, 3)]
count = chain_range[0][1] - (length - chain_range[0][0])
def pair_chain_x():
solo_chain = [CARDS[i:i + length] for i in range(count)]
pair_chain = []
for w, sc in enumerate(solo_chain):
tmp = []
for c in sc:
tmp += [c, c]
pair_chain.append((cards2str(tmp), w))
return pair_chain
return pair_chain_x
def enum_trio_chain(length):
"""枚举所有的三张及连三"""
if length < 1 or length > 7:
raise ValueError('chain length is in [1,7]')
chain_range = [(1, 13), (6, 7)]
count = chain_range[0][1] - (length - chain_range[0][0])
if length >= 2:
# chains of 2 or more cannot use the card `2`
count -= 1
def trio_chain_x():
solo_chain = [CARDS[i:i + length] for i in range(count)]
trio_chain = []
for w, sc in enumerate(solo_chain):
tmp = []
for c in sc:
tmp += [c] * 3
trio_chain.append((cards2str(tmp), w))
return trio_chain
return trio_chain_x
def enum_trio_solo():
"""枚举所有的三带一"""
result = []
weight = 0
for trio in CARD_TRIO:
weight += 1
all_cards = [card for card in CARDS if card != trio[0]]
for card in all_cards:
put_sorted_cards(result, trio + [card], weight)
logging.debug('trio_solo: {}'.format(len(result)))
return result
def enum_trio_solo_chain(length):
"""x 连三连一"""
if length < 2 or length > 5:
raise ValueError('chain length is in [2,5]')
def trio_solo_chain_x():
result = []
weight = 0
solo_chain = [CARDS[i:i + length] for i in range(13 - length)]
for chain in solo_chain:
weight += 1
trio_chain = order_repeat(chain, 3)
avail_cards = [c for c in CARDS_NO_JOKERS if c not in set(chain)]
# 1. select {BJ, CJ}
it = itertools.combinations_with_replacement(avail_cards, length - 2)
for e in it:
kicker = list(e) + ['BJ', 'CJ']
put_sorted_cards(result, trio_chain + kicker, weight)
# 2. select BJ
it = itertools.combinations_with_replacement(avail_cards, length - 1)
for e in it:
kicker = list(e) + ['BJ']
put_sorted_cards(result, trio_chain + kicker, weight)
# 3. select CJ
it = itertools.combinations_with_replacement(avail_cards, length - 1)
for e in it:
kicker = list(e) + ['CJ']
put_sorted_cards(result, trio_chain + kicker, weight)
# 4. do not select {BJ, CJ}
it = itertools.combinations_with_replacement(avail_cards, length)
for e in it:
if length == 5 and len(set(e)) == 1:
continue
kicker = list(e)
put_sorted_cards(result, trio_chain + kicker, weight)
logging.debug('trio_solo_chain_{}: {}'.format(length, len(result)))
return result
return trio_solo_chain_x
def enum_trio_pair_chain(length):
"""x 连三连一对"""
if length < 1 or length > 4:
raise ValueError('chain length is in [1,4]')
def check_repeat_num(arr, limit):
"""
each element of arr may repeat at most `limit` times;
return True if any element exceeds the limit
"""
for i, e in enumerate(arr):
count = 0
for j in range(i, len(arr)):
if e == arr[j]:
count += 1
if count > limit:
return True
return False
def trio_pair_chain_x():
result = []
weight = 0
if length == 1:
solo_chain = [[e] for e in CARDS_NO_JOKERS]
else:
solo_chain = [CARDS[i:i + length] for i in range(13 - length)]
for chain in solo_chain:
weight += 1
trio_chain = order_repeat(chain, 3)
avail_cards = [c for c in CARDS_NO_JOKERS if c not in set(chain)]
it = itertools.combinations_with_replacement(avail_cards, length)
for e in it:
if length == 3 and len(set(e)) == 1:
# exclude the case of three identical pairs
continue
if length == 4 and len(set(e)) <= 2 and check_repeat_num(e, 2):
# exclude the cases of three or four identical pairs
continue
kicker = order_repeat(list(e), 2)
put_sorted_cards(result, trio_chain + kicker, weight)
logging.debug('trio_solo_chain_{}: {}'.format(length, len(result)))
return result
return trio_pair_chain_x
def enum_four_two_solo():
"""四带二单"""
result = []
weight = 0
for four in CARD_FOUR:
weight += 1
all_cards = [card for card in CARDS if card != four[0]]
it = itertools.combinations_with_replacement(all_cards, 2)
for e in it:
if e not in [('BJ', 'BJ'), ('CJ', 'CJ')]:
put_sorted_cards(result, four + list(e), weight)
logging.debug('four_solo: {}'.format(len(result)))
return result
def enum_four_two_pair():
"""四带二对"""
result = []
weight = 0
for four in CARD_FOUR:
weight += 1
all_cards = [card for card in CARDS_NO_JOKERS if card != four[0]]
it = itertools.combinations_with_replacement(all_cards, 2)
for e in it:
put_sorted_cards(result, four + order_repeat(e, 2), weight)
logging.debug('four_pair: {}'.format(len(result)))
return result
def enum_bomb():
"""枚举所有的炸弹"""
return [(cards2str(four), w) for w, four in enumerate(CARD_FOUR)]
def enum_rocket():
"""返回王炸"""
return [('BJ-CJ', 0)]
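# Quick illustration of the enumerators above (not part of the original module):
# each helper yields (cards_string, weight) pairs, e.g.
# >>> enum_rocket()
# [('BJ-CJ', 0)]
# >>> enum_bomb()[0]
# ('3-3-3-3', 0)
# >>> enum_pair()[:2]
# [('3-3', 0), ('4-4', 1)]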
class Doudizhu(object):
"""枚举所有牌型,生成花色无关、顺序无关字典
提供的接口:
- 规则检查
- 牌型大小比较
- 可出牌提示
"""
CARD_TYPE = [
{'name': 'solo', 'zh_name': u'单牌',
'func': enum_solo, 'size': 15},
{'name': 'solo_chain_5', 'zh_name': u'顺子5连',
'func': enum_solo_chain(5), 'size': 8},
{'name': 'solo_chain_6', 'zh_name': u'顺子6连',
'func': enum_solo_chain(6), 'size': 7},
{'name': 'solo_chain_7', 'zh_name': u'顺子7连',
'func': enum_solo_chain(7), 'size': 6},
{'name': 'solo_chain_8', 'zh_name': u'顺子8连',
'func': enum_solo_chain(8), 'size': 5},
{'name': 'solo_chain_9', 'zh_name': u'顺子9连',
'func': enum_solo_chain(9), 'size': 4},
{'name': 'solo_chain_10', 'zh_name': u'顺子10连',
'func': enum_solo_chain(10), 'size': 3},
{'name': 'solo_chain_11', 'zh_name': u'顺子11连',
'func': enum_solo_chain(11), 'size': 2},
{'name': 'solo_chain_12', 'zh_name': u'顺子12连',
'func': enum_solo_chain(12), 'size': 1},
{'name': 'pair', 'zh_name': u'对子',
'func': enum_pair, 'size': 13},
{'name': 'pair_chain_3', 'zh_name': u'连对3连',
'func': enum_pair_chain(3), 'size': 10},
{'name': 'pair_chain_4', 'zh_name': u'连对4连',
'func': enum_pair_chain(4), 'size': 9},
{'name': 'pair_chain_5', 'zh_name': u'连对5连',
'func': enum_pair_chain(5), 'size': 8},
{'name': 'pair_chain_6', 'zh_name': u'连对6连',
'func': enum_pair_chain(6), 'size': 7},
{'name': 'pair_chain_7', 'zh_name': u'连对7连',
'func': enum_pair_chain(7), 'size': 6},
{'name': 'pair_chain_8', 'zh_name': u'连对8连',
'func': enum_pair_chain(8), 'size': 5},
{'name': 'pair_chain_9', 'zh_name': u'连对9连',
'func': enum_pair_chain(9), 'size': 4},
{'name': 'pair_chain_10', 'zh_name': u'连对10连',
'func': enum_pair_chain(10), 'size': 3},
{'name': 'trio', 'zh_name': u'三张',
'func': enum_trio_chain(1), 'size': 13},
{'name': 'trio_chain_2', 'zh_name': u'连三2连',
'func': enum_trio_chain(2), 'size': 11},
{'name': 'trio_chain_3', 'zh_name': u'连三3连',
'func': enum_trio_chain(3), 'size': 10},
{'name': 'trio_chain_4', 'zh_name': u'连三4连',
'func': enum_trio_chain(4), 'size': 9},
{'name': 'trio_chain_5', 'zh_name': u'连三5连',
'func': enum_trio_chain(5), 'size': 8},
{'name': 'trio_chain_6', 'zh_name': u'连三6连',
'func': enum_trio_chain(6), 'size': 7},
{'name': 'trio_solo', 'zh_name': u'三带一',
'func': enum_trio_solo, 'size': 182},
{'name': 'trio_solo_chain_2', 'zh_name': u'连三带一2连',
'func': enum_trio_solo_chain(2), 'size': 979},
{'name': 'trio_solo_chain_3', 'zh_name': u'连三带一3连',
'func': enum_trio_solo_chain(3), 'size': 3400},
{'name': 'trio_solo_chain_4', 'zh_name': u'连三带一4连',
'func': enum_trio_solo_chain(4), 'size': 7830},
{'name': 'trio_solo_chain_5', 'zh_name': u'连三带一5连',
'func': enum_trio_solo_chain(5), 'size': 12512},
{'name': 'trio_pair', 'zh_name': u'三带一对',
'func': enum_trio_pair_chain(1), 'size': 156},
{'name': 'trio_pair_chain_2', 'zh_name': u'连三带一对2连',
'func': enum_trio_pair_chain(2), 'size': 726},
{'name': 'trio_pair_chain_3', 'zh_name': u'连三带一对3连',
'func': enum_trio_pair_chain(3), 'size': 2100},
{'name': 'trio_pair_chain_4', 'zh_name': u'连三带一对4连',
'func': enum_trio_pair_chain(4), 'size': 3726},
{'name': 'four_two_solo', 'zh_name': u'四带二单',
'func': enum_four_two_solo, 'size': 1339},
{'name': 'four_two_pair', 'zh_name': u'四带二对',
'func': enum_four_two_pair, 'size': 1014},
{'name': 'bomb', 'zh_name': u'炸弹',
'func': enum_bomb, 'size': 13},
{'name': 'rocket', 'zh_name': u'王炸',
'func': enum_rocket, 'size': 1},
]
"""
{cards: [(type, weight), ],}
the value is a list in order to resolve conflicts, e.g.
- 3-3-3-3-4-4-4-4 and 4-4-4-4-3-3-3-3 as four-with-two-pairs
- a 3-chain trio-with-solo such as 3-3-3-4-4-4-5-5-5-6-6-6 can also be read as a 4-chain trio
todo: convert the key to binary format
"""
DATA = {}
"""
{type:{weight:[cards,]},}
"""
TYPE_CARDS = {}
TOTAL = 0
INIT_FLAG = False
@staticmethod
def init_doudizhu_dict():
if Doudizhu.INIT_FLAG:
return
Doudizhu.INIT_FLAG = True
for ct in Doudizhu.CARD_TYPE:
rst = ct['func']()
if len(rst) != ct['size']:
logging.error(ct)
Doudizhu.TOTAL += len(rst)
card_type = ct['name']
Doudizhu.TYPE_CARDS[card_type] = {}
for item in rst:
cards, weight = item
if cards not in Doudizhu.DATA:
Doudizhu.DATA[cards] = [(ct['name'], weight)]
else:
Doudizhu.DATA[cards].append((ct['name'], weight))
if weight not in Doudizhu.TYPE_CARDS[card_type]:
Doudizhu.TYPE_CARDS[card_type][weight] = [cards]
else:
Doudizhu.TYPE_CARDS[card_type][weight].append(cards)
logging.debug(Doudizhu.TOTAL)
@staticmethod
def print_multiple_types_cards():
for cards, value in iter(Doudizhu.DATA.items()):
if len(value) > 2 or \
(len(value) > 1 and value[0][0] != value[1][0]):
print(cards, value)
@staticmethod
def check_card_type(cards):
"""cards is str type"""
if isinstance(cards, str):
cards = str2cards(cards)
if not isinstance(cards, list):
return False, None
sorted_cards = sort_cards(cards)
value = Doudizhu.DATA.get(cards2str(sorted_cards))
if value is None:
return False, ValueError('invalid card type')
return True, value
@staticmethod
def type_greater(type_x, type_y):
"""check if x is greater than y
type_x/y: (type, weight)
>0: x > y
=0: x = y
<0: x < y
"""
if type_x[0] == type_y[0]:
return type_x[1] - type_y[1]
else:
if type_x[0] == 'rocket':
return 1
elif type_y[0] == 'rocket':
return -1
elif type_x[0] == 'bomb':
return 1
return ValueError('Can not compare card type')
@staticmethod
def cards_greater(cards_x, cards_y):
"""check if x is greater than y
x, y可能分别组成不同牌型
只要有x一种牌型大于y,就返回True和牌型
"""
ok, type_x = Doudizhu.check_card_type(cards_x)
if not ok:
return False, '{}: {}'.format(cards_x, type_x)
ok, type_y = Doudizhu.check_card_type(cards_y)
if not ok:
return False, '{}: {}'.format(cards_y, type_y)
for tx in type_x:
for ty in type_y:
flag = Doudizhu.type_greater(tx, ty)
if not isinstance(flag, ValueError) and flag > 0:
return True, tx[0]
return False, tx[0]
@staticmethod
def cards_contain(candidate_cardmap, cardmap):
for k, v in iter(cardmap.items()):
if k not in candidate_cardmap:
return False
if candidate_cardmap[k] < v:
return False
return True
@staticmethod
def list_greater_cards(cards_target, cards_candidate):
""" 对于目标牌组合cards_target
从候选牌cards_candidate中找出所有可以压制它的牌型
1. 对于cards_taget同牌型的不同权重组合来说,按其最大权重计算
如target='3-3-3-3-2-2-2-2', candidate='5-5-5-5-6-6-6-6'),
这里target当作<四个2带2对3>,所以返回是:
{'bomb': ['5-5-5-5', '6-6-6-6']}
2. 对于candidate中一组牌可作不同组合压制cards_taget的场景,只返回一种组合
如target='3-3-3-3-4-4-4-4', candidate='5-5-5-5-6-6-6-6'),
<四个5带2对6>,<四个6带2对5> 均大于 <四个4带2对3>
只返回一次'5-5-5-5-6-6-6-6',
{'bomb': ['5-5-5-5', '6-6-6-6'], 'four_two_pair': ['5-5-5-5-6-6-6-6']}
"""
ok, target_type = Doudizhu.check_card_type(cards_target)
if not ok:
logging.error('{}: {}'.format(cards_target, target_type))
return {}
        # Deduplicate target_type, keeping only the largest weight for each type
tmp_dict = {}
for card_type, weight in target_type:
if card_type not in tmp_dict or weight > tmp_dict[card_type]:
tmp_dict[card_type] = weight
target_type = [(k, v) for k, v in iter(tmp_dict.items())]
        # If the target is a rocket nothing can beat it, so return empty immediately
if target_type[0][0] == 'rocket':
return {}
        # Besides same-type answers, bombs and rockets may also beat the target
if target_type[0][0] != 'rocket':
if target_type[0][0] != 'bomb':
target_type.append(('bomb', -1))
target_type.append(('rocket', -1))
elif target_type[0][0] != 'bomb':
target_type.append(('bomb', -1))
logging.debug('target_type: {}'.format(target_type))
candidate_cardmap = str2cardmap(cards_candidate)
cards_gt = {}
for card_type, weight in target_type:
weight_gt = [w for w in Doudizhu.TYPE_CARDS[card_type].keys()
if w > weight]
if card_type not in cards_gt:
cards_gt[card_type] = []
logging.debug(weight_gt)
logging.debug(candidate_cardmap)
for w in sorted(weight_gt):
for w_cards in Doudizhu.TYPE_CARDS[card_type][w]:
w_cardmap = str2cardmap(w_cards)
if Doudizhu.cards_contain(candidate_cardmap, w_cardmap) \
and w_cards not in cards_gt[card_type]:
cards_gt[card_type].append(w_cards)
if not cards_gt[card_type]:
cards_gt.pop(card_type)
return cards_gt
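# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module. It assumes the
# helpers (str2cards, cards2str, str2cardmap, sort_cards) and the enum_*
# generators defined earlier in this file, and that card strings are
# '-'-joined ranks as in the docstrings above.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    Doudizhu.init_doudizhu_dict()
    # Classify a hand: returns (ok, [(type, weight), ...])
    print(Doudizhu.check_card_type('3-3-3-3-4-4-4-4'))
    # Compare two plays: True plus the winning type if the first beats the second
    print(Doudizhu.cards_greater('4-4-4-4', '3-3-3-3'))
    # List every play in the candidate hand that beats the target play
    print(Doudizhu.list_greater_cards('3-3-3', '4-4-4-5-5-5-2-2'))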
| 32.290146
| 81
| 0.540661
|
9dce22b35b66b2604e28764662052d36ab9f696c
| 27,435
|
py
|
Python
|
archive/py_lib/ftplib.py
|
benc-uk/ziz
|
4e05e5f90c33007d17d4f8bd4b7347284a32eb40
|
[
"Apache-2.0"
] | 69
|
2015-01-16T13:12:55.000Z
|
2022-02-14T12:55:27.000Z
|
archive/py_lib/ftplib.py
|
benc-uk/ziz
|
4e05e5f90c33007d17d4f8bd4b7347284a32eb40
|
[
"Apache-2.0"
] | 3
|
2019-07-19T18:02:02.000Z
|
2021-04-25T06:35:42.000Z
|
archive/py_lib/ftplib.py
|
benc-uk/ziz
|
4e05e5f90c33007d17d4f8bd4b7347284a32eb40
|
[
"Apache-2.0"
] | 32
|
2015-02-06T12:10:32.000Z
|
2019-06-18T03:21:36.000Z
|
"""An FTP client class and some helper functions.
Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds
Example:
>>> from ftplib import FTP
>>> ftp = FTP('ftp.python.org') # connect to host, default port
>>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@
'230 Guest login ok, access restrictions apply.'
>>> ftp.retrlines('LIST') # list directory contents
total 9
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
'226 Transfer complete.'
>>> ftp.quit()
'221 Goodbye.'
>>>
A nice test that reveals some of the network dialogue would be:
python ftplib.py -d localhost -l -p -l
"""
#
# Changes and improvements suggested by Steve Majewski.
# Modified by Jack to work on the mac.
# Modified by Siebren to support docstrings and PASV.
#
import os
import sys
# Import the SOCKS module if it exists, else the standard socket module
try:
import SOCKS; socket = SOCKS; del SOCKS # import SOCKS as socket
from socket import getfqdn; socket.getfqdn = getfqdn; del getfqdn
except ImportError:
import socket
__all__ = ["FTP","Netrc"]
# Magic number from <socket.h>
MSG_OOB = 0x1 # Process data out of band
# The standard FTP server control port
FTP_PORT = 21
# Exception raised when an error or invalid response is received
class Error(Exception): pass
class error_reply(Error): pass # unexpected [123]xx reply
class error_temp(Error): pass # 4xx errors
class error_perm(Error): pass # 5xx errors
class error_proto(Error): pass # response does not begin with [1-5]
# All exceptions (hopefully) that may be raised here and that aren't
# (always) programming errors on our side
all_errors = (Error, socket.error, IOError, EOFError)
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
# The class itself
class FTP:
'''An FTP client class.
    To create a connection, call the class using these arguments:
host, user, passwd, acct
These are all strings, and have default value ''.
Then use self.connect() with optional host and port argument.
To download a file, use ftp.retrlines('RETR ' + filename),
or ftp.retrbinary() with slightly different arguments.
To upload a file, use ftp.storlines() or ftp.storbinary(),
which have an open file as argument (see their definitions
below for details).
The download/upload functions first issue appropriate TYPE
and PORT or PASV commands.
'''
debugging = 0
host = ''
port = FTP_PORT
sock = None
file = None
welcome = None
passiveserver = 1
# Initialization method (called by class instantiation).
# Initialize host to localhost, port to standard ftp port
# Optional arguments are host (for connect()),
# and user, passwd, acct (for login())
def __init__(self, host='', user='', passwd='', acct=''):
if host:
self.connect(host)
if user: self.login(user, passwd, acct)
def connect(self, host = '', port = 0):
'''Connect to host. Arguments are:
- host: hostname to connect to (string, default previous host)
- port: port to connect to (integer, default previous port)'''
if host: self.host = host
if port: self.port = port
msg = "getaddrinfo returns an empty list"
for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
self.sock.connect(sa)
except socket.error, msg:
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
self.af = af
self.file = self.sock.makefile('rb')
self.welcome = self.getresp()
return self.welcome
def getwelcome(self):
'''Get the welcome message from the server.
(this is read and squirreled away by connect())'''
if self.debugging:
print '*welcome*', self.sanitize(self.welcome)
return self.welcome
def set_debuglevel(self, level):
'''Set the debugging level.
The required argument level means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF'''
self.debugging = level
debug = set_debuglevel
def set_pasv(self, val):
'''Use passive or active mode for data transfers.
With a false argument, use the normal PORT mode,
With a true argument, use the PASV command.'''
self.passiveserver = val
# Internal: "sanitize" a string for printing
def sanitize(self, s):
if s[:5] == 'pass ' or s[:5] == 'PASS ':
i = len(s)
while i > 5 and s[i-1] in '\r\n':
i = i-1
s = s[:5] + '*'*(i-5) + s[i:]
return repr(s)
# Internal: send one line to the server, appending CRLF
def putline(self, line):
line = line + CRLF
if self.debugging > 1: print '*put*', self.sanitize(line)
self.sock.sendall(line)
# Internal: send one command to the server (through putline())
def putcmd(self, line):
if self.debugging: print '*cmd*', self.sanitize(line)
self.putline(line)
# Internal: return one line from the server, stripping CRLF.
# Raise EOFError if the connection is closed
def getline(self):
line = self.file.readline()
if self.debugging > 1:
print '*get*', self.sanitize(line)
if not line: raise EOFError
if line[-2:] == CRLF: line = line[:-2]
elif line[-1:] in CRLF: line = line[:-1]
return line
# Internal: get a response from the server, which may possibly
# consist of multiple lines. Return a single string with no
# trailing CRLF. If the response consists of multiple lines,
# these are separated by '\n' characters in the string
def getmultiline(self):
line = self.getline()
if line[3:4] == '-':
code = line[:3]
while 1:
nextline = self.getline()
line = line + ('\n' + nextline)
if nextline[:3] == code and \
nextline[3:4] != '-':
break
return line
# Internal: get a response from the server.
# Raise various errors if the response indicates an error
def getresp(self):
resp = self.getmultiline()
if self.debugging: print '*resp*', self.sanitize(resp)
self.lastresp = resp[:3]
c = resp[:1]
if c in ('1', '2', '3'):
return resp
if c == '4':
raise error_temp, resp
if c == '5':
raise error_perm, resp
raise error_proto, resp
def voidresp(self):
"""Expect a response beginning with '2'."""
resp = self.getresp()
if resp[0] != '2':
raise error_reply, resp
return resp
def abort(self):
'''Abort a file transfer. Uses out-of-band data.
This does not follow the procedure from the RFC to send Telnet
IP and Synch; that doesn't seem to work with the servers I've
tried. Instead, just send the ABOR command as OOB data.'''
line = 'ABOR' + CRLF
if self.debugging > 1: print '*put urgent*', self.sanitize(line)
self.sock.sendall(line, MSG_OOB)
resp = self.getmultiline()
if resp[:3] not in ('426', '226'):
raise error_proto, resp
def sendcmd(self, cmd):
'''Send a command and return the response.'''
self.putcmd(cmd)
return self.getresp()
def voidcmd(self, cmd):
"""Send a command and expect a response beginning with '2'."""
self.putcmd(cmd)
return self.voidresp()
def sendport(self, host, port):
'''Send a PORT command with the current host and the given
port number.
'''
hbytes = host.split('.')
pbytes = [repr(port/256), repr(port%256)]
bytes = hbytes + pbytes
cmd = 'PORT ' + ','.join(bytes)
return self.voidcmd(cmd)
def sendeprt(self, host, port):
'''Send a EPRT command with the current host and the given port number.'''
af = 0
if self.af == socket.AF_INET:
af = 1
if self.af == socket.AF_INET6:
af = 2
if af == 0:
raise error_proto, 'unsupported address family'
fields = ['', repr(af), host, repr(port), '']
cmd = 'EPRT ' + '|'.join(fields)
return self.voidcmd(cmd)
def makeport(self):
'''Create a new socket and send a PORT command for it.'''
msg = "getaddrinfo returns an empty list"
sock = None
for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
sock.bind(sa)
except socket.error, msg:
if sock:
sock.close()
sock = None
continue
break
if not sock:
raise socket.error, msg
sock.listen(1)
port = sock.getsockname()[1] # Get proper port
host = self.sock.getsockname()[0] # Get proper host
if self.af == socket.AF_INET:
resp = self.sendport(host, port)
else:
resp = self.sendeprt(host, port)
return sock
def makepasv(self):
if self.af == socket.AF_INET:
host, port = parse227(self.sendcmd('PASV'))
else:
host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
return host, port
def ntransfercmd(self, cmd, rest=None):
"""Initiate a transfer over the data connection.
If the transfer is active, send a port command and the
transfer command, and accept the connection. If the server is
passive, send a pasv command, connect to it, and start the
transfer command. Either way, return the socket for the
connection and the expected size of the transfer. The
expected size may be None if it could not be determined.
Optional `rest' argument can be a string that is sent as the
argument to a RESTART command. This is essentially a server
marker used to tell the server to skip over any data up to the
given marker.
"""
size = None
if self.passiveserver:
host, port = self.makepasv()
af, socktype, proto, canon, sa = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)[0]
conn = socket.socket(af, socktype, proto)
conn.connect(sa)
if rest is not None:
self.sendcmd("REST %s" % rest)
resp = self.sendcmd(cmd)
# Some servers apparently send a 200 reply to
# a LIST or STOR command, before the 150 reply
# (and way before the 226 reply). This seems to
# be in violation of the protocol (which only allows
# 1xx or error messages for LIST), so we just discard
# this response.
if resp[0] == '2':
resp = self.getresp()
if resp[0] != '1':
raise error_reply, resp
else:
sock = self.makeport()
if rest is not None:
self.sendcmd("REST %s" % rest)
resp = self.sendcmd(cmd)
# See above.
if resp[0] == '2':
resp = self.getresp()
if resp[0] != '1':
raise error_reply, resp
conn, sockaddr = sock.accept()
if resp[:3] == '150':
# this is conditional in case we received a 125
size = parse150(resp)
return conn, size
def transfercmd(self, cmd, rest=None):
"""Like ntransfercmd() but returns only the socket."""
return self.ntransfercmd(cmd, rest)[0]
def login(self, user = '', passwd = '', acct = ''):
'''Login, default anonymous.'''
if not user: user = 'anonymous'
if not passwd: passwd = ''
if not acct: acct = ''
if user == 'anonymous' and passwd in ('', '-'):
# If there is no anonymous ftp password specified
# then we'll just use anonymous@
# We don't send any other thing because:
# - We want to remain anonymous
# - We want to stop SPAM
        # - We don't want to let ftp sites discriminate by the user,
# host or country.
passwd = passwd + 'anonymous@'
resp = self.sendcmd('USER ' + user)
if resp[0] == '3': resp = self.sendcmd('PASS ' + passwd)
if resp[0] == '3': resp = self.sendcmd('ACCT ' + acct)
if resp[0] != '2':
raise error_reply, resp
return resp
def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
"""Retrieve data in binary mode.
`cmd' is a RETR command. `callback' is a callback function is
called for each block. No more than `blocksize' number of
bytes will be read from the socket. Optional `rest' is passed
to transfercmd().
A new port is created for you. Return the response code.
"""
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd, rest)
while 1:
data = conn.recv(blocksize)
if not data:
break
callback(data)
conn.close()
return self.voidresp()
def retrlines(self, cmd, callback = None):
'''Retrieve data in line mode.
The argument is a RETR or LIST command.
The callback function (2nd argument) is called for each line,
with trailing CRLF stripped. This creates a new port for you.
print_line() is the default callback.'''
if callback is None: callback = print_line
resp = self.sendcmd('TYPE A')
conn = self.transfercmd(cmd)
fp = conn.makefile('rb')
while 1:
line = fp.readline()
if self.debugging > 2: print '*retr*', repr(line)
if not line:
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] == '\n':
line = line[:-1]
callback(line)
fp.close()
conn.close()
return self.voidresp()
def storbinary(self, cmd, fp, blocksize=8192):
'''Store a file in binary mode.'''
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd)
while 1:
buf = fp.read(blocksize)
if not buf: break
conn.sendall(buf)
conn.close()
return self.voidresp()
def storlines(self, cmd, fp):
'''Store a file in line mode.'''
self.voidcmd('TYPE A')
conn = self.transfercmd(cmd)
while 1:
buf = fp.readline()
if not buf: break
if buf[-2:] != CRLF:
if buf[-1] in CRLF: buf = buf[:-1]
buf = buf + CRLF
conn.sendall(buf)
conn.close()
return self.voidresp()
def acct(self, password):
'''Send new account name.'''
cmd = 'ACCT ' + password
return self.voidcmd(cmd)
def nlst(self, *args):
'''Return a list of files in a given directory (default the current).'''
cmd = 'NLST'
for arg in args:
cmd = cmd + (' ' + arg)
files = []
self.retrlines(cmd, files.append)
return files
def dir(self, *args):
'''List a directory in long form.
By default list current directory to stdout.
Optional last argument is callback function; all
non-empty arguments before it are concatenated to the
LIST command. (This *should* only be used for a pathname.)'''
cmd = 'LIST'
func = None
if args[-1:] and type(args[-1]) != type(''):
args, func = args[:-1], args[-1]
for arg in args:
if arg:
cmd = cmd + (' ' + arg)
self.retrlines(cmd, func)
def rename(self, fromname, toname):
'''Rename a file.'''
resp = self.sendcmd('RNFR ' + fromname)
if resp[0] != '3':
raise error_reply, resp
return self.voidcmd('RNTO ' + toname)
def delete(self, filename):
'''Delete a file.'''
resp = self.sendcmd('DELE ' + filename)
if resp[:3] in ('250', '200'):
return resp
elif resp[:1] == '5':
raise error_perm, resp
else:
raise error_reply, resp
def cwd(self, dirname):
'''Change to a directory.'''
if dirname == '..':
try:
return self.voidcmd('CDUP')
except error_perm, msg:
if msg.args[0][:3] != '500':
raise
elif dirname == '':
dirname = '.' # does nothing, but could return error
cmd = 'CWD ' + dirname
return self.voidcmd(cmd)
def size(self, filename):
'''Retrieve the size of a file.'''
# Note that the RFC doesn't say anything about 'SIZE'
resp = self.sendcmd('SIZE ' + filename)
if resp[:3] == '213':
s = resp[3:].strip()
try:
return int(s)
except (OverflowError, ValueError):
return long(s)
def mkd(self, dirname):
'''Make a directory, return its full pathname.'''
resp = self.sendcmd('MKD ' + dirname)
return parse257(resp)
def rmd(self, dirname):
'''Remove a directory.'''
return self.voidcmd('RMD ' + dirname)
def pwd(self):
'''Return current working directory.'''
resp = self.sendcmd('PWD')
return parse257(resp)
def quit(self):
'''Quit, and close the connection.'''
resp = self.voidcmd('QUIT')
self.close()
return resp
def close(self):
'''Close the connection without assuming anything about it.'''
if self.file:
self.file.close()
self.sock.close()
self.file = self.sock = None
_150_re = None
def parse150(resp):
'''Parse the '150' response for a RETR request.
Returns the expected transfer size or None; size is not guaranteed to
be present in the 150 message.
'''
if resp[:3] != '150':
raise error_reply, resp
global _150_re
if _150_re is None:
import re
_150_re = re.compile("150 .* \((\d+) bytes\)", re.IGNORECASE)
m = _150_re.match(resp)
if not m:
return None
s = m.group(1)
try:
return int(s)
except (OverflowError, ValueError):
return long(s)
_227_re = None
def parse227(resp):
'''Parse the '227' response for a PASV request.
Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)'
Return ('host.addr.as.numbers', port#) tuple.'''
if resp[:3] != '227':
raise error_reply, resp
global _227_re
if _227_re is None:
import re
_227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)')
m = _227_re.search(resp)
if not m:
raise error_proto, resp
numbers = m.groups()
host = '.'.join(numbers[:4])
port = (int(numbers[4]) << 8) + int(numbers[5])
return host, port
def parse229(resp, peer):
'''Parse the '229' response for a EPSV request.
Raises error_proto if it does not contain '(|||port|)'
Return ('host.addr.as.numbers', port#) tuple.'''
if resp[:3] != '229':
raise error_reply, resp
left = resp.find('(')
if left < 0: raise error_proto, resp
right = resp.find(')', left + 1)
if right < 0:
raise error_proto, resp # should contain '(|||port|)'
if resp[left + 1] != resp[right - 1]:
raise error_proto, resp
parts = resp[left + 1:right].split(resp[left+1])
if len(parts) != 5:
raise error_proto, resp
host = peer[0]
port = int(parts[3])
return host, port
def parse257(resp):
'''Parse the '257' response for a MKD or PWD request.
This is a response to a MKD or PWD request: a directory name.
Returns the directoryname in the 257 reply.'''
if resp[:3] != '257':
raise error_reply, resp
if resp[3:5] != ' "':
return '' # Not compliant to RFC 959, but UNIX ftpd does this
dirname = ''
i = 5
n = len(resp)
while i < n:
c = resp[i]
i = i+1
if c == '"':
if i >= n or resp[i] != '"':
break
i = i+1
dirname = dirname + c
return dirname
def print_line(line):
'''Default retrlines callback to print a line.'''
print line
def ftpcp(source, sourcename, target, targetname = '', type = 'I'):
'''Copy file from one FTP-instance to another.'''
if not targetname: targetname = sourcename
type = 'TYPE ' + type
source.voidcmd(type)
target.voidcmd(type)
sourcehost, sourceport = parse227(source.sendcmd('PASV'))
target.sendport(sourcehost, sourceport)
# RFC 959: the user must "listen" [...] BEFORE sending the
# transfer request.
# So: STOR before RETR, because here the target is a "user".
treply = target.sendcmd('STOR ' + targetname)
if treply[:3] not in ('125', '150'): raise error_proto # RFC 959
sreply = source.sendcmd('RETR ' + sourcename)
if sreply[:3] not in ('125', '150'): raise error_proto # RFC 959
source.voidresp()
target.voidresp()
class Netrc:
"""Class to parse & provide access to 'netrc' format files.
See the netrc(4) man page for information on the file format.
WARNING: This class is obsolete -- use module netrc instead.
"""
__defuser = None
__defpasswd = None
__defacct = None
def __init__(self, filename=None):
if filename is None:
if "HOME" in os.environ:
filename = os.path.join(os.environ["HOME"],
".netrc")
else:
raise IOError, \
"specify file to load or set $HOME"
self.__hosts = {}
self.__macros = {}
fp = open(filename, "r")
in_macro = 0
while 1:
line = fp.readline()
if not line: break
if in_macro and line.strip():
macro_lines.append(line)
continue
elif in_macro:
self.__macros[macro_name] = tuple(macro_lines)
in_macro = 0
words = line.split()
host = user = passwd = acct = None
default = 0
i = 0
while i < len(words):
w1 = words[i]
if i+1 < len(words):
w2 = words[i + 1]
else:
w2 = None
if w1 == 'default':
default = 1
elif w1 == 'machine' and w2:
host = w2.lower()
i = i + 1
elif w1 == 'login' and w2:
user = w2
i = i + 1
elif w1 == 'password' and w2:
passwd = w2
i = i + 1
elif w1 == 'account' and w2:
acct = w2
i = i + 1
elif w1 == 'macdef' and w2:
macro_name = w2
macro_lines = []
in_macro = 1
break
i = i + 1
if default:
self.__defuser = user or self.__defuser
self.__defpasswd = passwd or self.__defpasswd
self.__defacct = acct or self.__defacct
if host:
if host in self.__hosts:
ouser, opasswd, oacct = \
self.__hosts[host]
user = user or ouser
passwd = passwd or opasswd
acct = acct or oacct
self.__hosts[host] = user, passwd, acct
fp.close()
def get_hosts(self):
"""Return a list of hosts mentioned in the .netrc file."""
return self.__hosts.keys()
def get_account(self, host):
"""Returns login information for the named host.
The return value is a triple containing userid,
password, and the accounting field.
"""
host = host.lower()
user = passwd = acct = None
if host in self.__hosts:
user, passwd, acct = self.__hosts[host]
user = user or self.__defuser
passwd = passwd or self.__defpasswd
acct = acct or self.__defacct
return user, passwd, acct
def get_macros(self):
"""Return a list of all defined macro names."""
return self.__macros.keys()
def get_macro(self, macro):
"""Return a sequence of lines which define a named macro."""
return self.__macros[macro]
def test():
'''Test program.
Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...
-d dir
-l list
-p password
'''
if len(sys.argv) < 2:
print test.__doc__
sys.exit(0)
debugging = 0
rcfile = None
while sys.argv[1] == '-d':
debugging = debugging+1
del sys.argv[1]
if sys.argv[1][:2] == '-r':
# get name of alternate ~/.netrc file:
rcfile = sys.argv[1][2:]
del sys.argv[1]
host = sys.argv[1]
ftp = FTP(host)
ftp.set_debuglevel(debugging)
userid = passwd = acct = ''
try:
netrc = Netrc(rcfile)
except IOError:
if rcfile is not None:
sys.stderr.write("Could not open account file"
" -- using anonymous login.")
else:
try:
userid, passwd, acct = netrc.get_account(host)
except KeyError:
# no account for host
sys.stderr.write(
"No account -- using anonymous login.")
ftp.login(userid, passwd, acct)
for file in sys.argv[2:]:
if file[:2] == '-l':
ftp.dir(file[2:])
elif file[:2] == '-d':
cmd = 'CWD'
if file[2:]: cmd = cmd + ' ' + file[2:]
resp = ftp.sendcmd(cmd)
elif file == '-p':
ftp.set_pasv(not ftp.passiveserver)
else:
ftp.retrbinary('RETR ' + file, \
sys.stdout.write, 1024)
ftp.quit()
if __name__ == '__main__':
test()
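# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: downloading a file in
# binary mode with retrbinary(), which the FTP class docstring mentions but the
# interactive example above does not show. Host and paths are placeholders.
def example_binary_download(host='ftp.example.com',
                            remotepath='pub/README', localpath='README'):
    ftp = FTP(host)                 # connect to host, default port
    ftp.login()                     # anonymous login
    f = open(localpath, 'wb')
    try:
        # retrbinary issues 'TYPE I' and streams blocks to the callback
        ftp.retrbinary('RETR ' + remotepath, f.write, 8192)
    finally:
        f.close()
        ftp.quit()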
| 33.294903
| 101
| 0.546419
|
4ed8511ed7c240e018a9c174a941469cddb9a452
| 8,569
|
py
|
Python
|
bin/dumptruck.py
|
Meshcloud/dumptruck
|
659392f67f99f9597c45f1de891b4909ca31bbd2
|
[
"MIT"
] | 1
|
2018-08-24T07:49:21.000Z
|
2018-08-24T07:49:21.000Z
|
bin/dumptruck.py
|
meshcloud/dumptruck
|
659392f67f99f9597c45f1de891b4909ca31bbd2
|
[
"MIT"
] | 2
|
2018-10-01T20:39:00.000Z
|
2018-10-17T04:58:51.000Z
|
bin/dumptruck.py
|
meshcloud/dumptruck
|
659392f67f99f9597c45f1de891b4909ca31bbd2
|
[
"MIT"
] | 1
|
2018-12-01T21:13:29.000Z
|
2018-12-01T21:13:29.000Z
|
#!/usr/bin/env python3
import time
import json
import os
import os.path
import subprocess
import sys
import traceback
import re
from glob import glob
import requests
import swift
import rclone
ROOT = os.path.dirname(os.path.realpath(__file__))
DUMP = ROOT + "/dump.sh"
def backup_all(encryption, sources, storage, monitor=None):
sources = flatten_sources(sources, monitor)
for source in sources:
try:
print("Backing up", source["name"], "...")
backup(encryption, source, storage)
if monitor:
notify_success(source, **monitor)
# Catch all so that one failed backup doesn't stop all others from happening
except Exception as e:
print("Backup failed with:", e)
            if monitor:
                notify_failure(source, **monitor)
traceback.print_exc()
finally:
remove_files()
def flatten_sources(sources, monitor=None):
for source in sources:
if source["dbtype"] == "ravendbmultiple":
try:
databases = existing_ravendb_databases(**source)
for db in databases:
plain_source = dict(source)
plain_source["database"] = db
plain_source["name"] = db
yield plain_source
except Exception as e:
print("Failed to resolve databases from source:", source["name"], e)
                if monitor:
                    notify_failure(source, **monitor)
traceback.print_exc()
else:
yield source
def existing_ravendb_databases(url, cert, key, database, **_):
resp = requests.get(
f"{url}/databases",
cert=(cert, key),
verify=False,
)
pattern = re.compile(database)
return [
database["Name"]
for database in resp.json()["Databases"]
if not database["Disabled"] and pattern.match(database["Name"])
]
def backup(encryption, source, storage):
path = dump(encryption, source)
print("Backup completed:", path)
for store in storage:
if store["type"] == "swift":
token = swift.auth(**store)
swift.upload(path, token, store["container_url"])
swift.cleanup(path, token, store["container_url"], source["keep"])
elif store["type"] == "rclone":
rclone.upload(path, store["remote"], store["target"])
rclone.cleanup(path, store["remote"], store["target"], source["keep"])
def dump_other(
encryption, dbtype, host, username, password, database, name=None, tunnel="", **_
):
timestamp = time.strftime("%Y%m%d-%H%M", time.gmtime())
path = ".".join((name, timestamp, "gz.enc"))
cmd = [
DUMP,
"dump_other",
dbtype,
host,
username,
password,
database,
path,
encryption,
tunnel,
]
subprocess.check_call(cmd)
return path
def dump_ravendb(
encryption, timestamp, url, cert, key, database, name, collections=None, **_
):
if collections:
resp = requests.get(
f"{url}/databases/{database}/collections/stats",
cert=(cert, key),
verify=False,
)
existing_collections = [
collection
for collection in resp.json()["Collections"].keys()
if collection in collections
]
print("backing up collections:", existing_collections)
else:
existing_collections = None
path = ".".join((name, timestamp, "ravendbdump.enc"))
params = [
url,
cert,
key,
database,
json.dumps(existing_collections),
path,
encryption,
]
cmd = [DUMP, "dump_ravendb", *params]
subprocess.check_call(cmd)
return path
def dump(encryption, source):
timestamp = time.strftime("%Y%m%d-%H%M", time.gmtime())
dbtype = source["dbtype"]
if dbtype.startswith("ravendb"):
return dump_ravendb(encryption, timestamp, **source)
return dump_other(encryption, **source)
def remove_files():
for path in glob("*.enc"):
os.remove(path)
def notify(source, username, password, url, data):
url = "{root}/metrics/job/dumptruck/instance/{name}".format(root=url, **source)
resp = requests.post(
url,
data=data,
auth=requests.auth.HTTPBasicAuth(username, password),
)
print(resp.text)
def notify_success(source, username, password, url):
source = dict(source)
source.setdefault("database", "")
data = "\n".join(
(
"# TYPE backup_time_seconds gauge",
"# HELP backup_time_seconds Last Unix time when this source was backed up.",
'backup_time_seconds{{database="{database}",type="{dbtype}"}} {time}\n'
"# TYPE backup_status gauge",
"# HELP backup_status Indicates success/failure of the last backup attempt.",
'backup_status{{database="{database}",type="{dbtype}"}} 1\n',
)
).format(database=source["database"], dbtype=source["dbtype"], time=time.time())
notify(source, username, password, url, data)
def notify_failure(source, username, password, url):
source = dict(source)
source.setdefault("database", "")
data = "\n".join(
(
"# TYPE backup_status gauge",
"# HELP backup_status Indicates success/failure of the last backup attempt.",
'backup_status{{database="{database}",type="{dbtype}"}} -1\n',
)
).format(database=source["database"], dbtype=source["dbtype"])
notify(source, username, password, url, data)
def restore(name, file, database, encryption, sources, storage, **_):
for s in sources:
if s["name"] == name:
source = s
break
else:
print("No database '{}' in config.".format(name))
return
if database:
source["database"] = database
for store in storage:
try:
if store["type"] == "swift":
token = swift.auth(**store)
swift.save_object(token, store["container_url"], file, ".")
elif store["type"] == "rclone":
rclone.save_object(store["remote"], store["target"], file, ".")
elif store["type"] == "local":
pass
else:
continue
break
except Exception as e:
print("Failed to get {} with error:", e)
continue
else:
print("Backup could not be retrieved, aborting restore.")
dbtype = source["dbtype"]
if dbtype.startswith("ravendb"):
return restore_ravendb(file, encryption, **source)
return restore_other("./" + file, encryption, **source)
def restore_other(
path, encryption, dbtype, host, username, password, database, tunnel=None, **_
):
cmd = [
DUMP,
"restore_other",
dbtype,
host,
username,
password,
database,
path,
encryption,
]
if tunnel:
cmd.append(tunnel)
subprocess.check_call(cmd)
remove_files()
def restore_ravendb(path, encryption, url, cert, key, database, tunnel=None, **_):
cmd = [DUMP, "restore_ravendb", url, cert, key, database, path, encryption]
if tunnel:
cmd.append(tunnel)
subprocess.check_call(cmd)
remove_files()
def usage():
    print(
        "Usage: {0} <config.json>                            perform database backups according to <config.json>\n"
        "   or: {0} <config.json> <source_name>              perform a single database backup according to <config.json>\n"
        "   or: {0} <config.json> <source_name> <dump> [<database>]  take settings from <config.json>, download <dump> from a "
        "storage provider and try to restore it to the database named <source_name>".format(sys.argv[0])
    )
def main():
    if len(sys.argv) < 2 or sys.argv[1] in ("-h", "--help"):
        usage()
        return
    path = sys.argv[1]
with open(path) as f:
config = json.load(f)
if len(sys.argv) == 2:
backup_all(**config)
elif len(sys.argv) == 3:
name = sys.argv[2]
config["sources"] = list(filter(lambda x: x["name"] == name, config["sources"]))
backup_all(**config)
elif len(sys.argv) >= 4:
name = sys.argv[2]
dump = sys.argv[3]
# optionally override database name
if len(sys.argv) == 5:
database = sys.argv[4]
else:
database = None
restore(name, dump, database, **config)
else:
usage()
if __name__ == "__main__":
main()
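# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original script: the shape of the
# config.json this script expects, inferred from how backup_all()/restore()
# unpack it above. All values are placeholders, and the exact keys required by
# swift.auth()/rclone.upload() depend on those modules, so treat this only as a
# starting point.
EXAMPLE_CONFIG = {
    "encryption": "passphrase-used-by-dump.sh",
    "monitor": {  # optional Prometheus pushgateway used by notify_*()
        "url": "https://pushgateway.example.com",
        "username": "metrics",
        "password": "secret",
    },
    "sources": [
        {
            "name": "appdb",
            "dbtype": "postgres",   # anything not starting with "ravendb" goes through dump_other()
            "host": "db.example.com",
            "username": "backup",
            "password": "secret",
            "database": "appdb",
            "keep": 14,             # retention passed to the storage cleanup calls
        },
    ],
    "storage": [
        {"type": "rclone", "remote": "s3", "target": "backups/appdb"},
    ],
}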
| 27.641935
| 194
| 0.57743
|
ba68f2a4b59ffae2b47c9cfedcd19509e93dfcea
| 814
|
py
|
Python
|
src/freqpoly/freqpoly.py
|
Ellon-M/visualizations
|
5a42c213ea8fd0597e2035778d9ae6460eb9e821
|
[
"MIT"
] | null | null | null |
src/freqpoly/freqpoly.py
|
Ellon-M/visualizations
|
5a42c213ea8fd0597e2035778d9ae6460eb9e821
|
[
"MIT"
] | null | null | null |
src/freqpoly/freqpoly.py
|
Ellon-M/visualizations
|
5a42c213ea8fd0597e2035778d9ae6460eb9e821
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.cm as cmp
import matplotlib.colors as cl
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
# matplotlib
fig, ax = plt.subplots(1, figsize=(24,15))
# assumes `event_count_summer` is a DataFrame with an 'Event' column, loaded elsewhere
x = np.array(event_count_summer['Event'])
ax.tick_params(axis='x', colors='brown', labelsize=30)
ax.tick_params(axis='y', colors='brown', labelsize=30)
# histogram first: per-bin counts (a) and the bin edges (bins)
a, bins, c = plt.hist(x, bins=10, label='Event count - over time', rwidth=0.96, histtype='step')
# pad the edge list so the polygon starts and ends on the x-axis
l = list(bins)
l.insert(0, 0)
l.insert(len(bins) + 1, bins[len(bins) - 1])
# midpoint of each (padded) bin becomes a polygon vertex
mid = []
for i in range(len(l) - 1):
    mid.append((l[i] + l[i + 1]) / 2)
# counts padded with zeros at both ends to close the polygon
x = list(a)
x.insert(0, 0)
x.insert(len(a) + 1, 0)
# frequency polygon drawn over the step histogram (color='r' overrides the 'g' in 'go--')
plt.plot(mid, x, 'go--', color='r')
plt.title('\nSUMMER OLYMPIC EVENTS \nOVER THE GENERATIONS\n', fontsize=44)
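# ---------------------------------------------------------------------------
# Self-contained sketch of the same frequency-polygon idea on synthetic data,
# not part of the original script, for when `event_count_summer` is not
# available: draw a histogram, take the bin midpoints, pad both ends with
# zero-count points, then draw a line through them.
def freqpoly_demo():
    rng = np.random.default_rng(0)
    data = rng.normal(loc=50, scale=15, size=500)
    counts, edges, _ = plt.hist(data, bins=10, histtype='step')
    mids = (edges[:-1] + edges[1:]) / 2
    width = edges[1] - edges[0]
    # zero-count endpoints half a bin outside the data close the polygon
    mids = np.concatenate(([mids[0] - width], mids, [mids[-1] + width]))
    counts = np.concatenate(([0], counts, [0]))
    plt.plot(mids, counts, 'o--', color='r')
    plt.title('Frequency polygon (synthetic data)')
    plt.show()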
| 28.068966
| 108
| 0.701474
|
87a020e6c86cac4f6971f46554f8b38c1c1206b2
| 4,120
|
py
|
Python
|
elekta-beamformer-master/mne/phantom_helpers.py
|
RTHMaK/RPGOne
|
3f3ada7db1762781668bfb2377154fdc00e17212
|
[
"Apache-2.0"
] | 1
|
2017-04-11T13:03:55.000Z
|
2017-04-11T13:03:55.000Z
|
elekta-beamformer-master/mne/phantom_helpers.py
|
RTHMaK/RPGOne
|
3f3ada7db1762781668bfb2377154fdc00e17212
|
[
"Apache-2.0"
] | null | null | null |
elekta-beamformer-master/mne/phantom_helpers.py
|
RTHMaK/RPGOne
|
3f3ada7db1762781668bfb2377154fdc00e17212
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
================================
Helpers for phantom localization
================================
"""
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
actual_pos = mne.dipole.get_phantom_dipoles('otaniemi')[0]
base_path = op.join(op.dirname(__file__), '..', '..', 'phantom')
maxfilter_options = [False, True, 'mne']
dipole_amplitudes = [20, 100, 200, 1000]
dipole_indices = [5, 6, 7, 8]
def plot_errors(errors, kind):
# Visualize the result
xs = np.arange(3)
xticklabels = ('Raw', 'SSS', 'SSS$_{MNE}$')
ylim = [0, 20]
fig, axs = plt.subplots(5, 1, figsize=(4, 8))
for ai, dipole_amplitude in enumerate([20, 100, 200, 1000]):
ax = axs[ai]
for di in range(len(dipole_indices)):
ax.plot(xs, errors[:, ai, di], label='%d' % dipole_indices[di])
ax.set(title='%d nAm' % dipole_amplitude, ylim=ylim, xticks=xs,
ylabel='Error (mm)', xlim=[0, 2])
ax.set(xticklabels=[''] * len(xs))
if ai == 3:
ax.set(xticklabels=xticklabels)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, loc='upper center',
bbox_to_anchor=(0.5, -0.25), ncol=2)
ax.grid(True)
fig.tight_layout()
axs[-1].set_visible(False)
for ext in ('png', 'pdf'):
plt.savefig(op.join('figures', ('phantom_errors_%s.' % kind) + ext))
plt.show()
def get_fwd():
# They all have approximately the same dev_head_t
info = mne.io.read_info(base_path + '/1000nAm/dip05_1000nAm.fif')
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
src_fname = 'phantom-src.fif'
if not op.isfile(src_fname):
mne.setup_volume_source_space(
subject=None, fname=None, pos=3.5, mri=None,
sphere=(0.0, 0.0, 0.0, 80.0), bem=None, mindist=5.0,
exclude=2.0).save(src_fname)
src = mne.read_source_spaces(src_fname)
fwd_fname = 'phantom-fwd.fif'
if not op.isfile(fwd_fname):
mne.write_forward_solution(fwd_fname, mne.make_forward_solution(
info, trans=None, src=src, bem=sphere, eeg=False,
meg=True))
fwd = mne.read_forward_solution(fwd_fname)
return src, fwd
def get_data(dipole_idx, dipole_amplitude, use_maxwell_filter, show=False):
data_path = base_path + '/%dnAm/' % dipole_amplitude
if use_maxwell_filter is True:
fname = 'dip%02d_%dnAm_sss.fif' % (dipole_idx, dipole_amplitude)
else:
fname = 'dip%02d_%dnAm.fif' % (dipole_idx, dipole_amplitude)
raw_fname = op.join(data_path, fname)
raw = mne.io.read_raw_fif(raw_fname, preload=True, verbose='error')
raw.info['bads'] = ['MEG2233', 'MEG2422', 'MEG0111']
events = mne.find_events(raw, stim_channel='STI201')
if show:
raw.plot(events=events)
if show:
raw.plot_psd(tmax=np.inf, fmax=60, average=False)
raw.fix_mag_coil_types()
if use_maxwell_filter == 'mne':
# Use Maxwell filtering from MNE
raw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))
if show:
raw.plot(events=events)
#######################################################################
# We know our phantom produces sinusoidal bursts below 25 Hz, so let's
# filter.
raw.filter(None, 40., h_trans_bandwidth='auto', filter_length='auto',
phase='zero')
if show:
raw.plot(events=events)
#######################################################################
# Now we epoch our data, average it
tmin, tmax = -0.15, 0.1
event_id = events[0, 2]
epochs = mne.Epochs(
raw, events, event_id, tmin, tmax, baseline=(None, -0.05),
preload=True, add_eeg_ref=False)
evoked = epochs.average()
if show:
evoked.plot(spatial_colors=True)
if show:
evoked.plot_joint()
evoked.crop(0, None)
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.08)
cov = mne.compute_covariance(epochs, tmax=-0.05)
return epochs, evoked, cov, sphere
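# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original helpers: one way they could be
# combined into a localization benchmark. The mne.fit_dipole() call and the
# error metric (distance to the known phantom position at the best-GOF time
# point, in mm) are assumptions, not code from this repository.
def run_dipole_benchmark():
    errors = np.empty((len(maxfilter_options), len(dipole_amplitudes),
                       len(dipole_indices)))
    for mi, maxfilter in enumerate(maxfilter_options):
        for ai, amplitude in enumerate(dipole_amplitudes):
            for di, idx in enumerate(dipole_indices):
                _, evoked, cov, sphere = get_data(idx, amplitude, maxfilter)
                dip, _ = mne.fit_dipole(evoked, cov, sphere)
                best = np.argmax(dip.gof)  # time point with best goodness of fit
                errors[mi, ai, di] = 1e3 * np.linalg.norm(
                    dip.pos[best] - actual_pos[idx - 1])
    plot_errors(errors, kind='dipole')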
| 34.621849
| 76
| 0.590291
|
0cf6b440fbf8b88f459f79898876e8e3fba7f2f6
| 8,872
|
py
|
Python
|
rltf/models/bstrap_dqn.py
|
nikonikolov/rltf
|
d56714494f73e53ed4b41d6376d942332b406885
|
[
"MIT"
] | 90
|
2018-05-02T17:15:52.000Z
|
2021-11-09T08:53:44.000Z
|
rltf/models/bstrap_dqn.py
|
arita37/rltf
|
d56714494f73e53ed4b41d6376d942332b406885
|
[
"MIT"
] | 1
|
2019-10-01T11:41:53.000Z
|
2019-12-08T15:38:53.000Z
|
rltf/models/bstrap_dqn.py
|
arita37/rltf
|
d56714494f73e53ed4b41d6376d942332b406885
|
[
"MIT"
] | 25
|
2018-01-14T16:56:44.000Z
|
2021-11-09T08:53:48.000Z
|
import tensorflow as tf
from rltf.models import BaseDQN
from rltf.tf_utils import tf_utils
class BaseBstrapDQN(BaseDQN):
def __init__(self, huber_loss, n_heads, **kwargs):
"""
Args:
obs_shape: list. Shape of the observation tensor
n_actions: int. Number of possible actions
opt_conf: rltf.optimizers.OptimizerConf. Configuration for the optimizer
gamma: float. Discount factor
huber_loss: bool. Whether to use huber loss or not
n_heads: Number of bootstrap heads
"""
super().__init__(**kwargs)
self.huber_loss = huber_loss
self.n_heads = n_heads
# Custom TF Tensors and Ops
self._conv_out = None
def _conv_nn(self, x):
""" Build the Bootstrapped DQN architecture - as described in the original paper
Args:
x: tf.Tensor. Tensor for the input
Returns:
`tf.Tensor` of shape `[batch_size, n_heads, n_actions]`. Contains the Q-function for each action
"""
n_actions = self.n_actions
def build_head(x):
""" Build the head of the DQN network
Args:
x: tf.Tensor. Tensor for the input
Returns:
`tf.Tensor` of shape `[batch_size, 1, n_actions]`. Contains the Q-function for each action
"""
x = tf.layers.dense(x, units=512, activation=tf.nn.relu)
x = tf.layers.dense(x, units=n_actions, activation=None)
x = tf.expand_dims(x, axis=-2)
return x
with tf.variable_scope("conv_net"):
x = tf.layers.conv2d(x, filters=32, kernel_size=8, strides=4, padding="SAME", activation=tf.nn.relu)
x = tf.layers.conv2d(x, filters=64, kernel_size=4, strides=2, padding="SAME", activation=tf.nn.relu)
x = tf.layers.conv2d(x, filters=64, kernel_size=3, strides=1, padding="SAME", activation=tf.nn.relu)
x = tf.layers.flatten(x)
# Careful: Make sure self._conv_out is set only during the right function call
if "agent_net" in tf.get_variable_scope().name and self._conv_out is None: self._conv_out = x
with tf.variable_scope("action_value"):
heads = [build_head(x) for _ in range(self.n_heads)]
x = tf.concat(heads, axis=-2)
return x
def _compute_estimate(self, agent_net):
"""Get the Q value for the selected action
Returns:
`tf.Tensor` of shape `[None, n_heads]`
"""
a_mask = tf.one_hot(self.act_t_ph, self.n_actions, dtype=tf.float32)
a_mask = tf.tile(tf.expand_dims(a_mask, axis=-2), [1, self.n_heads, 1])
qf = tf.reduce_sum(agent_net * a_mask, axis=-1)
return qf
def _select_target(self, target_net):
"""Select the Double DQN target
Args:
target_net: `tf.Tensor`. shape `[None, n_heads, n_actions]. The output from `self._nn_model()`
for the target
Returns:
`tf.Tensor` of shape `[None, n_heads]`
"""
n_actions = self.n_actions
# Compute the Q-estimate with the agent network variables and select the maximizing action
agent_net = self._nn_model(self.obs_tp1, scope="agent_net") # out: [None, n_heads, n_actions]
target_act = tf.argmax(agent_net, axis=-1, output_type=tf.int32) # out: [None, n_heads]
# Select the target Q-function
target_mask = tf.one_hot(target_act, n_actions, dtype=tf.float32) # out: [None, n_heads, n_actions]
target_q = tf.reduce_sum(target_net * target_mask, axis=-1) # out: [None, n_heads]
return target_q
def _compute_backup(self, target):
"""Compute the backup Q-value for each head
Args:
target: `tf.Tensor`, shape `[None, n_heads]. The output from `self._select_target()`
Returns:
`tf.Tensor` of shape `[None, n_heads]`
"""
done_mask = tf.cast(tf.logical_not(self.done_ph), tf.float32) # out: [None]
done_mask = tf.expand_dims(done_mask, axis=-1) # out: [None, 1]
rew_t = tf.expand_dims(self.rew_t_ph, axis=-1) # out: [None, 1]
target_q = rew_t + self.gamma * done_mask * target # out: [None, n_heads]
return target_q
def _compute_loss(self, estimate, target, name):
"""
      Args: estimate, target: `tf.Tensor`s of shape `[None, n_heads]`
Returns:
List of size `n_heads` with a scalar tensor loss for each head
"""
if self.huber_loss:
loss = tf.losses.huber_loss(target, estimate, reduction=tf.losses.Reduction.NONE)
else:
loss = tf.losses.mean_squared_error(target, estimate, reduction=tf.losses.Reduction.NONE)
losses = tf.split(loss, self.n_heads, axis=-1)
losses = [tf.reduce_mean(loss) for loss in losses]
tf.summary.scalar(name, tf.add_n(losses)/self.n_heads)
return losses
def _compute_gradients(self, optimizer, loss, agent_vars, gate_grads=True):
x_heads = self._conv_out
# Get the conv net and the heads variables
head_vars = tf_utils.scope_vars(agent_vars, scope='agent_net/action_value')
conv_vars = tf_utils.scope_vars(agent_vars, scope='agent_net/conv_net')
# Compute the gradients of the variables in all heads as well as
# the sum of gradients backpropagated from each head into the conv net
head_grads = tf.gradients(loss, head_vars + [x_heads])
# Normalize the gradient which is backpropagated the heads to the conv net
x_heads_g = head_grads.pop(-1)
x_heads_g = x_heads_g / float(self.n_heads)
# Compute the conv net gradients using chain rule
if conv_vars:
conv_grads = optimizer.compute_gradients(x_heads, conv_vars, grad_loss=x_heads_g)
else:
conv_grads = []
# Group grads and apply them
head_grads = list(zip(head_grads, head_vars))
grads = head_grads + conv_grads
if gate_grads:
grads = tf_utils.gate_gradients(grads)
return grads
def reset(self, sess):
pass
def _act_eval_vote(self, agent_net, name):
"""Evaluation action based on voting policy from the heads"""
def count_value(votes, i):
count = tf.equal(votes, i)
count = tf.cast(count, tf.int32)
count = tf.reduce_sum(count, axis=-1, keepdims=True)
return count
# Get the greedy action from each head; output shape `[batch_size, n_heads]`
votes = tf.argmax(agent_net, axis=-1, output_type=tf.int32)
# Get the action votes; output shape `[batch_size, n_actions]`
votes = [count_value(votes, i) for i in range(self.n_actions)]
votes = tf.concat(votes, axis=-1)
# Get the max vote action; output shape `[batch_size]`
action = tf.argmax(votes, axis=-1, output_type=tf.int32, name=name)
# Set the plottable tensors for episode recordings
p_a = tf.identity(action[0], name="plot/eval/a")
p_vote = tf.identity(votes[0], name="plot/eval/vote")
self.plot_conf.set_eval_spec(dict(eval_actions=dict(a_vote=dict(height=p_vote, a=p_a))))
return action
def _act_eval_greedy(self, agent_net, name):
"""Evaluation action based on the greedy action w.r.t. the mean of all heads"""
mean = tf.reduce_mean(agent_net, axis=1)
action = tf.argmax(mean, axis=-1, output_type=tf.int32, name=name)
# Set the plottable tensors for episode recordings
p_a = tf.identity(action[0], name="plot/eval/a")
p_mean = tf.identity(mean[0], name="plot/eval/mean")
self.plot_conf.set_eval_spec(dict(eval_actions=dict(a_mean=dict(height=p_mean, a=p_a))))
return action
class BstrapDQN(BaseBstrapDQN):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Custom TF Tensors and Ops
self._active_head = None
self._set_act_head = None
def build(self):
self._active_head = tf.Variable([0], trainable=False, name="active_head")
sample_head = tf.random_uniform(shape=[1], maxval=self.n_heads, dtype=tf.int32)
self._set_act_head = tf.assign(self._active_head, sample_head, name="set_act_head")
super().build()
def _act_train(self, agent_net, name):
"""Select the greedy action from the selected head
Args:
agent_net: `tf.Tensor`, shape `[None, n_heads, n_actions]. The tensor output from
`self._nn_model()` for the agent
Returns:
`tf.Tensor` of shape `[None]`
"""
# Get the Q function from the active head
head_mask = tf.one_hot(self._active_head, self.n_heads, dtype=tf.float32) # out: [1, n_heads]
head_mask = tf.tile(head_mask, [tf.shape(agent_net)[0], 1]) # out: [None, n_heads]
head_mask = tf.expand_dims(head_mask, axis=-1) # out: [None, n_heads, 1]
q_head = tf.reduce_sum(agent_net * head_mask, axis=1) # out: [None, n_actions]
# Compute the greedy action
action = tf.argmax(q_head, axis=-1, output_type=tf.int32, name=name)
return dict(action=action)
def _act_eval(self, agent_net, name):
return dict(action=self._act_eval_vote(agent_net, name))
def reset(self, sess):
sess.run(self._set_act_head)
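# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original model: the voting policy that
# _act_eval_vote() implements, written in plain NumPy for a single observation.
# Each head proposes its greedy action and the most-voted action is taken.
def vote_policy_demo():
    import numpy as np
    n_actions = 3
    # fake per-head Q-values, shape [n_heads, n_actions]
    q = np.array([[0.1, 0.9, 0.0],
                  [0.2, 0.7, 0.1],
                  [0.8, 0.1, 0.1],
                  [0.0, 0.6, 0.4]])
    votes_per_head = q.argmax(axis=-1)                 # greedy action of each head
    counts = np.bincount(votes_per_head, minlength=n_actions)
    return int(counts.argmax()), counts                # -> (1, array([1, 3, 0]))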
| 35.630522
| 106
| 0.664337
|
61864cc619fdd4200e68060287541afde1f4e5a8
| 1,941
|
py
|
Python
|
tests/api_resources/test_payout.py
|
timvisher/stripe-python
|
ae953fd0aa531f5b500e5e86eee5859df95a255d
|
[
"MIT"
] | 2
|
2020-12-05T09:02:14.000Z
|
2021-03-28T17:23:20.000Z
|
tests/api_resources/test_payout.py
|
timvisher/stripe-python
|
ae953fd0aa531f5b500e5e86eee5859df95a255d
|
[
"MIT"
] | null | null | null |
tests/api_resources/test_payout.py
|
timvisher/stripe-python
|
ae953fd0aa531f5b500e5e86eee5859df95a255d
|
[
"MIT"
] | 2
|
2019-12-19T10:25:38.000Z
|
2020-01-03T08:54:20.000Z
|
from __future__ import absolute_import, division, print_function
import stripe
TEST_RESOURCE_ID = 'po_123'
class TestPayout(object):
def test_is_listable(self, request_mock):
resources = stripe.Payout.list()
request_mock.assert_requested(
'get',
'/v1/payouts'
)
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.Payout)
def test_is_retrievable(self, request_mock):
resource = stripe.Payout.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
'get',
'/v1/payouts/%s' % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Payout)
def test_is_creatable(self, request_mock):
resource = stripe.Payout.create(
amount=100,
currency='usd'
)
request_mock.assert_requested(
'post',
'/v1/payouts'
)
assert isinstance(resource, stripe.Payout)
def test_is_saveable(self, request_mock):
resource = stripe.Payout.retrieve(TEST_RESOURCE_ID)
resource.metadata['key'] = 'value'
resource.save()
request_mock.assert_requested(
'post',
'/v1/payouts/%s' % TEST_RESOURCE_ID
)
def test_is_modifiable(self, request_mock):
resource = stripe.Payout.modify(
TEST_RESOURCE_ID,
metadata={'key': 'value'}
)
request_mock.assert_requested(
'post',
'/v1/payouts/%s' % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Payout)
def test_can_cancel(self, request_mock):
payout = stripe.Payout.retrieve(TEST_RESOURCE_ID)
resource = payout.cancel()
request_mock.assert_requested(
'post',
'/v1/payouts/%s/cancel' % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Payout)
| 29.409091
| 64
| 0.607419
|
307b7a44c1829c0e5cbf90ebdac5692e8c69495a
| 883
|
py
|
Python
|
xlsxwriter/test/comparison/test_object_position19.py
|
dthadi3/XlsxWriter
|
f1801e82240aa9c746ce14948ef95990b83162cf
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2020-07-01T07:24:37.000Z
|
2020-07-01T07:24:37.000Z
|
xlsxwriter/test/comparison/test_object_position19.py
|
dthadi3/XlsxWriter
|
f1801e82240aa9c746ce14948ef95990b83162cf
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/comparison/test_object_position19.py
|
dthadi3/XlsxWriter
|
f1801e82240aa9c746ce14948ef95990b83162cf
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('object_position19.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column(1, 1, None, None, {'hidden': 1})
worksheet.insert_image('B9', self.image_dir + 'red.png', {'x_offset': 128})
workbook.close()
self.assertExcelEqual()
| 24.527778
| 83
| 0.612684
|
082be03b9f0129269a5290e08609e176093fdb54
| 21,706
|
py
|
Python
|
Tests/test_TogoWS.py
|
lennax/biopython
|
db56e6917ef750df31e952472ebf63cd496f9ce9
|
[
"BSD-3-Clause"
] | 2
|
2016-04-15T22:39:55.000Z
|
2020-12-11T16:11:33.000Z
|
Tests/test_TogoWS.py
|
lennax/biopython
|
db56e6917ef750df31e952472ebf63cd496f9ce9
|
[
"BSD-3-Clause"
] | null | null | null |
Tests/test_TogoWS.py
|
lennax/biopython
|
db56e6917ef750df31e952472ebf63cd496f9ce9
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2010-2013 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Testing Bio.TogoWS online code.
"""
from __future__ import print_function
import unittest
from Bio._py3k import StringIO
from Bio._py3k import HTTPError
import requires_internet
requires_internet.check()
# We want to test these:
from Bio import TogoWS
# In order to check any sequences returned
from Bio import SeqIO
from Bio.SeqUtils.CheckSum import seguid
from Bio import Medline
#####################################################################
class TogoFields(unittest.TestCase):
def test_invalid_database(self):
"""Check asking for fields of invalid database fails"""
self.assertRaises(IOError, TogoWS._get_fields,
"http://togows.dbcls.jp/entry/invalid?fields")
def test_databases(self):
"""Check supported databases"""
dbs = set(TogoWS._get_entry_dbs())
expected = set(['nuccore', 'nucest', 'nucgss',
'nucleotide', 'protein', 'gene',
'homologene', 'snp',
'mesh', 'pubmed', # 'embl',
'uniprot', 'uniparc', 'uniref100',
'uniref90', 'uniref50', 'ddbj',
'dad', 'pdb', 'compound', 'drug',
'enzyme', 'genes', 'glycan',
'orthology', 'reaction', 'module',
'pathway'])
self.assertTrue(dbs.issuperset(expected),
"Missing DB: %s" % ", ".join(sorted(expected.difference(dbs))))
def test_pubmed(self):
"""Check supported fields for pubmed database"""
fields = set(TogoWS._get_entry_fields("pubmed"))
self.assertTrue(fields.issuperset(['abstract', 'au', 'authors',
'doi', 'mesh', 'so',
'title']), fields)
def test_ncbi_protein(self):
"""Check supported fields for NCBI protein database"""
fields = set(TogoWS._get_entry_fields("ncbi-protein"))
self.assertTrue(fields.issuperset(['entry_id', 'length', 'strand',
'moltype', 'linearity', 'division',
'date', 'definition', 'accession',
'accessions', 'version', 'versions',
'acc_version', 'gi', 'keywords',
'organism', 'common_name',
'taxonomy', 'comment', 'seq']),
fields)
def test_ddbj(self):
"""Check supported fields for ddbj database"""
fields = set(TogoWS._get_entry_fields("ddbj"))
self.assertTrue(fields.issuperset(['entry_id', 'length', 'strand',
'moltype', 'linearity', 'division',
'date', 'definition', 'accession',
'accessions', 'version', 'versions',
'acc_version', 'gi', 'keywords',
'organism', 'common_name',
'taxonomy', 'comment', 'seq']),
fields)
def test_uniprot(self):
"""Check supported fields for uniprot database"""
fields = set(TogoWS._get_entry_fields("uniprot"))
self.assertTrue(fields.issuperset(["definition", "entry_id", "seq"]),
fields)
def test_pdb(self):
"""Check supported fields for pdb database"""
fields = set(TogoWS._get_entry_fields("pdb"))
self.assertTrue(fields.issuperset(["accession", "chains", "keywords",
"models"]), fields)
class TogoEntry(unittest.TestCase):
def test_pubmed_16381885(self):
"""Bio.TogoWS.entry("pubmed", "16381885")"""
# Gives Medline plain text
handle = TogoWS.entry("pubmed", "16381885")
data = Medline.read(handle)
handle.close()
self.assertEqual(data["TI"],
'From genomics to chemical genomics: new developments in KEGG.')
self.assertEqual(data["AU"], ['Kanehisa M', 'Goto S', 'Hattori M',
'Aoki-Kinoshita KF', 'Itoh M',
'Kawashima S', 'Katayama T', 'Araki M',
'Hirakawa M'])
def test_pubmed_16381885_ti(self):
"""Bio.TogoWS.entry("pubmed", "16381885", field="title")"""
handle = TogoWS.entry("pubmed", "16381885", field="title")
data = handle.read().strip()
handle.close()
self.assertEqual(data,
'From genomics to chemical genomics: new developments in KEGG.')
def test_pubmed_16381885_title(self):
"""Bio.TogoWS.entry("pubmed", "16381885", field="title")"""
handle = TogoWS.entry("pubmed", "16381885", field="title")
data = handle.read().strip()
handle.close()
self.assertEqual(data,
'From genomics to chemical genomics: new developments in KEGG.')
def test_pubmed_16381885_au(self):
"""Bio.TogoWS.entry("pubmed", "16381885", field="au")"""
# Gives one name per line (i.e. \n separated), no dots
handle = TogoWS.entry("pubmed", "16381885", field="au")
data = handle.read().strip().split("\n")
handle.close()
self.assertEqual(data, ['Kanehisa M', 'Goto S', 'Hattori M',
'Aoki-Kinoshita KF', 'Itoh M',
'Kawashima S', 'Katayama T', 'Araki M',
'Hirakawa M'])
def test_pubmed_16381885_authors(self):
"""Bio.TogoWS.entry("pubmed", "16381885", field="authors")"""
# Gives names tab separated (i.e. \t separated)
handle = TogoWS.entry("pubmed", "16381885", field="authors")
data = handle.read().strip().split("\t")
handle.close()
self.assertEqual(data, ['Kanehisa, M.', 'Goto, S.', 'Hattori, M.',
'Aoki-Kinoshita, K. F.', 'Itoh, M.',
'Kawashima, S.', 'Katayama, T.', 'Araki, M.',
'Hirakawa, M.'])
def test_pubmed_16381885_invalid_field(self):
"""Bio.TogoWS.entry("pubmed", "16381885", field="invalid_for_testing")"""
self.assertRaises(ValueError, TogoWS.entry,
"pubmed", "16381885", field="invalid_for_testing")
def test_pubmed_16381885_invalid_format(self):
"""Bio.TogoWS.entry("pubmed", "16381885", format="invalid_for_testing")"""
self.assertRaises(ValueError, TogoWS.entry,
"pubmed", "16381885", format="invalid_for_testing")
def test_pubmed_invalid_id(self):
"""Bio.TogoWS.entry("pubmed", "invalid_for_testing")"""
self.assertRaises(IOError, TogoWS.entry,
"pubmed", "invalid_for_testing")
def test_pubmed_16381885_and_19850725(self):
"""Bio.TogoWS.entry("pubmed", "16381885,19850725")"""
handle = TogoWS.entry("pubmed", "16381885,19850725")
records = list(Medline.parse(handle))
handle.close()
self.assertEqual(len(records), 2)
self.assertEqual(records[0]["TI"],
'From genomics to chemical genomics: new developments in KEGG.')
self.assertEqual(records[0]["AU"], ['Kanehisa M', 'Goto S',
'Hattori M', 'Aoki-Kinoshita KF',
'Itoh M', 'Kawashima S',
'Katayama T', 'Araki M',
'Hirakawa M'])
self.assertEqual(records[1]["TI"],
'DDBJ launches a new archive database with analytical tools ' +
'for next-generation sequence data.')
self.assertEqual(records[1]["AU"], ['Kaminuma E', 'Mashima J',
'Kodama Y', 'Gojobori T',
'Ogasawara O', 'Okubo K',
'Takagi T', 'Nakamura Y'])
def test_pubmed_16381885_and_19850725_authors(self):
"""Bio.TogoWS.entry("pubmed", "16381885,19850725", field="authors")"""
handle = TogoWS.entry("pubmed", "16381885,19850725", field="authors")
# Little hack to remove blank lines...
# names = handle.read().replace("\n\n", "\n").strip().split("\n")
names = handle.read().strip().split("\n")
handle.close()
self.assertEqual(2, len(names))
names1, names2 = names
self.assertEqual(names1.split("\t"),
['Kanehisa, M.', 'Goto, S.', 'Hattori, M.',
'Aoki-Kinoshita, K. F.', 'Itoh, M.',
'Kawashima, S.', 'Katayama, T.',
'Araki, M.', 'Hirakawa, M.'])
self.assertEqual(names2.split("\t"),
['Kaminuma, E.', 'Mashima, J.', 'Kodama, Y.',
'Gojobori, T.', 'Ogasawara, O.', 'Okubo, K.',
'Takagi, T.', 'Nakamura, Y.'])
def test_invalid_db(self):
"""Bio.TogoWS.entry("invalid_db", "invalid_id")"""
self.assertRaises(ValueError, TogoWS.entry,
"invalid_db", "invalid_id")
def test_ddbj_genbank_length(self):
"""Bio.TogoWS.entry("ddbj", "X52960", field="length")"""
handle = TogoWS.entry("ddbj", "X52960", field="length")
data = handle.read().strip() # ignore trailing \n
handle.close()
self.assertEqual(data, "248")
def test_ddbj_genbank(self):
"""Bio.TogoWS.entry("ddbj", "X52960")"""
handle = TogoWS.entry("ddbj", "X52960") # Returns "genbank" format
record = SeqIO.read(handle, "gb")
handle.close()
self.assertEqual(record.id, "X52960.1")
self.assertEqual(record.name, "X52960")
self.assertEqual(len(record), 248)
self.assertEqual(seguid(record.seq), "Ktxz0HgMlhQmrKTuZpOxPZJ6zGU")
def test_nucleotide_genbank_length(self):
"""Bio.TogoWS.entry("nucleotide", "X52960", field="length")"""
handle = TogoWS.entry("nucleotide", "X52960", field="length")
data = handle.read().strip() # ignore trailing \n
handle.close()
self.assertEqual(data, "248")
def test_nucleotide_genbank_seq(self):
"""Bio.TogoWS.entry("nucleotide", "X52960", field="seq")"""
handle = TogoWS.entry("nucleotide", "X52960", field="seq")
data = handle.read().strip() # ignore trailing \n
handle.close()
self.assertEqual(seguid(data), "Ktxz0HgMlhQmrKTuZpOxPZJ6zGU")
def test_nucleotide_genbank_definition(self):
"""Bio.TogoWS.entry("nucleotide", "X52960", field="definition")"""
handle = TogoWS.entry("nucleotide", "X52960", field="definition")
data = handle.read().strip() # ignore trailing \n
handle.close()
self.assertEqual(data, "Coleus blumei viroid 1 (CbVd) RNA.")
def test_nucleotide_genbank_accession(self):
"""Bio.TogoWS.entry("nucleotide", "X52960", field="accession")"""
handle = TogoWS.entry("nucleotide", "X52960", field="accession")
data = handle.read().strip() # ignore trailing \n
handle.close()
self.assertEqual(data, "X52960")
def test_nucleotide_genbank_version(self):
"""Bio.TogoWS.entry("nucleotide", "X52960", field="version")"""
handle = TogoWS.entry("nucleotide", "X52960", field="version")
data = handle.read().strip() # ignore trailing \n
handle.close()
self.assertEqual(data, "1")
def test_nucleotide_genbank_acc_version(self):
"""Bio.TogoWS.entry("nucleotide", "X52960", field="acc_version")"""
handle = TogoWS.entry("nucleotide", "X52960", field="acc_version")
data = handle.read().strip() # ignore trailing \n
handle.close()
self.assertEqual(data, "X52960.1")
def test_nucleotide_genbank_organism(self):
"""Bio.TogoWS.entry("nucleotide", "X52960", field="organism")"""
handle = TogoWS.entry("nucleotide", "X52960", field="organism")
data = handle.read().strip() # ignore trailing \n
handle.close()
self.assertEqual(data, "Coleus blumei viroid 1")
def test_ddbj_genbank_invalid_field(self):
"""Bio.TogoWS.entry("nucleotide", "X52960", field="invalid_for_testing")"""
self.assertRaises(ValueError, TogoWS.entry,
"nucleotide", "X52960", field="invalid_for_testing")
def test_nucleotide_invalid_format(self):
"""Bio.TogoWS.entry("nucleotide", "X52960", format="invalid_for_testing")"""
self.assertRaises(ValueError, TogoWS.entry,
"nucleotide", "X52960", format="invalid_for_testing")
def test_ddbj_gff3(self):
"""Bio.TogoWS.entry("ddbj", "X52960", format="gff")"""
handle = TogoWS.entry("ddbj", "X52960", format="gff")
data = handle.read()
handle.close()
self.assertTrue(data.startswith("##gff-version 3\nX52960\tDDBJ\t"), data)
def test_genbank_gff3(self):
"""Bio.TogoWS.entry("nucleotide", "X52960", format="gff")"""
# Note - Using manual URL with genbank instead of nucleotide works
handle = TogoWS.entry("nucleotide", "X52960", format="gff")
data = handle.read()
handle.close()
self.assertTrue(data.startswith("##gff-version 3\nX52960\tGenbank\t"), data)
def test_ddbj_fasta(self):
"""Bio.TogoWS.entry("ddbj", "X52960", "fasta")"""
handle = TogoWS.entry("ddbj", "X52960", "fasta")
record = SeqIO.read(handle, "fasta")
handle.close()
self.assertIn("X52960", record.id)
self.assertIn("X52960", record.name)
self.assertEqual(len(record), 248)
self.assertEqual(seguid(record.seq), "Ktxz0HgMlhQmrKTuZpOxPZJ6zGU")
def test_uniprot_swiss(self):
"""Bio.TogoWS.entry("uniprot", ["A1AG1_HUMAN","A1AG1_MOUSE"])"""
# Returns "swiss" format:
handle = TogoWS.entry("uniprot", ["A1AG1_HUMAN", "A1AG1_MOUSE"])
record1, record2 = SeqIO.parse(handle, "swiss")
handle.close()
self.assertEqual(record1.id, "P02763")
self.assertEqual(record1.name, "A1AG1_HUMAN")
self.assertEqual(len(record1), 201)
self.assertEqual(seguid(record1.seq), "LHDJJ6oC7gUXo8CC7Xn6EUeA8Gk")
self.assertEqual(record2.id, "Q60590")
self.assertEqual(record2.name, "A1AG1_MOUSE")
self.assertEqual(len(record2), 207)
self.assertEqual(seguid(record2.seq), "FGcj+RFQhP2gRusCmwPFty5PJT0")
def test_nucleotide_fasta(self):
"""Bio.TogoWS.entry("nucleotide", "6273291", "fasta")"""
handle = TogoWS.entry("nucleotide", "6273291", "fasta")
record = SeqIO.read(handle, "fasta")
handle.close()
# NCBI is phasing out GI numbers, so no longer true:
# self.assertIn("6273291", record.id)
# self.assertIn("6273291", record.name)
self.assertIn("AF191665.1", record.id)
self.assertIn("AF191665.1", record.name)
self.assertEqual(len(record), 902)
self.assertEqual(seguid(record.seq), "bLhlq4mEFJOoS9PieOx4nhGnjAQ")
def test_protein_fasta(self):
"""Bio.TogoWS.entry("protein", "16130152", "fasta")"""
handle = TogoWS.entry("protein", "16130152", "fasta")
record = SeqIO.read(handle, "fasta")
handle.close()
# NCBI is phasing out GI numbers, so no longer true:
# self.assertIn("16130152", record.id)
# self.assertIn("16130152", record.name)
self.assertIn("NP_416719.1", record.id)
self.assertIn("NP_416719.1", record.name)
self.assertIn("porin protein", record.description)
self.assertEqual(len(record), 367)
self.assertEqual(seguid(record.seq), "fCjcjMFeGIrilHAn6h+yju267lg")
class TogoSearch(unittest.TestCase):
"""Search tests."""
def test_bad_args_just_limit(self):
"""Reject Bio.TogoWS.search(...) with just limit"""
self.assertRaises(ValueError, TogoWS.search,
"pubmed", "lung+cancer", limit=10)
def test_bad_args_just_offset(self):
"""Reject Bio.TogoWS.search(...) with just offset"""
self.assertRaises(ValueError, TogoWS.search,
"pubmed", "lung+cancer", offset=10)
def test_bad_args_zero_limit(self):
"""Reject Bio.TogoWS.search(...) with zero limit"""
self.assertRaises(ValueError, TogoWS.search,
"pubmed", "lung+cancer", offset=1, limit=0)
def test_bad_args_zero_offset(self):
"""Reject Bio.TogoWS.search(...) with zero offset"""
self.assertRaises(ValueError, TogoWS.search,
"pubmed", "lung+cancer", offset=0, limit=10)
def test_bad_args_non_int_offset(self):
"""Reject Bio.TogoWS.search(...) with non-integer offset"""
self.assertRaises(ValueError, TogoWS.search,
"pubmed", "lung+cancer", offset="test", limit=10)
def test_bad_args_non_int_limit(self):
"""Reject Bio.TogoWS.search(...) with non-integer limit"""
self.assertRaises(ValueError, TogoWS.search,
"pubmed", "lung+cancer", offset=1, limit="lots")
def test_pubmed_search_togows(self):
"""Bio.TogoWS.search_iter("pubmed", "TogoWS") etc"""
self.check("pubmed", "TogoWS", ["20472643"])
def test_pubmed_search_bioruby(self):
"""Bio.TogoWS.search_iter("pubmed", "BioRuby") etc"""
self.check("pubmed", "BioRuby", ["22994508", "22399473",
"20739307", "20015970", "14693808"])
def test_pubmed_search_porin(self):
"""Bio.TogoWS.search_iter("pubmed", "human porin") etc
        Count was 357 at time of writing; this was chosen to
be larger than the default chunk size for iteration,
but still not too big to download the full list.
"""
self.check("pubmed", "human porin", ["21189321", "21835183"])
# TogoWS search for PDBj currently unavailable
# def test_pdb_search_porin(self):
# """Bio.TogoWS.search_iter("pdb", "porin") etc
#
# Count was about 161 at time of writing.
# """
# self.check("pdb", "porin", ["2j1n", "2vqg", "3m8b", "2k0l"])
def test_uniprot_search_lung_cancer(self):
"""Bio.TogoWS.search_iter("uniprot", "terminal+lung+cancer", limit=150) etc
        Search count was 211 at time of writing, a bit too large to
        download all the results in a unit test. We want to use a limit
larger than the batch size (100) to ensure at least two
batches.
"""
self.check("uniprot", "terminal+lung+cancer", limit=150)
def check(self, database, search_term, expected_matches=(), limit=None):
if expected_matches and limit:
raise ValueError("Bad test - TogoWS makes no promises about order")
try:
search_count = TogoWS.search_count(database, search_term)
except HTTPError as err:
raise ValueError("%s from %s" % (err, err.url))
if expected_matches and search_count < len(expected_matches):
raise ValueError("Only %i matches, expected at least %i"
% (search_count, len(expected_matches)))
if search_count > 5000 and not limit:
print("%i results, skipping" % search_count)
return
if limit:
count = min(search_count, limit)
else:
count = search_count
# Iteration should find everything... unless a limit is used
search_iter = list(TogoWS.search_iter(database, search_term, limit))
self.assertEqual(count, len(search_iter))
for match in expected_matches:
self.assertTrue(match in search_iter,
"Expected %s in results but not" % match)
class TogoConvert(unittest.TestCase):
"""Conversion tests."""
def test_invalid_format(self):
"""Check convert file format checking."""
self.assertRaises(ValueError, TogoWS.convert,
StringIO("PLACEHOLDER"),
"genbank", "invalid_for_testing")
self.assertRaises(ValueError, TogoWS.convert,
StringIO("PLACEHOLDER"),
"invalid_for_testing", "fasta")
def test_genbank_to_fasta(self):
"""Conversion of GenBank to FASTA."""
filename = "GenBank/NC_005816.gb"
old = SeqIO.read(filename, "gb")
with open(filename) as handle:
new = SeqIO.read(TogoWS.convert(handle, "genbank", "fasta"), "fasta")
self.assertEqual(str(old.seq), str(new.seq))
# def test_genbank_to_embl(self):
# """Conversion of GenBank to EMBL."""
# filename = "GenBank/NC_005816.gb"
# old = SeqIO.read(filename, "gb")
# with open(filename) as handle:
# new = SeqIO.read(TogoWS.convert(handle, "genbank", "embl"), "embl")
# self.assertEqual(str(old.seq), str(new.seq))
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
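The tests above all follow the same fetch-and-parse pattern; as a minimal illustrative sketch (not part of the test suite, reusing the X52960 record from test_ddbj_genbank above):
# Sketch only: fetch one DDBJ/GenBank record over TogoWS and parse it with SeqIO.
from Bio import SeqIO, TogoWS
handle = TogoWS.entry("ddbj", "X52960")
record = SeqIO.read(handle, "gb")
handle.close()
print(record.id, len(record))  # expected: X52960.1 248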
| 45.220833 | 87 | 0.568875 |
c98ff1f23940cf0f1225ee858644497d07fcccbc | 8,555 | py | Python | test/functional/wallet_accounts.py | 5ha2s6/kevacoin | ed7768c931386a93f70fae0b215faf2be2f9cf6f | ["MIT"] | 30 | 2018-12-28T20:36:22.000Z | 2022-02-08T00:31:38.000Z | test/functional/wallet_accounts.py | 5ha2s6/kevacoin | ed7768c931386a93f70fae0b215faf2be2f9cf6f | ["MIT"] | 21 | 2020-01-28T01:53:44.000Z | 2021-07-11T11:16:29.000Z | test/functional/wallet_accounts.py | 5ha2s6/kevacoin | ed7768c931386a93f70fae0b215faf2be2f9cf6f | ["MIT"] | 12 | 2019-01-19T00:39:11.000Z | 2022-03-13T10:09:51.000Z |
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test account RPCs.
RPCs tested are:
- getaccountaddress
- getaddressesbyaccount
- listaddressgroupings
- setaccount
- sendfrom (with account arguments)
- move (with account arguments)
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class WalletAccountsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[]]
def run_test(self):
node = self.nodes[0]
# Check that there's no UTXO on any of the nodes
assert_equal(len(node.listunspent()), 0)
# Note each time we call generate, all generated coins go into
# the same address, so we call twice to get two addresses w/500 each
node.generate(1)
node.generate(101)
assert_equal(node.getbalance(), 1000)
# there should be 2 address groups
# each with 1 address with a balance of 500 Kevacoins
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 2)
# the addresses aren't linked now, but will be after we send to the
# common address
linked_addresses = set()
for address_group in address_groups:
assert_equal(len(address_group), 1)
assert_equal(len(address_group[0]), 2)
assert_equal(address_group[0][1], 500)
linked_addresses.add(address_group[0][0])
# send 500 from each address to a third address not in this wallet
# There's some fee that will come back to us when the miner reward
# matures.
common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr"
txid = node.sendmany(
fromaccount="",
amounts={common_address: 1000},
subtractfeefrom=[common_address],
minconf=1,
)
tx_details = node.gettransaction(txid)
fee = -tx_details['details'][0]['fee']
# there should be 1 address group, with the previously
# unlinked addresses now linked (they both have 0 balance)
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 1)
assert_equal(len(address_groups[0]), 2)
assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
assert_equal([a[1] for a in address_groups[0]], [0, 0])
node.generate(1)
# we want to reset so that the "" account has what's expected.
# otherwise we're off by exactly the fee amount as that's mined
# and matures in the next 100 blocks
node.sendfrom("", common_address, fee)
amount_to_send = 1.0
# Create accounts and make sure subsequent account API calls
# recognize the account/address associations.
accounts = [Account(name) for name in ("a", "b", "c", "d", "e")]
for account in accounts:
account.add_receive_address(node.getaccountaddress(account.name))
account.verify(node)
# Send a transaction to each account, and make sure this forces
# getaccountaddress to generate a new receiving address.
for account in accounts:
node.sendtoaddress(account.receive_address, amount_to_send)
account.add_receive_address(node.getaccountaddress(account.name))
account.verify(node)
# Check the amounts received.
node.generate(1)
for account in accounts:
assert_equal(
node.getreceivedbyaddress(account.addresses[0]), amount_to_send)
assert_equal(node.getreceivedbyaccount(account.name), amount_to_send)
# Check that sendfrom account reduces listaccounts balances.
for i, account in enumerate(accounts):
to_account = accounts[(i+1) % len(accounts)]
node.sendfrom(account.name, to_account.receive_address, amount_to_send)
node.generate(1)
for account in accounts:
account.add_receive_address(node.getaccountaddress(account.name))
account.verify(node)
assert_equal(node.getreceivedbyaccount(account.name), 2)
node.move(account.name, "", node.getbalance(account.name))
account.verify(node)
node.generate(101)
expected_account_balances = {"": 52000}
for account in accounts:
expected_account_balances[account.name] = 0
assert_equal(node.listaccounts(), expected_account_balances)
assert_equal(node.getbalance(""), 52000)
# Check that setaccount can assign an account to a new unused address.
for account in accounts:
address = node.getaccountaddress("")
node.setaccount(address, account.name)
account.add_address(address)
account.verify(node)
assert(address not in node.getaddressesbyaccount(""))
# Check that addmultisigaddress can assign accounts.
for account in accounts:
addresses = []
for x in range(10):
addresses.append(node.getnewaddress())
multisig_address = node.addmultisigaddress(5, addresses, account.name)['address']
account.add_address(multisig_address)
account.verify(node)
node.sendfrom("", multisig_address, 500)
node.generate(101)
for account in accounts:
assert_equal(node.getbalance(account.name), 500)
# Check that setaccount can change the account of an address from a
# different account.
change_account(node, accounts[0].addresses[0], accounts[0], accounts[1])
# Check that setaccount can change the account of an address which
# is the receiving address of a different account.
change_account(node, accounts[0].receive_address, accounts[0], accounts[1])
# Check that setaccount can set the account of an address already
# in the account. This is a no-op.
change_account(node, accounts[2].addresses[0], accounts[2], accounts[2])
# Check that setaccount can set the account of an address which is
# already the receiving address of the account. It would probably make
# sense for this to be a no-op, but right now it resets the receiving
# address, causing getaccountaddress to return a brand new address.
change_account(node, accounts[2].receive_address, accounts[2], accounts[2])
class Account:
def __init__(self, name):
# Account name
self.name = name
# Current receiving address associated with this account.
self.receive_address = None
# List of all addresses assigned with this account
self.addresses = []
def add_address(self, address):
assert_equal(address not in self.addresses, True)
self.addresses.append(address)
def add_receive_address(self, address):
self.add_address(address)
self.receive_address = address
def verify(self, node):
if self.receive_address is not None:
assert self.receive_address in self.addresses
assert_equal(node.getaccountaddress(self.name), self.receive_address)
for address in self.addresses:
assert_equal(node.getaccount(address), self.name)
assert_equal(
set(node.getaddressesbyaccount(self.name)), set(self.addresses))
def change_account(node, address, old_account, new_account):
assert_equal(address in old_account.addresses, True)
node.setaccount(address, new_account.name)
old_account.addresses.remove(address)
new_account.add_address(address)
# Calling setaccount on an address which was previously the receiving
# address of a different account should reset the receiving address of
# the old account, causing getaccountaddress to return a brand new
# address.
if address == old_account.receive_address:
new_address = node.getaccountaddress(old_account.name)
assert_equal(new_address not in old_account.addresses, True)
assert_equal(new_address not in new_account.addresses, True)
old_account.add_receive_address(new_address)
old_account.verify(node)
new_account.verify(node)
if __name__ == '__main__':
WalletAccountsTest().main()
| 41.328502 | 93 | 0.666511 |
95fd737d2d591f0fdaa718c407f5c5d29b3b842a | 4,406 | py | Python | examples/grinimports.py | ccazabon/grin3 | 87c4825da3b356987e3f9112eac7afbd959dec0c | ["BSD-3-Clause"] | 7 | 2020-03-13T12:35:35.000Z | 2021-12-24T17:33:53.000Z | examples/grinimports.py | ccazabon/grin3 | 87c4825da3b356987e3f9112eac7afbd959dec0c | ["BSD-3-Clause"] | 7 | 2020-07-29T04:18:52.000Z | 2021-06-30T20:49:11.000Z | examples/grinimports.py | ccazabon/grin3 | 87c4825da3b356987e3f9112eac7afbd959dec0c | ["BSD-3-Clause"] | 3 | 2020-07-29T04:19:54.000Z | 2021-01-20T18:05:27.000Z |
#!/usr/bin/env python3
"""
Transform Python files into normalized import statements for grepping.
"""
import os
import shlex
import sys
from io import StringIO
import compiler
import grin
from compiler.visitor import ASTVisitor, walk
__version__ = "1.2"
def normalize_From(node):
"""
Return a list of strings of Python 'from' statements, one import on each line.
"""
statements = []
children = node.getChildren()
module = "." * node.level + node.modname
for name, asname in children[1]:
line = "from %s import %s" % (module, name)
if asname is not None:
line += " as %s" % asname
line += "\n"
statements.append(line)
return statements
def normalize_Import(node):
"""
Return a list of strings of Python 'import' statements, one import on each line.
"""
statements = []
children = node.getChildren()
for name, asname in children[0]:
line = "import %s" % (name)
if asname is not None:
line += " as %s" % asname
line += "\n"
statements.append(line)
return statements
class ImportPuller(ASTVisitor):
"""
Extract import statements from an AST.
"""
def __init__(self):
ASTVisitor.__init__(self)
self.statements = []
def visitFrom(self, node):
self.statements.extend(normalize_From(node))
def visitImport(self, node):
self.statements.extend(normalize_Import(node))
def as_string(self):
"""
Concatenate all of the 'import' and 'from' statements.
"""
return "".join(self.statements)
def normalize_file(filename, *args):
"""
Import-normalize a file.
If the file is not parseable, an empty filelike object will be returned.
"""
try:
ast = compiler.parseFile(filename)
except Exception:
return StringIO("")
ip = ImportPuller()
walk(ast, ip)
return StringIO(ip.as_string())
def get_grinimports_arg_parser(parser=None):
"""
Create the command-line parser.
"""
parser = grin.get_grin_arg_parser(parser)
parser.set_defaults(include="*.py")
parser.description = "Extract, normalize and search import statements from Python files."
parser.epilog = """
For example, if I have a file example.py with a bunch of imports:
$ cat example.py
import foo
import foo.baz as blah
from foo import bar, baz as bat
def somefunction():
"Do something to foo.baz"
import from_inside.function
We can grep for 'import' in order to get all of the import statements:
$ grinimports.py import example.py
example.py:
1 : import foo
2 : import foo.baz as blah
3 : from foo import bar
4 : from foo import baz as bat
5 : import from_inside.function
If we just want to find imports of foo.baz, we can do this:
$ grinimports.py "import foo\\.baz|from foo import baz" example.py
example.py:
2 : import foo.baz as blah
4 : from foo import baz as bat
A typical grep (or grin) cannot find all of these in the original files because
the import statements are not normalized.
$ grin "foo\\.baz|from foo import baz" example.py
example.py:
2 : import foo.baz as blah
6 : "Do something to foo.baz"
"""
for action in parser._actions:
if hasattr(action, "version"):
action.version = "grinpython %s" % __version__
return parser
def grinimports_main(argv=None):
if argv is None:
# Look at the GRIN_ARGS environment variable for more arguments.
env_args = shlex.split(os.getenv("GRIN_ARGS", ""))
argv = [sys.argv[0]] + env_args + sys.argv[1:]
parser = get_grinimports_arg_parser()
args = parser.parse_args(argv[1:])
if args.context is not None:
args.before_context = args.context
args.after_context = args.context
_isatty = not args.no_color and sys.stdout.isatty() and (os.environ.get("TERM") != "dumb")
args.use_color = args.force_color or _isatty
regex = grin.get_regex(args)
g = grin.GrepText(regex, args)
for filename, kind in grin.get_filenames(args):
if kind == "text":
# Ignore gzipped files.
report = g.grep_a_file(filename, opener=normalize_file)
sys.stdout.write(report)
if __name__ == "__main__":
grinimports_main()
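Note that the script above imports the `compiler` package, which exists only in Python 2, despite its Python 3 shebang; under Python 3 the same import normalization would have to be rebuilt on the standard `ast` module. A rough, hypothetical sketch of that approach (not part of grin):
# Hypothetical Python 3 sketch of the same normalization using the stdlib ast module.
import ast

def normalize_imports(source):
    """Return normalized 'import'/'from' statements found in source, one per line."""
    lines = []
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Import):
            for alias in node.names:
                line = "import %s" % alias.name
                if alias.asname:
                    line += " as %s" % alias.asname
                lines.append(line)
        elif isinstance(node, ast.ImportFrom):
            module = "." * node.level + (node.module or "")
            for alias in node.names:
                line = "from %s import %s" % (module, alias.name)
                if alias.asname:
                    line += " as %s" % alias.asname
                lines.append(line)
    return "\n".join(lines) + "\n"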
| 27.36646 | 94 | 0.637313 |
54c4ff36b270ef7b3c1c4fc4f95720db8104c6cf | 795 | py | Python | test.py | TheAcademyofNaturalSciences/BMPxlsx | 825c60d319710cbe285632e1f4a99b18b24750f5 | ["MIT"] | null | null | null | test.py | TheAcademyofNaturalSciences/BMPxlsx | 825c60d319710cbe285632e1f4a99b18b24750f5 | ["MIT"] | 1 | 2021-09-16T14:17:13.000Z | 2022-01-11T17:35:51.000Z | test.py | TheAcademyofNaturalSciences/BMPxlsx | 825c60d319710cbe285632e1f4a99b18b24750f5 | ["MIT"] | null | null | null |
import BMPxlsx
import os
# Example Function Run
# Function(dataDictionary, FileName)
# DICTIONARY {"SHEET": {"CELL": VALUE, "CELL": VALUE}}
datadict = {'Sheet1': {'D1': 123.4, 'D2': 567.8},
'Sheet2': {'D1': 123.4, 'D2': 567.8},
'Sheet3': {'D1': 123.4, 'D2': 567.8},
}
# FULL PATH TO FILE
loc = os.getcwd()
fnme = 'test2.xlsx'
file = os.path.join(loc, fnme)
# Function Methods
writer = BMPxlsx.Writer(file)
input1 = {'Sheet1': {'D1': 13.4, 'D2': 47.8},
'Sheet2': {'D1': 23.4, 'D2': 57.8},
'Sheet3': {'D1': 33.4, 'D2': 67.8},
}
input2 = {'Sheet1': {'D1': 23.4, 'D2': 57.8},
'Sheet2': {'D1': 33.4, 'D2': 67.8},
'Sheet3': {'D1': 43.4, 'D2': 77.8},
'Sheet4': {'D2': 45.0}
}
writer.write(input1)
writer.close()
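As written, `datadict` and `input2` are defined but never passed to the writer, presumably just to illustrate the expected {"SHEET": {"CELL": VALUE}} shape. Assuming BMPxlsx.Writer can simply be constructed again on the same workbook (unverified), a second pass might look like:
# Sketch only: write the remaining example dictionary in a second pass.
writer2 = BMPxlsx.Writer(file)
writer2.write(input2)
writer2.close()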
| 24.090909 | 54 | 0.513208 |
ac0b4bf6ade9c61d0dc1c581476f062b6dac9085 | 24,773 | py | Python | tests/test_routes.py | ahopkins/sanic | 4d22b6c3680d1a7d357e031fd4276c9b5c2e8e00 | ["MIT"] | null | null | null | tests/test_routes.py | ahopkins/sanic | 4d22b6c3680d1a7d357e031fd4276c9b5c2e8e00 | ["MIT"] | null | null | null | tests/test_routes.py | ahopkins/sanic | 4d22b6c3680d1a7d357e031fd4276c9b5c2e8e00 | ["MIT"] | null | null | null |
import asyncio
import pytest
from sanic import Sanic
from sanic.response import text, json
from sanic.router import RouteExists, RouteDoesNotExist, ParameterNameConflicts
from sanic.constants import HTTP_METHODS
# ------------------------------------------------------------ #
# UTF-8
# ------------------------------------------------------------ #
@pytest.mark.parametrize('method', HTTP_METHODS)
def test_versioned_routes_get(app, method):
method = method.lower()
func = getattr(app, method)
if callable(func):
@func('/{}'.format(method), version=1)
def handler(request):
return text('OK')
else:
print(func)
raise
client_method = getattr(app.test_client, method)
request, response = client_method('/v1/{}'.format(method))
assert response.status == 200
def test_shorthand_routes_get(app):
@app.get('/get')
def handler(request):
return text('OK')
request, response = app.test_client.get('/get')
assert response.text == 'OK'
request, response = app.test_client.post('/get')
assert response.status == 405
def test_shorthand_routes_multiple(app):
@app.get('/get')
def get_handler(request):
return text('OK')
@app.options('/get')
def options_handler(request):
return text('')
request, response = app.test_client.get('/get/')
assert response.status == 200
assert response.text == 'OK'
request, response = app.test_client.options('/get/')
assert response.status == 200
def test_route_strict_slash(app):
@app.get('/get', strict_slashes=True)
def handler(request):
assert request.stream is None
return text('OK')
@app.post('/post/', strict_slashes=True)
def handler(request):
assert request.stream is None
return text('OK')
assert app.is_request_stream is False
request, response = app.test_client.get('/get')
assert response.text == 'OK'
request, response = app.test_client.get('/get/')
assert response.status == 404
request, response = app.test_client.post('/post/')
assert response.text == 'OK'
request, response = app.test_client.post('/post')
assert response.status == 404
def test_route_invalid_parameter_syntax(app):
with pytest.raises(ValueError):
@app.get('/get/<:string>', strict_slashes=True)
def handler(request):
return text('OK')
request, response = app.test_client.get('/get')
def test_route_strict_slash_default_value():
app = Sanic('test_route_strict_slash', strict_slashes=True)
@app.get('/get')
def handler(request):
return text('OK')
request, response = app.test_client.get('/get/')
assert response.status == 404
def test_route_strict_slash_without_passing_default_value(app):
@app.get('/get')
def handler(request):
return text('OK')
request, response = app.test_client.get('/get/')
assert response.text == 'OK'
def test_route_strict_slash_default_value_can_be_overwritten():
app = Sanic('test_route_strict_slash', strict_slashes=True)
@app.get('/get', strict_slashes=False)
def handler(request):
return text('OK')
request, response = app.test_client.get('/get/')
assert response.text == 'OK'
def test_route_slashes_overload(app):
@app.get('/hello/')
def handler(request):
return text('OK')
@app.post('/hello/')
def handler(request):
return text('OK')
request, response = app.test_client.get('/hello')
assert response.text == 'OK'
request, response = app.test_client.get('/hello/')
assert response.text == 'OK'
request, response = app.test_client.post('/hello')
assert response.text == 'OK'
request, response = app.test_client.post('/hello/')
assert response.text == 'OK'
def test_route_optional_slash(app):
@app.get('/get')
def handler(request):
return text('OK')
request, response = app.test_client.get('/get')
assert response.text == 'OK'
request, response = app.test_client.get('/get/')
assert response.text == 'OK'
def test_route_strict_slashes_set_to_false_and_host_is_a_list(app):
# Part of regression test for issue #1120
site1 = '127.0.0.1:{}'.format(app.test_client.port)
# before fix, this raises a RouteExists error
@app.get('/get', host=[site1, 'site2.com'], strict_slashes=False)
def get_handler(request):
return text('OK')
request, response = app.test_client.get('http://' + site1 + '/get')
assert response.text == 'OK'
@app.post('/post', host=[site1, 'site2.com'], strict_slashes=False)
def post_handler(request):
return text('OK')
request, response = app.test_client.post('http://' + site1 + '/post')
assert response.text == 'OK'
@app.put('/put', host=[site1, 'site2.com'], strict_slashes=False)
def put_handler(request):
return text('OK')
request, response = app.test_client.put('http://' + site1 + '/put')
assert response.text == 'OK'
@app.delete('/delete', host=[site1, 'site2.com'], strict_slashes=False)
def delete_handler(request):
return text('OK')
request, response = app.test_client.delete('http://' + site1 + '/delete')
assert response.text == 'OK'
def test_shorthand_routes_post(app):
@app.post('/post')
def handler(request):
return text('OK')
request, response = app.test_client.post('/post')
assert response.text == 'OK'
request, response = app.test_client.get('/post')
assert response.status == 405
def test_shorthand_routes_put(app):
@app.put('/put')
def handler(request):
assert request.stream is None
return text('OK')
assert app.is_request_stream is False
request, response = app.test_client.put('/put')
assert response.text == 'OK'
request, response = app.test_client.get('/put')
assert response.status == 405
def test_shorthand_routes_delete(app):
@app.delete('/delete')
def handler(request):
assert request.stream is None
return text('OK')
assert app.is_request_stream is False
request, response = app.test_client.delete('/delete')
assert response.text == 'OK'
request, response = app.test_client.get('/delete')
assert response.status == 405
def test_shorthand_routes_patch(app):
@app.patch('/patch')
def handler(request):
assert request.stream is None
return text('OK')
assert app.is_request_stream is False
request, response = app.test_client.patch('/patch')
assert response.text == 'OK'
request, response = app.test_client.get('/patch')
assert response.status == 405
def test_shorthand_routes_head(app):
@app.head('/head')
def handler(request):
assert request.stream is None
return text('OK')
assert app.is_request_stream is False
request, response = app.test_client.head('/head')
assert response.status == 200
request, response = app.test_client.get('/head')
assert response.status == 405
def test_shorthand_routes_options(app):
@app.options('/options')
def handler(request):
assert request.stream is None
return text('OK')
assert app.is_request_stream is False
request, response = app.test_client.options('/options')
assert response.status == 200
request, response = app.test_client.get('/options')
assert response.status == 405
def test_static_routes(app):
@app.route('/test')
async def handler1(request):
return text('OK1')
@app.route('/pizazz')
async def handler2(request):
return text('OK2')
request, response = app.test_client.get('/test')
assert response.text == 'OK1'
request, response = app.test_client.get('/pizazz')
assert response.text == 'OK2'
def test_dynamic_route(app):
results = []
@app.route('/folder/<name>')
async def handler(request, name):
results.append(name)
return text('OK')
request, response = app.test_client.get('/folder/test123')
assert response.text == 'OK'
assert results[0] == 'test123'
def test_dynamic_route_string(app):
results = []
@app.route('/folder/<name:string>')
async def handler(request, name):
results.append(name)
return text('OK')
request, response = app.test_client.get('/folder/test123')
assert response.text == 'OK'
assert results[0] == 'test123'
request, response = app.test_client.get('/folder/favicon.ico')
assert response.text == 'OK'
assert results[1] == 'favicon.ico'
def test_dynamic_route_int(app):
results = []
@app.route('/folder/<folder_id:int>')
async def handler(request, folder_id):
results.append(folder_id)
return text('OK')
request, response = app.test_client.get('/folder/12345')
assert response.text == 'OK'
assert type(results[0]) is int
request, response = app.test_client.get('/folder/asdf')
assert response.status == 404
def test_dynamic_route_number(app):
results = []
@app.route('/weight/<weight:number>')
async def handler(request, weight):
results.append(weight)
return text('OK')
request, response = app.test_client.get('/weight/12345')
assert response.text == 'OK'
assert type(results[0]) is float
request, response = app.test_client.get('/weight/1234.56')
assert response.status == 200
request, response = app.test_client.get('/weight/1234-56')
assert response.status == 404
def test_dynamic_route_regex(app):
@app.route('/folder/<folder_id:[A-Za-z0-9]{0,4}>')
async def handler(request, folder_id):
return text('OK')
request, response = app.test_client.get('/folder/test')
assert response.status == 200
request, response = app.test_client.get('/folder/test1')
assert response.status == 404
request, response = app.test_client.get('/folder/test-123')
assert response.status == 404
request, response = app.test_client.get('/folder/')
assert response.status == 200
def test_dynamic_route_uuid(app):
import uuid
results = []
@app.route('/quirky/<unique_id:uuid>')
async def handler(request, unique_id):
results.append(unique_id)
return text('OK')
request, response = app.test_client.get('/quirky/123e4567-e89b-12d3-a456-426655440000')
assert response.text == 'OK'
assert type(results[0]) is uuid.UUID
request, response = app.test_client.get('/quirky/{}'.format(uuid.uuid4()))
assert response.status == 200
request, response = app.test_client.get('/quirky/non-existing')
assert response.status == 404
def test_dynamic_route_path(app):
@app.route('/<path:path>/info')
async def handler(request, path):
return text('OK')
request, response = app.test_client.get('/path/1/info')
assert response.status == 200
request, response = app.test_client.get('/info')
assert response.status == 404
@app.route('/<path:path>')
async def handler1(request, path):
return text('OK')
request, response = app.test_client.get('/info')
assert response.status == 200
request, response = app.test_client.get('/whatever/you/set')
assert response.status == 200
def test_dynamic_route_unhashable(app):
@app.route('/folder/<unhashable:[A-Za-z0-9/]+>/end/')
async def handler(request, unhashable):
return text('OK')
request, response = app.test_client.get('/folder/test/asdf/end/')
assert response.status == 200
request, response = app.test_client.get('/folder/test///////end/')
assert response.status == 200
request, response = app.test_client.get('/folder/test/end/')
assert response.status == 200
request, response = app.test_client.get('/folder/test/nope/')
assert response.status == 404
def test_websocket_route(app):
ev = asyncio.Event()
@app.websocket('/ws')
async def handler(request, ws):
assert ws.subprotocol is None
ev.set()
request, response = app.test_client.get('/ws', headers={
'Upgrade': 'websocket',
'Connection': 'upgrade',
'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
'Sec-WebSocket-Version': '13'})
assert response.status == 101
assert ev.is_set()
def test_websocket_route_with_subprotocols(app):
results = []
@app.websocket('/ws', subprotocols=['foo', 'bar'])
async def handler(request, ws):
results.append(ws.subprotocol)
request, response = app.test_client.get('/ws', headers={
'Upgrade': 'websocket',
'Connection': 'upgrade',
'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
'Sec-WebSocket-Version': '13',
'Sec-WebSocket-Protocol': 'bar'})
assert response.status == 101
request, response = app.test_client.get('/ws', headers={
'Upgrade': 'websocket',
'Connection': 'upgrade',
'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
'Sec-WebSocket-Version': '13',
'Sec-WebSocket-Protocol': 'bar, foo'})
assert response.status == 101
request, response = app.test_client.get('/ws', headers={
'Upgrade': 'websocket',
'Connection': 'upgrade',
'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
'Sec-WebSocket-Version': '13',
'Sec-WebSocket-Protocol': 'baz'})
assert response.status == 101
request, response = app.test_client.get('/ws', headers={
'Upgrade': 'websocket',
'Connection': 'upgrade',
'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
'Sec-WebSocket-Version': '13'})
assert response.status == 101
assert results == ['bar', 'bar', None, None]
def test_route_duplicate(app):
with pytest.raises(RouteExists):
@app.route('/test')
async def handler1(request):
pass
@app.route('/test')
async def handler2(request):
pass
with pytest.raises(RouteExists):
@app.route('/test/<dynamic>/')
async def handler1(request, dynamic):
pass
@app.route('/test/<dynamic>/')
async def handler2(request, dynamic):
pass
def test_method_not_allowed(app):
@app.route('/test', methods=['GET'])
async def handler(request):
return text('OK')
request, response = app.test_client.get('/test')
assert response.status == 200
request, response = app.test_client.post('/test')
assert response.status == 405
def test_static_add_route(app):
async def handler1(request):
return text('OK1')
async def handler2(request):
return text('OK2')
app.add_route(handler1, '/test')
app.add_route(handler2, '/test2')
request, response = app.test_client.get('/test')
assert response.text == 'OK1'
request, response = app.test_client.get('/test2')
assert response.text == 'OK2'
def test_dynamic_add_route(app):
results = []
async def handler(request, name):
results.append(name)
return text('OK')
app.add_route(handler, '/folder/<name>')
request, response = app.test_client.get('/folder/test123')
assert response.text == 'OK'
assert results[0] == 'test123'
def test_dynamic_add_route_string(app):
results = []
async def handler(request, name):
results.append(name)
return text('OK')
app.add_route(handler, '/folder/<name:string>')
request, response = app.test_client.get('/folder/test123')
assert response.text == 'OK'
assert results[0] == 'test123'
request, response = app.test_client.get('/folder/favicon.ico')
assert response.text == 'OK'
assert results[1] == 'favicon.ico'
def test_dynamic_add_route_int(app):
results = []
async def handler(request, folder_id):
results.append(folder_id)
return text('OK')
app.add_route(handler, '/folder/<folder_id:int>')
request, response = app.test_client.get('/folder/12345')
assert response.text == 'OK'
assert type(results[0]) is int
request, response = app.test_client.get('/folder/asdf')
assert response.status == 404
def test_dynamic_add_route_number(app):
results = []
async def handler(request, weight):
results.append(weight)
return text('OK')
app.add_route(handler, '/weight/<weight:number>')
request, response = app.test_client.get('/weight/12345')
assert response.text == 'OK'
assert type(results[0]) is float
request, response = app.test_client.get('/weight/1234.56')
assert response.status == 200
request, response = app.test_client.get('/weight/1234-56')
assert response.status == 404
def test_dynamic_add_route_regex(app):
async def handler(request, folder_id):
return text('OK')
app.add_route(handler, '/folder/<folder_id:[A-Za-z0-9]{0,4}>')
request, response = app.test_client.get('/folder/test')
assert response.status == 200
request, response = app.test_client.get('/folder/test1')
assert response.status == 404
request, response = app.test_client.get('/folder/test-123')
assert response.status == 404
request, response = app.test_client.get('/folder/')
assert response.status == 200
def test_dynamic_add_route_unhashable(app):
async def handler(request, unhashable):
return text('OK')
app.add_route(handler, '/folder/<unhashable:[A-Za-z0-9/]+>/end/')
request, response = app.test_client.get('/folder/test/asdf/end/')
assert response.status == 200
request, response = app.test_client.get('/folder/test///////end/')
assert response.status == 200
request, response = app.test_client.get('/folder/test/end/')
assert response.status == 200
request, response = app.test_client.get('/folder/test/nope/')
assert response.status == 404
def test_add_route_duplicate(app):
with pytest.raises(RouteExists):
async def handler1(request):
pass
async def handler2(request):
pass
app.add_route(handler1, '/test')
app.add_route(handler2, '/test')
with pytest.raises(RouteExists):
async def handler1(request, dynamic):
pass
async def handler2(request, dynamic):
pass
app.add_route(handler1, '/test/<dynamic>/')
app.add_route(handler2, '/test/<dynamic>/')
def test_add_route_method_not_allowed(app):
async def handler(request):
return text('OK')
app.add_route(handler, '/test', methods=['GET'])
request, response = app.test_client.get('/test')
assert response.status == 200
request, response = app.test_client.post('/test')
assert response.status == 405
def test_remove_static_route(app):
async def handler1(request):
return text('OK1')
async def handler2(request):
return text('OK2')
app.add_route(handler1, '/test')
app.add_route(handler2, '/test2')
request, response = app.test_client.get('/test')
assert response.status == 200
request, response = app.test_client.get('/test2')
assert response.status == 200
app.remove_route('/test')
app.remove_route('/test2')
request, response = app.test_client.get('/test')
assert response.status == 404
request, response = app.test_client.get('/test2')
assert response.status == 404
def test_remove_dynamic_route(app):
async def handler(request, name):
return text('OK')
app.add_route(handler, '/folder/<name>')
request, response = app.test_client.get('/folder/test123')
assert response.status == 200
app.remove_route('/folder/<name>')
request, response = app.test_client.get('/folder/test123')
assert response.status == 404
def test_remove_inexistent_route(app):
with pytest.raises(RouteDoesNotExist):
app.remove_route('/test')
def test_removing_slash(app):
@app.get('/rest/<resource>')
def get(_):
pass
@app.post('/rest/<resource>')
def post(_):
pass
assert len(app.router.routes_all.keys()) == 2
def test_remove_unhashable_route(app):
async def handler(request, unhashable):
return text('OK')
app.add_route(handler, '/folder/<unhashable:[A-Za-z0-9/]+>/end/')
request, response = app.test_client.get('/folder/test/asdf/end/')
assert response.status == 200
request, response = app.test_client.get('/folder/test///////end/')
assert response.status == 200
request, response = app.test_client.get('/folder/test/end/')
assert response.status == 200
app.remove_route('/folder/<unhashable:[A-Za-z0-9/]+>/end/')
request, response = app.test_client.get('/folder/test/asdf/end/')
assert response.status == 404
request, response = app.test_client.get('/folder/test///////end/')
assert response.status == 404
request, response = app.test_client.get('/folder/test/end/')
assert response.status == 404
def test_remove_route_without_clean_cache(app):
async def handler(request):
return text('OK')
app.add_route(handler, '/test')
request, response = app.test_client.get('/test')
assert response.status == 200
app.remove_route('/test', clean_cache=True)
app.remove_route('/test/', clean_cache=True)
request, response = app.test_client.get('/test')
assert response.status == 404
app.add_route(handler, '/test')
request, response = app.test_client.get('/test')
assert response.status == 200
app.remove_route('/test', clean_cache=False)
request, response = app.test_client.get('/test')
assert response.status == 200
def test_overload_routes(app):
@app.route('/overload', methods=['GET'])
async def handler1(request):
return text('OK1')
@app.route('/overload', methods=['POST', 'PUT'])
async def handler2(request):
return text('OK2')
request, response = app.test_client.get('/overload')
assert response.text == 'OK1'
request, response = app.test_client.post('/overload')
assert response.text == 'OK2'
request, response = app.test_client.put('/overload')
assert response.text == 'OK2'
request, response = app.test_client.delete('/overload')
assert response.status == 405
with pytest.raises(RouteExists):
@app.route('/overload', methods=['PUT', 'DELETE'])
async def handler3(request):
return text('Duplicated')
def test_unmergeable_overload_routes(app):
@app.route('/overload_whole', methods=None)
async def handler1(request):
return text('OK1')
with pytest.raises(RouteExists):
@app.route('/overload_whole', methods=['POST', 'PUT'])
async def handler2(request):
return text('Duplicated')
request, response = app.test_client.get('/overload_whole')
assert response.text == 'OK1'
request, response = app.test_client.post('/overload_whole')
assert response.text == 'OK1'
@app.route('/overload_part', methods=['GET'])
async def handler1(request):
return text('OK1')
with pytest.raises(RouteExists):
@app.route('/overload_part')
async def handler2(request):
return text('Duplicated')
request, response = app.test_client.get('/overload_part')
assert response.text == 'OK1'
request, response = app.test_client.post('/overload_part')
assert response.status == 405
def test_unicode_routes(app):
@app.get('/你好')
def handler1(request):
return text('OK1')
request, response = app.test_client.get('/你好')
assert response.text == 'OK1'
@app.route('/overload/<param>', methods=['GET'])
async def handler2(request, param):
return text('OK2 ' + param)
request, response = app.test_client.get('/overload/你好')
assert response.text == 'OK2 你好'
def test_uri_with_different_method_and_different_params(app):
@app.route('/ads/<ad_id>', methods=['GET'])
async def ad_get(request, ad_id):
return json({'ad_id': ad_id})
@app.route('/ads/<action>', methods=['POST'])
async def ad_post(request, action):
return json({'action': action})
request, response = app.test_client.get('/ads/1234')
assert response.status == 200
assert response.json == {
'ad_id': '1234'
}
request, response = app.test_client.post('/ads/post')
assert response.status == 200
assert response.json == {
'action': 'post'
}
def test_route_raise_ParameterNameConflicts(app):
with pytest.raises(ParameterNameConflicts):
@app.get('/api/v1/<user>/<user>/')
def handler(request, user):
return text('OK')
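Taken together, these tests exercise a handful of route-declaration styles; the following is only an illustrative sketch gathering them in one place, written against the same (older) Sanic API the tests target, and is not part of the test suite:
# Sketch only: the main route styles exercised by the tests above.
from sanic import Sanic
from sanic.response import text

app = Sanic('routing_example')

@app.get('/static')  # shorthand method route
async def static_handler(request):
    return text('OK')

@app.route('/items/<item_id:int>')  # dynamic route with a typed parameter
async def item_handler(request, item_id):
    return text('item %d' % item_id)

@app.get('/greet/<name>', version=1)  # versioned route, served under /v1/greet/<name>
async def versioned_handler(request, name):
    return text(name)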
| 26.214815 | 91 | 0.644008 |
9a7a58ec748b9b35f8135ada77bfee832a9b6266 | 28,785 | py | Python | pycochleagram/cochleagram.py | z430/pycochleagram | 45420d969047781933a91281592f46befcc3f48f | ["BSD-3-Clause-Clear"] | null | null | null | pycochleagram/cochleagram.py | z430/pycochleagram | 45420d969047781933a91281592f46befcc3f48f | ["BSD-3-Clause-Clear"] | null | null | null | pycochleagram/cochleagram.py | z430/pycochleagram | 45420d969047781933a91281592f46befcc3f48f | ["BSD-3-Clause-Clear"] | null | null | null |
# TODO:
# + convert docstrings to np format
# + build and format docs
# + put docs on github
# + test padding (pad_factor)
# + sensible parameters for downsampling?
# + clean up old and deprecated methods
# + write readme
# + python compatibility issues
# + erb filters fails with certain arguments:
# `N: 680, sample_factor: 15, signal_length: 2433, sr: 32593, low_lim: 147, hi_lim: 16296, pad_factor: None`
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from time import sleep
import numpy as np
import scipy.signal
from pycochleagram import erbfilter as erb
from pycochleagram import subband as sb
import matplotlib.pyplot as plt
import pdb as ipdb
def cochleagram(signal, sr, n, low_lim, hi_lim, sample_factor,
padding_size=None, downsample=None, nonlinearity=None,
fft_mode='auto', ret_mode='envs', strict=True, **kwargs):
"""Generate the subband envelopes (i.e., the cochleagram)
of the provided signal.
  This first creates an ERB filterbank with the provided input arguments for
the provided signal. This filterbank is then used to perform the subband
decomposition to create the subband envelopes. The resulting envelopes can be
optionally downsampled and then modified with a nonlinearity.
Args:
signal (array): The sound signal (waveform) in the time domain. Should be
flattened, i.e., the shape is (n_samples,).
sr (int): Sampling rate associated with the signal waveform.
n (int): Number of filters (subbands) to be generated with standard
sampling (i.e., using a sampling factor of 1). Note, the actual number of
filters in the generated filterbank depends on the sampling factor, and
will also include lowpass and highpass filters that allow for
perfect reconstruction of the input signal (the exact number of lowpass
and highpass filters is determined by the sampling factor).
low_lim (int): Lower limit of frequency range. Filters will not be defined
below this limit.
hi_lim (int): Upper limit of frequency range. Filters will not be defined
above this limit.
sample_factor (int): Positive integer that determines how densely ERB function
will be sampled to create bandpass filters. 1 represents standard sampling;
adjacent bandpass filters will overlap by 50%. 2 represents 2x overcomplete sampling;
adjacent bandpass filters will overlap by 75%. 4 represents 4x overcomplete sampling;
adjacent bandpass filters will overlap by 87.5%.
padding_size (int, optional): If None (default), the signal will not be padded
before filtering. Otherwise, the filters will be created assuming the
waveform signal will be padded to length padding_size+signal_length.
downsample (None, int, callable, optional): The `downsample` argument can
be an integer representing the upsampling factor in polyphase resampling
(with `sr` as the downsampling factor), a callable
(to perform custom downsampling), or None to return the
unmodified cochleagram; see `apply_envelope_downsample` for more
information. If `ret_mode` is 'envs', this will be applied to the
cochleagram before the nonlinearity, otherwise no downsampling will be
performed. Providing a callable for custom downsampling is suggested.
nonlinearity ({None, 'db', 'power', callable}, optional): The `nonlinearity`
      argument can be a predefined type, a callable
(to apply a custom nonlinearity), or None to return the unmodified
cochleagram; see `apply_envelope_nonlinearity` for more information.
If `ret_mode` is 'envs', this will be applied to the cochleagram after
downsampling, otherwise no nonlinearity will be applied. Providing a
callable for applying a custom nonlinearity is suggested.
fft_mode ({'auto', 'fftw', 'np'}, optional): Determine what implementation
to use for FFT-like operations. 'auto' will attempt to use pyfftw, but
      will fall back to numpy if necessary.
ret_mode ({'envs', 'subband', 'analytic', 'all'}): Determines what will be
returned. 'envs' (default) returns the subband envelopes; 'subband'
returns just the subbands, 'analytic' returns the analytic signal provided
by the Hilbert transform, 'all' returns all local variables created in this
function.
    strict (bool, optional): If True (default), will include the extra
      highpass and lowpass filters required to make the filterbank invertible,
      and will throw an error if this function is used in a way that is
      unsupported by the MATLAB implementation. If False, this will only
      perform calculations on the bandpass filters; note this decreases the
      number of frequency channels in the output by 2 * `sample_factor`.
Returns:
array:
**out**: The output, depending on the value of `ret_mode`. If the `ret_mode`
is 'envs' and a downsampling and/or nonlinearity
operation was requested, the output will reflect these operations.
"""
if strict:
if not isinstance(sr, int):
raise ValueError('`sr` must be an int; ignore with `strict`=False')
# make sure low_lim and hi_lim are int
if not isinstance(low_lim, int):
raise ValueError('`low_lim` must be an int; ignore with `strict`=False')
if not isinstance(hi_lim, int):
raise ValueError('`hi_lim` must be an int; ignore with `strict`=False')
ret_mode = ret_mode.lower()
if ret_mode == 'all':
ret_all_sb = True
else:
ret_all_sb = False
# verify n is positive
if n <= 0:
raise ValueError('number of filters `n` must be positive; found: %s' % n)
  # allow for batch generation without creating filters every time
batch_signal = sb.reshape_signal_batch(signal) # (batch_dim, waveform_samples)
# only make the filters once
if kwargs.get('no_hp_lp_filts'):
erb_kwargs = {'no_highpass': True, 'no_lowpass': True}
else:
erb_kwargs = {}
# print(erb_kwargs)
filts, hz_cutoffs, freqs = erb.make_erb_cos_filters_nx(batch_signal.shape[1],
sr, n, low_lim, hi_lim, sample_factor, padding_size=padding_size,
full_filter=True, strict=strict, **erb_kwargs)
# utils.filtshow(freqs, filts, hz_cutoffs, use_log_x=True)
freqs_to_plot = np.log10(freqs)
# print(filts.shape)
# plt.figure(figsize=(18,5))
# # plt.plot(freqs_to_plot, filts[:,3:11], 'k')
# plt.plot(freqs_to_plot, filts[:,5:13], 'k', linewidth=2)
# plt.xlim([2, 3.5])
# plt.ylim([0, None])
# plt.title('%s @ %s' % (n, sample_factor))
# wfn = '/om/user/raygon/projects/deepFerret/src/dflearn/COSYNE18_diagPlots/filters_%s_%s.pdf' % (n, sample_factor)
# plt.savefig(wfn)
# plt.show()
# ipdb.set_trace()
is_batch = batch_signal.shape[0] > 1
for i in range(batch_signal.shape[0]):
# if is_batch:
# print('generating cochleagram -> %s/%s' % (i+1, batch_signal.shape[0]))
temp_signal_flat = sb.reshape_signal_canonical(batch_signal[i, ...])
if ret_mode == 'envs' or ret_mode == 'all':
temp_sb = sb.generate_subband_envelopes_fast(temp_signal_flat, filts,
padding_size=padding_size, fft_mode=fft_mode, debug_ret_all=ret_all_sb)
elif ret_mode == 'subband':
temp_sb = sb.generate_subbands(temp_signal_flat, filts, padding_size=padding_size,
fft_mode=fft_mode, debug_ret_all=ret_all_sb)
elif ret_mode == 'analytic':
temp_sb = sb.generate_subbands(temp_signal_flat, filts, padding_size=padding_size,
fft_mode=fft_mode)
else:
raise NotImplementedError('`ret_mode` is not supported.')
if ret_mode == 'envs':
if downsample is None or callable(downsample):
# downsample is None or callable
temp_sb = apply_envelope_downsample(temp_sb, downsample)
else:
# interpret downsample as new sampling rate
temp_sb = apply_envelope_downsample(temp_sb, 'poly', sr, downsample)
temp_sb = apply_envelope_nonlinearity(temp_sb, nonlinearity)
if i == 0:
sb_out = np.zeros(([batch_signal.shape[0]] + list(temp_sb.shape)))
sb_out[i] = temp_sb
sb_out = sb_out.squeeze()
if ret_mode == 'all':
out_dict = {}
# add all local variables to out_dict
for k in dir():
if k != 'out_dict':
out_dict[k] = locals()[k]
return out_dict
else:
return sb_out
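# Example usage (a sketch; `signal_wav` and `sr_wav` below are a hypothetical 1-D
# numpy waveform and its sampling rate, not defined in this module):
#   coch = cochleagram(signal_wav, sr_wav, n=38, low_lim=50, hi_lim=20000,
#                      sample_factor=2, nonlinearity='db')
# With the default ret_mode='envs' this yields the subband envelopes (one row per
# filter), after any requested downsampling and nonlinearity have been applied.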
def human_cochleagram(signal, sr, n=None, low_lim=50, hi_lim=20000,
sample_factor=2, padding_size=None, downsample=None, nonlinearity=None,
fft_mode='auto', ret_mode='envs', strict=True, **kwargs):
"""Convenience function to generate the subband envelopes
(i.e., the cochleagram) of the provided signal using sensible default
parameters for a human cochleagram.
  This first creates an ERB filterbank with the provided input arguments for
the provided signal. This filterbank is then used to perform the subband
decomposition to create the subband envelopes. The resulting envelopes can be
optionally downsampled and then modified with a nonlinearity.
Args:
signal (array): The sound signal (waveform) in the time domain. Should be
flattened, i.e., the shape is (n_samples,).
sr (int): Sampling rate associated with the signal waveform.
n (int): Number of filters (subbands) to be generated with standard
sampling (i.e., using a sampling factor of 1). Note, the actual number of
filters in the generated filterbank depends on the sampling factor, and
will also include lowpass and highpass filters that allow for
perfect reconstruction of the input signal (the exact number of lowpass
and highpass filters is determined by the sampling factor).
low_lim (int): Lower limit of frequency range. Filters will not be defined
below this limit.
hi_lim (int): Upper limit of frequency range. Filters will not be defined
above this limit.
sample_factor (int): Positive integer that determines how densely ERB function
will be sampled to create bandpass filters. 1 represents standard sampling;
adjacent bandpass filters will overlap by 50%. 2 represents 2x overcomplete sampling;
adjacent bandpass filters will overlap by 75%. 4 represents 4x overcomplete sampling;
adjacent bandpass filters will overlap by 87.5%.
padding_size (int, optional): If None (default), the signal will not be padded
before filtering. Otherwise, the filters will be created assuming the
waveform signal will be padded to length padding_size+signal_length.
downsample (None, int, callable, optional): The `downsample` argument can
be an integer representing the upsampling factor in polyphase resampling
(with `sr` as the downsampling factor), a callable
(to perform custom downsampling), or None to return the
unmodified cochleagram; see `apply_envelope_downsample` for more
information. If `ret_mode` is 'envs', this will be applied to the
cochleagram before the nonlinearity, otherwise no downsampling will be
performed. Providing a callable for custom downsampling is suggested.
nonlinearity ({None, 'db', 'power', callable}, optional): The `nonlinearity`
argument can be a predefined type, a callable
(to apply a custom nonlinearity), or None to return the unmodified
cochleagram; see `apply_envelope_nonlinearity` for more information.
If `ret_mode` is 'envs', this will be applied to the cochleagram after
downsampling, otherwise no nonlinearity will be applied. Providing a
callable for applying a custom nonlinearity is suggested.
fft_mode ({'auto', 'fftw', 'np'}, optional): Determine what implementation
to use for FFT-like operations. 'auto' will attempt to use pyfftw, but
will fallback to numpy, if necessary.
ret_mode ({'envs', 'subband', 'analytic', 'all'}): Determines what will be
returned. 'envs' (default) returns the subband envelopes; 'subband'
returns just the subbands, 'analytic' returns the analytic signal provided
by the Hilbert transform, 'all' returns all local variables created in this
function.
strict (bool, optional): If True (default), will throw an error if this
function is used in a way that is unsupported by the MATLAB implementation.
Returns:
array:
**out**: The output, depending on the value of `ret_mode`. If the `ret_mode`
is 'envs' and a downsampling and/or nonlinearity
operation was requested, the output will reflect these operations.
"""
if n is None:
n = int(np.floor(erb.freq2erb(hi_lim) - erb.freq2erb(low_lim)) - 1)
# print("here")
out = cochleagram(signal, sr, n, low_lim, hi_lim, sample_factor, padding_size,
downsample, nonlinearity, fft_mode, ret_mode, strict, **kwargs)
return out
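# Illustrative usage sketch (hedged): the tone, sampling rate, and downsample
# value below are assumptions chosen for demonstration; sr is set so the
# default hi_lim of 20000 Hz sits at the Nyquist frequency, and downsample=200
# keeps the polyphase resampling at an integer number of samples.
def _example_human_cochleagram():
  """Compute the cochleagram of a 1 s, 440 Hz tone (demonstration only)."""
  sr = 40000
  t = np.linspace(0, 1, sr, endpoint=False)
  tone = np.sin(2 * np.pi * 440 * t)
  # ret_mode defaults to 'envs', so this returns compressed subband envelopes.
  return human_cochleagram(tone, sr, downsample=200, nonlinearity='power')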
def invert_cochleagram_with_filterbank(cochleagram, filters, sr, target_rms=100,
downsample=None, nonlinearity=None, n_iter=20):
"""Generate a waveform from a cochleagram using a provided filterbank.
Args:
cochleagram (array): The subband envelopes (i.e., cochleagram) to invert.
filters (array): The filterbank, in frequency space, used to generate the
cochleagram. This should be the full filter-set output of
erbFilter.make_erb_cos_filters_nx, or similar.
sr (int): Sampling rate associated with the cochleagram.
target_rms (scalar): Target root-mean-squared value of the output, related
to SNR, TODO: this needs to be checked
downsample (None, int, callable, optional): If downsampling was performed on
`cochleagram`, this is the operation to invert that downsampling
(i.e., upsample); this determines the length of the output signal.
The `downsample` argument can be an integer representing the downsampling
factor in polyphase resampling (with `sr` as the upsampling factor),
a callable (to perform custom downsampling), or None to return the
unmodified cochleagram; see `apply_envelope_downsample` for more
information. Providing a custom callable for upsampling
is suggested.
nonlinearity ({None, 'db', 'power', callable}, optional): If a nonlinearity
was applied to `cochleagram`, this is the operation to invert that
nonlinearity. The `nonlinearity` argument can be a predefined type,
a callable (to apply a custom nonlinearity), or None to return the
unmodified cochleagram; see `apply_envelope_nonlinearity` for more
information. If this is a predefined type, the nonlinearity will be
inverted according to `apply_envelope_nonlinearity`.
fft_mode ({'auto', 'fftw', 'np'}, optional): Determine what implementation
to use for FFT-like operations. 'auto' will attempt to use pyfftw, but
will fallback to numpy, if necessary.
n_iter (int, optional): Number of iterations to perform for the inversion.
Returns:
array:
**inv_signal**: The waveform signal created by inverting the cochleagram.
"""
# decompress envelopes
linear_cochleagram = apply_envelope_nonlinearity(cochleagram, nonlinearity, invert=True)
if downsample is None or callable(downsample):
_wrapped_downsample = lambda coch, inv: apply_envelope_downsample(coch, downsample, invert=inv) # downsample is None or callable
else:
# interpret downsample as new sampling rate
_wrapped_downsample = lambda coch, inv: apply_envelope_downsample(coch, 'poly', sr, downsample, invert=inv)
# apply the upsampling
linear_cochleagram = _wrapped_downsample(linear_cochleagram, True)  # upsample the decompressed envelopes
coch_length = linear_cochleagram.shape[1]
# cochleagram /= cochleagram.max()
# print('ref coch: [%s, %s]' % (cochleagram.min(), cochleagram.max()))
# generated signal starts from noise
synth_size = coch_length
synth_sound = np.random.random(synth_size) # uniform noise
# synth_sound = np.random.randn(synth_size) # gaussian noise
# print('synth sound [%s, %s]' % (synth_sound.min(), synth_sound.max()))
# iteratively enforce envelopes on cochleagram of iter_noise
for i in range(n_iter):
# calculate error in decibels between original and synthesized cochleagrams
# if i > 0:
# db_error = np.abs(cochleagram - np.abs(synth_analytic_subbands))
# else:
# db_error = np.abs(cochleagram - np.zeros_like(cochleagram))
# synth_sound = target_rms / utils.rms(synth_sound) * synth_sound
# GET THE ERROR OF ENVS FROM DOWNSAMPLING
synth_analytic_subbands = sb.generate_analytic_subbands(synth_sound, filters)
synth_subband_mags = np.abs(synth_analytic_subbands) # complex magnitude
synth_subband_phases = synth_analytic_subbands / synth_subband_mags # should be phases
synth_subbands = synth_subband_phases * linear_cochleagram
synth_subbands = np.real(synth_subbands)
synth_subbands = np.nan_to_num(synth_subbands)  # zero out NaNs produced by zero-magnitude phases
synth_sound = sb.collapse_subbands(synth_subbands, filters)
synth_analytic_subbands = sb.generate_analytic_subbands(synth_sound, filters)
synth_coch = np.abs(synth_analytic_subbands)
# print('ref coch: [%s, %s], synth coch: [%s, %s]' % (cochleagram.min(), cochleagram.max(), synth_coch.min(), synth_coch.max()))
# apply compression and downsample if necessary to compare reference coch to synth
synth_coch = _wrapped_downsample(synth_coch, False)  # make the synthesized cochleagram comparable to the reference
synth_coch = apply_envelope_nonlinearity(synth_coch, nonlinearity, invert=False)
# compute error using raw cochleagrams
db_error = 10 * np.log10(np.sum(np.power(cochleagram - synth_coch, 2)) /
np.sum(np.power(cochleagram, 2)))
print('inverting iteration: %s, error (db): %s' % (i + 1, db_error))
return synth_sound, synth_coch
def invert_cochleagram(cochleagram, sr, n, low_lim, hi_lim, sample_factor,
padding_size=None, target_rms=100, downsample=None, nonlinearity=None, n_iter=50, strict=True):
"""Generate a waveform from a cochleagram using the provided arguments to
construct a filterbank.
Args:
cochleagram (array): The subband envelopes (i.e., cochleagram) to invert.
sr (int): Sampling rate associated with the cochleagram.
n (int): Number of filters (subbands) to be generated with standard
sampling (i.e., using a sampling factor of 1). Note, the actual number of
filters in the generated filterbank depends on the sampling factor, and
will also include lowpass and highpass filters that allow for
perfect reconstruction of the input signal (the exact number of lowpass
and highpass filters is determined by the sampling factor).
low_lim (int): Lower limit of frequency range. Filters will not be defined
below this limit.
hi_lim (int): Upper limit of frequency range. Filters will not be defined
above this limit.
sample_factor (int): Positive integer that determines how densely ERB function
will be sampled to create bandpass filters. 1 represents standard sampling;
adjacent bandpass filters will overlap by 50%. 2 represents 2x overcomplete sampling;
adjacent bandpass filters will overlap by 75%. 4 represents 4x overcomplete sampling;
adjacent bandpass filters will overlap by 87.5%.
padding_size (int, optional): If None (default), the signal will not be padded
before filtering. Otherwise, the filters will be created assuming the
waveform signal will be padded to length padding_size+signal_length.
target_rms (scalar): Target root-mean-squared value of the output, related
to SNR, TODO: this needs to be checked
downsample (None, int, callable, optional): If downsampling was performed on
`cochleagram`, this is the operation to invert that downsampling
(i.e., upsample); this determines the length of the output signal.
The `downsample` argument can be an integer representing the downsampling
factor in polyphase resampling (with `sr` as the upsampling factor),
a callable (to perform custom downsampling), or None to return the
unmodified cochleagram; see `apply_envelope_downsample` for more
information. Providing a custom callable for upsampling
is suggested.
nonlinearity ({None, 'db', 'power', callable}, optional): If a nonlinearity
was applied to `cochleagram`, this is the operation to invert that
nonlinearity. The `nonlinearity` argument can be a predefined type,
a callable (to apply a custom nonlinearity), or None to return the
unmodified cochleagram; see `apply_envelope_nonlinearity` for more
information. If this is a predefined type, the nonlinearity will be
inverted according to `apply_envelope_nonlinearity`.
fft_mode ({'auto', 'fftw', 'np'}, optional): Determine what implementation
to use for FFT-like operations. 'auto' will attempt to use pyfftw, but
will fallback to numpy, if necessary.
n_iter (int, optional): Number of iterations to perform for the inversion.
strict (bool, optional): If True (default), will throw an error if this
function is used in a way that is unsupported by the MATLAB implementation.
Returns:
array:
**inv_signal**: The waveform signal created by inverting the cochleagram.
**inv_coch**: The inverted cochleagram.
"""
# decompress envelopes
cochleagram_ref = apply_envelope_nonlinearity(cochleagram, nonlinearity, invert=True)
# upsample envelopes
if downsample is None or callable(downsample):
# downsample is None or callable
cochleagram_ref = apply_envelope_downsample(cochleagram_ref, downsample, invert=True)
else:
# interpret downsample as new sampling rate
cochleagram_ref = apply_envelope_downsample(cochleagram_ref, 'poly', sr, downsample, invert=True)
signal_length = cochleagram_ref.shape[1]
# generate filterbank
filts, hz_cutoffs, freqs = erb.make_erb_cos_filters_nx(signal_length,
sr, n, low_lim, hi_lim, sample_factor, padding_size=padding_size,
full_filter=True, strict=strict)
# invert filterbank
inv_signal, inv_coch = invert_cochleagram_with_filterbank(cochleagram_ref, filts, sr, target_rms=target_rms, n_iter=n_iter)
return inv_signal, inv_coch
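# Illustrative round-trip sketch (hedged): the analysis parameters below are
# demonstration assumptions; inversion quality depends on n_iter and on using
# the same n/low_lim/hi_lim/sample_factor for both the forward and inverse
# transforms.
def _example_invert_cochleagram():
  sr, n, low_lim, hi_lim, sample_factor = 40000, 38, 50, 20000, 2
  t = np.linspace(0, 1, sr, endpoint=False)
  tone = np.sin(2 * np.pi * 440 * t)
  coch = human_cochleagram(tone, sr, n=n, low_lim=low_lim, hi_lim=hi_lim,
                           sample_factor=sample_factor, nonlinearity='power')
  inv_signal, inv_coch = invert_cochleagram(coch, sr, n, low_lim, hi_lim,
                                            sample_factor,
                                            nonlinearity='power', n_iter=10)
  return inv_signal, inv_coch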
def apply_envelope_downsample(subband_envelopes, mode, audio_sr=None, env_sr=None, invert=False, strict=True):
"""Apply a downsampling operation to cochleagram subband envelopes.
The `mode` argument can be a predefined downsampling type from
{'poly', 'resample', 'decimate'}, a callable (to perform custom downsampling),
or None to return the unmodified cochleagram. If `mode` is a predefined type,
`audio_sr` and `env_sr` are required.
Args:
subband_envelopes (array): Cochleagram subband envelopes to downsample.
mode ({'poly', 'resample', 'decimate', callable, None}): Determines the
downsampling operation to apply to the cochleagram. 'decimate' will
resample using scipy.signal.decimate with audio_sr/env_sr as the
downsampling factor. 'resample' will downsample using
scipy.signal.resample with np.ceil(subband_envelopes.shape[1]*(audio_sr/env_sr))
as the number of samples. 'poly' will resample using scipy.signal.resample_poly
with `env_sr` as the upsampling factor and `audio_sr` as the downsampling
factor. If `mode` is a python callable (e.g., function), it will be
applied to `subband_envelopes`. If this is None, no downsampling is
performed and the unmodified cochleagram is returned.
audio_sr (int, optional): If using a predefined sampling `mode`, this
represents the sampling rate of the original signal.
env_sr (int, optional): If using a predefined sampling `mode`, this
represents the sampling rate of the downsampled subband envelopes.
invert (bool, optional): If using a predefined sampling `mode`, this
will invert (i.e., upsample) the subband envelopes using the values
provided in `audio_sr` and `env_sr`.
strict (bool, optional): If using a predefined sampling `mode`, this
ensures the downsampling will result in an integer number of samples. This
should mean the upsample(downsample(x)) will have the same number of
samples as x.
Returns:
array:
**downsampled_subband_envelopes**: The subband_envelopes after being
downsampled with `mode`.
"""
if mode is None:
pass
elif callable(mode):
# apply the downsampling function
subband_envelopes = mode(subband_envelopes)
else:
mode = mode.lower()
if audio_sr is None:
raise ValueError('`audio_sr` cannot be None. Provide sampling rate of original audio signal.')
if env_sr is None:
raise ValueError('`env_sr` cannot be None. Provide sampling rate of subband envelopes (cochleagram).')
if mode == 'decimate':
if invert:
raise NotImplementedError()
else:
# was BadCoefficients error with Chebyshev type I filter [default]
subband_envelopes = scipy.signal.decimate(subband_envelopes, audio_sr // env_sr, axis=1, ftype='fir') # this caused weird banding artifacts
elif mode == 'resample':
if invert:
subband_envelopes = scipy.signal.resample(subband_envelopes, np.ceil(subband_envelopes.shape[1]*(audio_sr/env_sr)), axis=1) # fourier method: this causes NANs that get converted to 0s
else:
subband_envelopes = scipy.signal.resample(subband_envelopes, np.ceil(subband_envelopes.shape[1]*(env_sr/audio_sr)), axis=1) # fourier method: this causes NANs that get converted to 0s
elif mode == 'poly':
if strict:
n_samples = subband_envelopes.shape[1] * (audio_sr / env_sr) if invert else subband_envelopes.shape[1] * (env_sr / audio_sr)
if not np.isclose(n_samples, int(n_samples)):
raise ValueError('Choose `env_sr` and `audio_sr` such that the number of samples after polyphase resampling is an integer'+
'\n(length: %s, env_sr: %s, audio_sr: %s !--> %s' % (subband_envelopes.shape[1], env_sr, audio_sr, n_samples))
if invert:
subband_envelopes = scipy.signal.resample_poly(subband_envelopes, audio_sr, env_sr, axis=1) # this requires v0.18 of scipy
else:
subband_envelopes = scipy.signal.resample_poly(subband_envelopes, env_sr, audio_sr, axis=1) # this requires v0.18 of scipy
else:
raise ValueError('Unsupported downsampling `mode`: %s' % mode)
subband_envelopes[subband_envelopes < 0] = 0
return subband_envelopes
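# Illustrative sketch (hedged): round-trip a dummy envelope array through the
# 'poly' mode. The 10000 Hz / 200 Hz rates are assumptions chosen so that the
# resampled length stays an integer, as required when strict=True.
def _example_envelope_downsample():
  envs = np.abs(np.random.randn(40, 10000))  # dummy (n_subbands, n_samples) envelopes
  down = apply_envelope_downsample(envs, 'poly', audio_sr=10000, env_sr=200)
  up = apply_envelope_downsample(down, 'poly', audio_sr=10000, env_sr=200, invert=True)
  return down.shape, up.shape  # (40, 200) and (40, 10000)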
def apply_envelope_nonlinearity(subband_envelopes, nonlinearity, invert=False):
"""Apply a nonlinearity to the cochleagram.
The `nonlinearity` argument can be a predefined type, a callable
(to apply a custom nonlinearity), or None to return the unmodified
cochleagram.
Args:
subband_envelopes (array): Cochleagram to apply the nonlinearity to.
nonlinearity ({'db', 'power'}, callable, None): Determines the nonlinearity
operation to apply to the cochleagram. If this is a valid string, one
of the predefined nonlinearities will be used. It can be: 'power' to
perform np.power(subband_envelopes, 3.0 / 10.0) or 'db' to perform
20 * np.log10(subband_envelopes / np.max(subband_envelopes)), with values
clamped to be greater than -60. If `nonlinearity` is a python callable
(e.g., function), it will be applied to `subband_envelopes`. If this is
None, no nonlinearity is applied and the unmodified cochleagram is
returned.
invert (bool): For predefined nonlinearities 'db' and 'power', if False
(default), the nonlinearity will be applied. If True, the nonlinearity
will be inverted.
Returns:
array:
**nonlinear_subband_envelopes**: The subband_envelopes with the specified
nonlinearity applied.
Raises:
ValueError: Error if the provided `nonlinearity` isn't a recognized
option.
"""
# apply nonlinearity
if nonlinearity is None:
pass
elif nonlinearity == "power":
if invert:
subband_envelopes = np.power(subband_envelopes, 10.0 / 3.0) # from Alex's code
else:
subband_envelopes = np.power(subband_envelopes, 3.0 / 10.0) # from Alex's code
elif nonlinearity == "db":
if invert:
subband_envelopes = np.power(10, subband_envelopes / 20) # adapted from Anastasiya's code
else:
dtype_eps = np.finfo(subband_envelopes.dtype).eps
subband_envelopes[subband_envelopes == 0] = dtype_eps
subband_envelopes = 20 * np.log10(subband_envelopes / np.max(subband_envelopes))
subband_envelopes[subband_envelopes < -60] = -60
elif callable(nonlinearity):
subband_envelopes = nonlinearity(subband_envelopes)
else:
raise ValueError('argument "nonlinearity" must be "power", "db", or a function.')
return subband_envelopes
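# Illustrative sketch (hedged): apply and invert the predefined 'db'
# nonlinearity on dummy envelopes. Because values are normalized by the
# maximum and clipped at -60 dB, the round trip is only approximate.
def _example_envelope_nonlinearity():
  envs = np.abs(np.random.randn(40, 200)) + 1e-3  # dummy positive envelopes
  compressed = apply_envelope_nonlinearity(envs, 'db')
  restored = apply_envelope_nonlinearity(compressed, 'db', invert=True)
  return compressed.min(), restored.shape  # min is >= -60; shape is preserved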
| 50.323427
| 192
| 0.724996
|
e7d4a340af0465d26aa9572719336b20b47b1ebe
| 423
|
py
|
Python
|
pyscord/exceptions.py
|
Sly-Little-Test/Pyscord
|
9c1bb7ccd79366686ef0e3e302d30fdb5044bc77
|
[
"MIT"
] | null | null | null |
pyscord/exceptions.py
|
Sly-Little-Test/Pyscord
|
9c1bb7ccd79366686ef0e3e302d30fdb5044bc77
|
[
"MIT"
] | null | null | null |
pyscord/exceptions.py
|
Sly-Little-Test/Pyscord
|
9c1bb7ccd79366686ef0e3e302d30fdb5044bc77
|
[
"MIT"
] | null | null | null |
from typing import Optional
class InvalidTokenError(ValueError):
def __init__(self, hint: Optional[str] = None):
"""Exception raised when the authorization token is invalid.
:param hint:
Additional information about the exception cause.
"""
super(InvalidTokenError, self).__init__(
"The given token is not a valid token." + (str(hint) * bool(hint))
)
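# Illustrative usage sketch (hedged): the token check below is a stand-in for
# a caller's own validation, shown only to demonstrate how the hint argument
# is appended to the error message.
def _example_token_check(token: Optional[str]):
    if not token:
        raise InvalidTokenError(hint=" Did you forget to set the token?")
    return token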
| 28.2
| 78
| 0.635934
|
6637c441a354ee9e734586f7548d8c3f731b5ae2
| 21,700
|
py
|
Python
|
opentamp/core/util_classes/no_openrave_body.py
|
Algorithmic-Alignment-Lab/openTAMP
|
f0642028d551d0436b3a3dbc3bfb2f23a00adc14
|
[
"MIT"
] | 4
|
2022-02-13T15:52:18.000Z
|
2022-03-26T17:33:13.000Z
|
opentamp/core/util_classes/no_openrave_body.py
|
Algorithmic-Alignment-Lab/openTAMP
|
f0642028d551d0436b3a3dbc3bfb2f23a00adc14
|
[
"MIT"
] | 1
|
2022-02-13T22:48:09.000Z
|
2022-02-13T22:48:09.000Z
|
opentamp/core/util_classes/no_openrave_body.py
|
Algorithmic-Alignment-Lab/openTAMP
|
f0642028d551d0436b3a3dbc3bfb2f23a00adc14
|
[
"MIT"
] | null | null | null |
import numpy as np
from math import cos, sin, atan2
from errors_exceptions import OpenRAVEException
from openravepy import quatFromAxisAngle, matrixFromPose, poseFromMatrix, \
axisAngleFromRotationMatrix, KinBody, GeometryType, RaveCreateRobot, \
RaveCreateKinBody, TriMesh, Environment, DOFAffine, IkParameterization, IkParameterizationType, \
IkFilterOptions, matrixFromAxisAngle, quatFromRotationMatrix
from opentamp.core.util_classes.robots import Robot, PR2, Baxter, Washer
from opentamp.core.util_classes.items import Item, Box, Can, BlueCan, RedCan, Circle, BlueCircle, RedCircle, GreenCircle, Obstacle, Wall, Table, Basket
WALL_THICKNESS = 1
class OpenRAVEBody(object):
def __init__(self, env, name, geom):
assert env is not None
self.name = name
self._env = env
self._geom = geom
if env.GetKinBody(name) == None and env.GetRobot(name) == None:
if isinstance(geom, Robot):
self._add_robot(geom)
elif isinstance(geom, Item):
self._add_item(geom)
else:
raise OpenRAVEException("Geometry not supported for %s for OpenRAVEBody"%geom)
elif env.GetKinBody(name) != None:
self.env_body = env.GetKinBody(name)
else:
self.env_body = env.GetRobot(name)
self.set_transparency(0.5)
def delete(self):
self._env.Remove(self.env_body)
def set_transparency(self, transparency):
for link in self.env_body.GetLinks():
for geom in link.GetGeometries():
geom.SetTransparency(transparency)
def _add_robot(self, geom):
self.env_body = self._env.ReadRobotXMLFile(geom.shape)
self.env_body.SetName(self.name)
self._env.Add(self.env_body)
geom.setup(self.env_body)
def _add_item(self, geom):
try:
fun_name = "self._add_{}".format(geom._type)
eval(fun_name)(geom)
except:
self._add_obj(geom)
def _add_circle(self, geom):
color = [1,0,0]
if hasattr(geom, "color") and geom.color == 'blue':
color = [0, 0, 1]
elif hasattr(geom, "color") and geom.color == 'green':
color = [0, 1, 0]
elif hasattr(geom, "color") and geom.color == 'red':
color = [1, 0, 0]
self.env_body = OpenRAVEBody.create_cylinder(self._env, self.name, np.eye(4),
[geom.radius, 2], color)
self._env.AddKinBody(self.env_body)
def _add_can(self, geom):
color = [1,0,0]
if hasattr(geom, "color") and geom.color == 'blue':
color = [0, 0, 1]
elif hasattr(geom, "color") and geom.color == 'green':
color = [0, 1, 0]
elif hasattr(geom, "color") and geom.color == 'red':
color = [1, 0, 0]
self.env_body = OpenRAVEBody.create_cylinder(self._env, self.name, np.eye(4),
[geom.radius, geom.height], color)
self._env.AddKinBody(self.env_body)
def _add_obstacle(self, geom):
obstacles = np.matrix('-0.576036866359447, 0.918128654970760, 1;\
-0.806451612903226,-1.07017543859649, 1;\
1.01843317972350,-0.988304093567252, 1;\
0.640552995391705,0.906432748538011, 1;\
-0.576036866359447, 0.918128654970760, -1;\
-0.806451612903226,-1.07017543859649, -1;\
1.01843317972350,-0.988304093567252, -1;\
0.640552995391705,0.906432748538011, -1')
body = RaveCreateKinBody(self._env, '')
vertices = np.array(obstacles)
indices = np.array([[0, 1, 2], [2, 3, 0], [4, 5, 6], [6, 7, 4], [0, 4, 5],
[0, 1, 5], [1, 2, 5], [5, 6, 2], [2, 3, 6], [6, 7, 3],
[0, 3, 7], [0, 4, 7]])
body.InitFromTrimesh(trimesh=TriMesh(vertices, indices), draw=True)
body.SetName(self.name)
for link in body.GetLinks():
for geom in link.GetGeometries():
geom.SetDiffuseColor((.9, .9, .9))
self.env_body = body
self._env.AddKinBody(body)
def _add_box(self, geom):
infobox = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, geom.dim, [0.5, 0.2, 0.1])
self.env_body = RaveCreateKinBody(self._env,'')
self.env_body.InitFromGeometries([infobox])
self.env_body.SetName(self.name)
self._env.Add(self.env_body)
def _add_sphere(self, geom):
infobox = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Sphere, [geom.radius], [0, 0, 1])
self.env_body = RaveCreateKinBody(self._env,'')
self.env_body.InitFromGeometries([infobox])
self.env_body.SetName(self.name)
self._env.Add(self.env_body)
def _add_wall(self, geom):
self.env_body = OpenRAVEBody.create_wall(self._env, geom.wall_type)
self.env_body.SetName(self.name)
self._env.Add(self.env_body)
def _add_obj(self, geom):
self.env_body = self._env.ReadKinBodyXMLFile(geom.shape)
self.env_body.SetName(self.name)
self._env.Add(self.env_body)
def _add_table(self, geom):
self.env_body = OpenRAVEBody.create_table(self._env, geom)
self.env_body.SetName(self.name)
self._env.Add(self.env_body)
def _add_basket(self, geom):
self.env_body = self._env.ReadKinBodyXMLFile(geom.shape)
self.env_body.SetName(self.name)
self._env.Add(self.env_body)
def set_pose(self, base_pose, rotation = [0, 0, 0]):
trans = None
if np.any(np.isnan(base_pose)) or np.any(np.isnan(rotation)):
return
if isinstance(self._geom, Robot) and not isinstance(self._geom, Washer):
trans = OpenRAVEBody.base_pose_to_mat(base_pose)
elif len(base_pose) == 2:
trans = OpenRAVEBody.base_pose_2D_to_mat(base_pose)
else:
trans = OpenRAVEBody.transform_from_obj_pose(base_pose, rotation)
self.env_body.SetTransform(trans)
def set_dof(self, dof_value_map):
"""
dof_value_map: A dict that maps robot attribute name to a list of corresponding values
"""
# make sure only sets dof for robot
# assert isinstance(self._geom, Robot)
if not isinstance(self._geom, Robot): return
# Get current dof value for each joint
dof_val = self.env_body.GetActiveDOFValues()
for k, v in dof_value_map.items():
if k not in self._geom.dof_map or np.any(np.isnan(v)): continue
inds = self._geom.dof_map[k]
try:
dof_val[inds] = v
except IndexError:
print(('\n\n\nBad index in set dof:', inds, k, v, self._geom, '\n\n\n'))
# Set new DOF value to the robot
self.env_body.SetActiveDOFValues(dof_val)
def _set_active_dof_inds(self, inds = None):
"""
Set the active dof indices to the ones we are interested in.
This function is implemented to simplify jacobian calculation in the CollisionPredicate
inds: Optional list of index specifying dof index we are interested in
"""
robot = self.env_body
if inds == None:
dof_inds = np.ndarray(0, dtype=np.int)
if robot.GetJoint("torso_lift_joint") != None:
dof_inds = np.r_[dof_inds, robot.GetJoint("torso_lift_joint").GetDOFIndex()]
dof_inds = np.r_[dof_inds, robot.GetManipulator("leftarm").GetArmIndices()]
dof_inds = np.r_[dof_inds, robot.GetManipulator("leftarm").GetGripperIndices()]
dof_inds = np.r_[dof_inds, robot.GetManipulator("rightarm").GetArmIndices()]
dof_inds = np.r_[dof_inds, robot.GetManipulator("rightarm").GetGripperIndices()]
robot.SetActiveDOFs(
dof_inds,
DOFAffine.X + DOFAffine.Y + DOFAffine.RotationAxis,
[0, 0, 1])
else:
robot.SetActiveDOFs(inds)
@staticmethod
def create_cylinder(env, body_name, t, dims, color=[0, 1, 1]):
infocylinder = OpenRAVEBody.create_body_info(GeometryType.Cylinder, dims, color)
if type(env) != Environment:
# import ipdb; ipdb.set_trace()
print("Environment object is not valid")
cylinder = RaveCreateKinBody(env, '')
cylinder.InitFromGeometries([infocylinder])
cylinder.SetName(body_name)
cylinder.SetTransform(t)
return cylinder
@staticmethod
def create_box(env, name, transform, dims, color=[0,0,1]):
infobox = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, dims, color, 0, True)
box = RaveCreateKinBody(env,'')
box.InitFromGeometries([infobox])
box.SetName(name)
box.SetTransform(transform)
return box
@staticmethod
def create_sphere(env, name, transform, dims, color=[0,0,1]):
infobox = OpenRAVEBody.create_body_info(GeometryType.Sphere, dims, color)
sphere = RaveCreateKinBody(env,'')
sphere.InitFromGeometries([infobox])
sphere.SetName(name)
sphere.SetTransform(transform)
return sphere
@staticmethod
def create_body_info(body_type, dims, color, transparency = 0.8, visible = True):
infobox = KinBody.Link.GeometryInfo()
infobox._type = body_type
infobox._vGeomData = dims
infobox._bVisible = True
infobox._fTransparency = transparency
infobox._vDiffuseColor = color
return infobox
@staticmethod
def create_wall(env, wall_type):
component_type = KinBody.Link.GeomType.Box
wall_color = [0.5, 0.2, 0.1]
box_infos = []
if wall_type == 'closet':
wall_endpoints = [[-6.0,-8.0],[-6.0,4.0],[1.9,4.0],[1.9,8.0],[5.0,8.0],[5.0,4.0],[13.0,4.0],[13.0,-8.0],[-6.0,-8.0]]
else:
raise NotImplementedError
for i, (start, end) in enumerate(zip(wall_endpoints[0:-1], wall_endpoints[1:])):
dim_x, dim_y = 0, 0
thickness = WALL_THICKNESS
if start[0] == end[0]:
ind_same, ind_diff = 0, 1
length = abs(start[ind_diff] - end[ind_diff])
dim_x, dim_y = thickness, length/2 + thickness
elif start[1] == end[1]:
ind_same, ind_diff = 1, 0
length = abs(start[ind_diff] - end[ind_diff])
dim_x, dim_y = length/2 + thickness, thickness
else:
raise NotImplementedError('Can only create axis-aligned walls')
transform = np.eye(4)
transform[ind_same, 3] = start[ind_same]
if start[ind_diff] < end[ind_diff]:
transform[ind_diff, 3] = start[ind_diff] + length/2
else:
transform[ind_diff, 3] = end[ind_diff] + length/2
dims = [dim_x, dim_y, 1]
box_info = OpenRAVEBody.create_body_info(component_type, dims, wall_color)
box_info._t = transform
box_infos.append(box_info)
wall = RaveCreateKinBody(env, '')
wall.InitFromGeometries(box_infos)
return wall
@staticmethod
def get_wall_dims(wall_type='closet'):
wall_endpoints = [[-6.0,-8.0],[-6.0,4.0],[1.9,4.0],[1.9,8.0],[5.0,8.0],[5.0,4.0],[13.0,4.0],[13.0,-8.0],[-6.0,-8.0]]
dims = []
for i, (start, end) in enumerate(zip(wall_endpoints[0:-1], wall_endpoints[1:])):
dim_x, dim_y = 0, 0
thickness = WALL_THICKNESS
if start[0] == end[0]:
ind_same, ind_diff = 0, 1
length = abs(start[ind_diff] - end[ind_diff])
dim_x, dim_y = thickness, length/2 + thickness
elif start[1] == end[1]:
ind_same, ind_diff = 1, 0
length = abs(start[ind_diff] - end[ind_diff])
dim_x, dim_y = length/2 + thickness, thickness
else:
raise NotImplementedError('Can only create axis-aligned walls')
transform = np.eye(4)
transform[ind_same, 3] = start[ind_same]
if start[ind_diff] < end[ind_diff]:
transform[ind_diff, 3] = start[ind_diff] + length/2
else:
transform[ind_diff, 3] = end[ind_diff] + length/2
dims.append(([dim_x, dim_y, 1], transform))
return dims
@staticmethod
def create_basket_col(env):
long_info1 = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.3,.15,.015], [0, 0.75, 1])
long_info2 = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.3,.15,.015], [0, 0.75, 1])
short_info1 = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.015,.15,.2], [0, 0.75, 1])
short_info2 = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.015,.15,.2], [0, 0.75, 1])
bottom_info = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.3,.015,.2], [0, 0.75, 1])
long_info1._t = OpenRAVEBody.transform_from_obj_pose([0,-0.118,0.208],[0,0,0.055])
long_info2._t = OpenRAVEBody.transform_from_obj_pose([0,-0.118,-0.208],[0,0,-0.055])
short_info1._t = OpenRAVEBody.transform_from_obj_pose([0.309,-0.118,0],[-0.055,0,0])
short_info2._t = OpenRAVEBody.transform_from_obj_pose([-0.309,-0.118,0],[0.055,0,0])
bottom_info._t = OpenRAVEBody.transform_from_obj_pose([0,-0.25,0],[0,0,0])
basket = RaveCreateRobot(env, '')
basket.InitFromGeometries([long_info1, long_info2, short_info1, short_info2, bottom_info])
return basket
@staticmethod
def create_table(env, geom):
thickness = geom.thickness
leg_height = geom.leg_height
back = geom.back
dim1, dim2 = geom.table_dim
legdim1, legdim2 = geom.leg_dim
table_color = [0.5, 0.2, 0.1]
component_type = KinBody.Link.GeomType.Box
tabletop = OpenRAVEBody.create_body_info(component_type, [dim1/2, dim2/2, thickness/2], table_color)
leg1 = OpenRAVEBody.create_body_info(component_type, [legdim1/2, legdim2/2, leg_height/2], table_color)
leg1._t[0, 3] = dim1/2 - legdim1/2
leg1._t[1, 3] = dim2/2 - legdim2/2
leg1._t[2, 3] = -leg_height/2 - thickness/2
leg2 = OpenRAVEBody.create_body_info(component_type, [legdim1/2, legdim2/2, leg_height/2], table_color)
leg2._t[0, 3] = dim1/2 - legdim1/2
leg2._t[1, 3] = -dim2/2 + legdim2/2
leg2._t[2, 3] = -leg_height/2 - thickness/2
leg3 = OpenRAVEBody.create_body_info(component_type, [legdim1/2, legdim2/2, leg_height/2], table_color)
leg3._t[0, 3] = -dim1/2 + legdim1/2
leg3._t[1, 3] = dim2/2 - legdim2/2
leg3._t[2, 3] = -leg_height/2 - thickness/2
leg4 = OpenRAVEBody.create_body_info(component_type, [legdim1/2, legdim2/2, leg_height/2], table_color)
leg4._t[0, 3] = -dim1/2 + legdim1/2
leg4._t[1, 3] = -dim2/2 + legdim2/2
leg4._t[2, 3] = -leg_height/2 - thickness/2
if back:
back_plate = OpenRAVEBody.create_body_info(component_type, [legdim1/10, dim2/2, leg_height-thickness/2], table_color)
back_plate._t[0, 3] = dim1/2 - legdim1/10
back_plate._t[1, 3] = 0
back_plate._t[2, 3] = -leg_height/2 - thickness/4
table = RaveCreateRobot(env, '')
if not back:
table.InitFromGeometries([tabletop, leg1, leg2, leg3, leg4])
else:
table.InitFromGeometries([tabletop, leg1, leg2, leg3, leg4, back_plate])
return table
@staticmethod
def base_pose_2D_to_mat(pose):
# x, y = pose
assert len(pose) == 2
x = pose[0]
y = pose[1]
rot = 0
q = quatFromAxisAngle((0, 0, rot)).tolist()
pos = [x, y, 0]
# pos = np.vstack((x,y,np.zeros(1)))
matrix = matrixFromPose(q + pos)
return matrix
@staticmethod
def base_pose_3D_to_mat(pose):
# x, y, z = pose
assert len(pose) == 3
x = pose[0]
y = pose[1]
z = pose[2]
rot = 0
q = quatFromAxisAngle((0, 0, rot)).tolist()
pos = [x, y, z]
# pos = np.vstack((x,y,np.zeros(1)))
matrix = matrixFromPose(q + pos)
return matrix
@staticmethod
def mat_to_base_pose_2D(mat):
pose = poseFromMatrix(mat)
x = pose[4]
y = pose[5]
return np.array([x,y])
@staticmethod
def base_pose_to_mat(pose):
# x, y, rot = pose
assert len(pose) == 3
x = pose[0]
y = pose[1]
rot = pose[2]
q = quatFromAxisAngle((0, 0, rot)).tolist()
pos = [x, y, 0]
# pos = np.vstack((x,y,np.zeros(1)))
matrix = matrixFromPose(q + pos)
return matrix
@staticmethod
def angle_pose_to_mat(pose):
assert len(pose) == 1
q = quatFromAxisAngle((0, 0, pose[0])).tolist()
pos = [0, 0, 0]  # translation was left undefined here; assume the origin
matrix = matrixFromPose(q + pos)
return matrix
@staticmethod
def mat_to_base_pose(mat):
pose = poseFromMatrix(mat)
x = pose[4]
y = pose[5]
rot = axisAngleFromRotationMatrix(mat)[2]
return np.array([x,y,rot])
@staticmethod
def obj_pose_from_transform(transform):
trans = transform[:3,3]
rot_matrix = transform[:3,:3]
yaw, pitch, roll = OpenRAVEBody._ypr_from_rot_matrix(rot_matrix)
# ipdb.set_trace()
return np.array((trans[0], trans[1], trans[2], yaw, pitch, roll))
@staticmethod
def transform_from_obj_pose(pose, rotation = np.array([0,0,0])):
x, y, z = pose
alpha, beta, gamma = rotation
Rz, Ry, Rx = OpenRAVEBody._axis_rot_matrices(pose, rotation)
rot_mat = np.dot(Rz, np.dot(Ry, Rx))
matrix = np.eye(4)
matrix[:3,:3] = rot_mat
matrix[:3,3] = [x,y,z]
return matrix
@staticmethod
def _axis_rot_matrices(pose, rotation):
x, y, z = pose
alpha, beta, gamma = rotation
Rz_2d = np.array([[cos(alpha), -sin(alpha)], [sin(alpha), cos(alpha)]])
Ry_2d = np.array([[cos(beta), sin(beta)], [-sin(beta), cos(beta)]])
Rx_2d = np.array([[cos(gamma), -sin(gamma)], [sin(gamma), cos(gamma)]])
I = np.eye(3)
Rz = I.copy()
Rz[:2,:2] = Rz_2d
Ry = I.copy()
Ry[[[0],[2]],[0,2]] = Ry_2d
Rx = I.copy()
Rx[1:3,1:3] = Rx_2d
# ipdb.set_trace()
return Rz, Ry, Rx
@staticmethod
def _ypr_from_rot_matrix(r):
# alpha
yaw = atan2(r[1,0], r[0,0])
# beta
pitch = atan2(-r[2,0],np.sqrt(r[2,1]**2+r[2,2]**2))
# gamma
roll = atan2(r[2,1], r[2,2])
# ipdb.set_trace()
return (yaw, pitch, roll)
@staticmethod
def get_ik_transform(pos, rot, right_arm = True):
trans = OpenRAVEBody.transform_from_obj_pose(pos, rot)
# OpenRAVE flips the rotation axis by 90 degrees, so we need to change it back
if right_arm:
rot_mat = matrixFromAxisAngle([0, np.pi/2, 0])
else:
rot_mat = matrixFromAxisAngle([0, -np.pi/2, 0])
trans_mat = trans[:3, :3].dot(rot_mat[:3, :3])
trans[:3, :3] = trans_mat
return trans
def get_ik_arm_pose(self, pos, rot):
# assert isinstance(self._geom, PR2)
solutions = self.get_ik_from_pose(pos, rot, 'rightarm_torso')
return solutions
def get_ik_from_pose(self, pos, rot, manip_name, use6d=True):
trans = OpenRAVEBody.get_ik_transform(pos, rot)
solutions = self.get_ik_solutions(manip_name, trans, use6d)
return solutions
def get_ik_solutions(self, manip_name, trans, use6d=True):
manip = self.env_body.GetManipulator(manip_name)
if use6d:
iktype = IkParameterizationType.Transform6D
else:
iktype = IkParameterizationType.Translation3D
solutions = manip.FindIKSolutions(IkParameterization(trans, iktype),IkFilterOptions.CheckEnvCollisions)
return solutions
def get_close_ik_solution(self, manip_name, trans, dof_map=None):
if dof_map is not None:
self.set_dof(dof_map)
manip = self.env_body.GetManipulator(manip_name)
iktype = IkParameterizationType.Transform6D
ik_param = IkParameterization(trans, iktype)
solution = manip.FindIKSolution(ik_param, IkFilterOptions.IgnoreSelfCollisions)
return solution
def fwd_kinematics(self, manip_name, dof_map=None, mat_result=False):
if dof_map is not None:
self.set_dof(dof_map)
trans = self.env_body.GetLink(manip_name).GetTransform()
if mat_result:
return trans
pos = trans[:3, 3]
quat = quatFromRotationMatrix(trans[:3, :3])
return {'pos': pos, 'quat': quat}
def param_fwd_kinematics(self, param, manip_names, t, mat_result=False):
if not isinstance(self._geom, Robot): return
attrs = list(param._attr_types.keys())
dof_val = self.env_body.GetActiveDOFValues()
for attr in attrs:
if attr not in self._geom.dof_map: continue
val = getattr(param, attr)[:, t]
if np.any(np.isnan(val)): continue
inds = self._geom.dof_map[attr]
dof_val[inds] = val
self.env_body.SetActiveDOFValues(dof_val)
result = {}
for manip_name in manip_names:
result[manip_name] = self.fwd_kinematics(manip_name, mat_result=mat_result)
return result
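# Illustrative sketch (hedged): round-trip an object pose through the static
# helpers above. Only numpy and math are needed (no OpenRAVE environment);
# the pose and rotation values are arbitrary demonstration inputs.
def _example_pose_round_trip():
    pose = [0.3, -0.2, 0.5]
    rotation = [0.1, 0.4, -0.2]  # (alpha, beta, gamma) rotations about z, y, x
    transform = OpenRAVEBody.transform_from_obj_pose(pose, rotation)
    recovered = OpenRAVEBody.obj_pose_from_transform(transform)
    # recovered is (x, y, z, yaw, pitch, roll) and should match the inputs up
    # to floating point error, away from gimbal lock.
    return transform, recovered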
| 40.0369
| 151
| 0.595346
|
27511c5668a76e696e0764325067f7b90eab876a
| 9,086
|
py
|
Python
|
python/ray/_private/import_thread.py
|
goswamig/amazon-ray
|
9984ebcdc9d0da0de65363074021e9aff2f82636
|
[
"Apache-2.0"
] | 1
|
2022-01-11T11:41:31.000Z
|
2022-01-11T11:41:31.000Z
|
python/ray/_private/import_thread.py
|
goswamig/amazon-ray
|
9984ebcdc9d0da0de65363074021e9aff2f82636
|
[
"Apache-2.0"
] | 27
|
2021-11-19T05:20:27.000Z
|
2022-03-26T07:09:49.000Z
|
python/ray/_private/import_thread.py
|
goswamig/amazon-ray
|
9984ebcdc9d0da0de65363074021e9aff2f82636
|
[
"Apache-2.0"
] | null | null | null |
from collections import defaultdict
import threading
import traceback
import redis
import grpc
import ray
from ray import ray_constants
from ray import cloudpickle as pickle
import ray._private.profiling as profiling
import logging
logger = logging.getLogger(__name__)
class ImportThread:
"""A thread used to import exports from the driver or other workers.
Attributes:
worker: the worker object in this process.
mode: worker mode
redis_client: the redis client used to query exports.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
imported_collision_identifiers: This is a dictionary mapping collision
identifiers for the exported remote functions and actor classes to
the number of times that collision identifier has appeared. This is
used to provide good error messages when the same function or class
is exported many times.
"""
def __init__(self, worker, mode, threads_stopped):
self.worker = worker
self.mode = mode
self.redis_client = worker.redis_client
self.gcs_client = worker.gcs_client
self.threads_stopped = threads_stopped
self.imported_collision_identifiers = defaultdict(int)
def start(self):
"""Start the import thread."""
self.t = threading.Thread(target=self._run, name="ray_import_thread")
# Making the thread a daemon causes it to exit
# when the main thread exits.
self.t.daemon = True
self.t.start()
def join_import_thread(self):
"""Wait for the thread to exit."""
self.t.join()
def _run(self):
import_pubsub_client = self.redis_client.pubsub()
# Exports that are published after the call to
# import_pubsub_client.subscribe and before the call to
# import_pubsub_client.listen will still be processed in the loop.
import_pubsub_client.subscribe("__keyspace@0__:Exports")
# Keep track of the number of imports that we've imported.
num_imported = 0
try:
# Get the exports that occurred before the call to subscribe.
export_keys = self.redis_client.lrange("Exports", 0, -1)
for key in export_keys:
num_imported += 1
self._process_key(key)
while True:
# Exit if we received a signal that we should stop.
if self.threads_stopped.is_set():
return
msg = import_pubsub_client.get_message()
if msg is None:
self.threads_stopped.wait(timeout=0.01)
continue
if msg["type"] == "subscribe":
continue
assert msg["data"] == b"rpush"
num_imports = self.redis_client.llen("Exports")
assert num_imports >= num_imported
for i in range(num_imported, num_imports):
num_imported += 1
key = self.redis_client.lindex("Exports", i)
self._process_key(key)
except (OSError, redis.exceptions.ConnectionError, grpc.RpcError) as e:
logger.error(f"ImportThread: {e}")
finally:
# Close the pubsub client to avoid leaking file descriptors.
import_pubsub_client.close()
def _get_import_info_for_collision_detection(self, key):
"""Retrieve the collision identifier, type, and name of the import."""
if key.startswith(b"RemoteFunction"):
collision_identifier, function_name = self._internal_kv_multiget(
key, ["collision_identifier", "function_name"])
return (collision_identifier,
ray._private.utils.decode(function_name.encode()),
"remote function")
elif key.startswith(b"ActorClass"):
collision_identifier, class_name = self._internal_kv_multiget(
key, ["collision_identifier", "class_name"])
return collision_identifier, ray._private.utils.decode(
class_name.encode()), "actor"
def _process_key(self, key):
"""Process the given export key from redis."""
if self.mode != ray.WORKER_MODE:
# If the same remote function or actor definition appears to be
# exported many times, then print a warning. We only issue this
# warning from the driver so that it is only triggered once instead
# of many times. TODO(rkn): We may want to push this to the driver
# through Redis so that it can be displayed in the dashboard more
# easily.
if (key.startswith(b"RemoteFunction")
or key.startswith(b"ActorClass")):
collision_identifier, name, import_type = (
self._get_import_info_for_collision_detection(key))
self.imported_collision_identifiers[collision_identifier] += 1
if (self.imported_collision_identifiers[collision_identifier]
== ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD):
logger.warning(
"The %s '%s' has been exported %s times. It's "
"possible that this warning is accidental, but this "
"may indicate that the same remote function is being "
"defined repeatedly from within many tasks and "
"exported to all of the workers. This can be a "
"performance issue and can be resolved by defining "
"the remote function on the driver instead. See "
"https://github.com/ray-project/ray/issues/6240 for "
"more discussion.", import_type, name,
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD)
if key.startswith(b"RemoteFunction"):
# TODO (Alex): There's a race condition here if the worker is
# shutdown before the function finished registering (because core
# worker's global worker is unset before shutdown and is needed
# for profiling).
# with profiling.profile("register_remote_function"):
(self.worker.function_actor_manager.
fetch_and_register_remote_function(key))
elif key.startswith(b"FunctionsToRun"):
with profiling.profile("fetch_and_run_function"):
self.fetch_and_execute_function_to_run(key)
elif key.startswith(b"ActorClass"):
# Keep track of the fact that this actor class has been
# exported so that we know it is safe to turn this worker
# into an actor of that class.
self.worker.function_actor_manager.imported_actor_classes.add(key)
with self.worker.function_actor_manager.cv:
# Function manager may be waiting on actor class to be
# loaded for deserialization, notify it to wake up and
# check if the actor class it was looking for is loaded
self.worker.function_actor_manager.cv.notify_all()
# TODO(rkn): We may need to bring back the case of
# fetching actor classes here.
else:
assert False, "This code should be unreachable."
def fetch_and_execute_function_to_run(self, key):
"""Run on arbitrary function on the worker."""
(job_id, serialized_function) = self._internal_kv_multiget(
key, ["job_id", "function"])
if self.worker.mode == ray.SCRIPT_MODE:
return
if ray_constants.ISOLATE_EXPORTS and \
job_id != self.worker.current_job_id.binary():
return
try:
# FunctionActorManager may call pickle.loads at the same time.
# Importing the same module in different threads causes deadlock.
with self.worker.function_actor_manager.lock:
# Deserialize the function.
function = pickle.loads(serialized_function)
# Run the function.
function({"worker": self.worker})
except Exception:
# If an exception was thrown when the function was run, we record
# the traceback and notify the scheduler of the failure.
traceback_str = traceback.format_exc()
# Log the error message.
ray._private.utils.push_error_to_driver(
self.worker,
ray_constants.FUNCTION_TO_RUN_PUSH_ERROR,
traceback_str,
job_id=ray.JobID(job_id))
def _internal_kv_multiget(self, key, fields):
vals = self.gcs_client.internal_kv_get(
key, ray_constants.KV_NAMESPACE_FUNCTION_TABLE)
if vals is None:
vals = {}
else:
vals = pickle.loads(vals)
return (vals.get(field) for field in fields)
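# Illustrative lifecycle sketch (hedged): `worker` below stands in for the
# real Ray worker object (which supplies redis_client and gcs_client); this
# only shows the intended start/stop contract of the class above.
def _example_import_thread_lifecycle(worker):
    threads_stopped = threading.Event()
    import_thread = ImportThread(worker, ray.WORKER_MODE, threads_stopped)
    import_thread.start()  # daemon thread begins processing "Exports"
    # ... the worker executes tasks while exports are imported in the background ...
    threads_stopped.set()  # ask the thread to exit its polling loop
    import_thread.join_import_thread()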
| 45.20398
| 79
| 0.614462
|
f19fcf1773f5801b6771c5b1600be9817e8e14c0
| 1,667
|
py
|
Python
|
test/mock_status.py
|
qarnot/computing-python-sdk
|
cfee10a16bfdaf9e894317b04efd5563933b0ce4
|
[
"Apache-2.0"
] | 11
|
2016-09-08T06:00:37.000Z
|
2021-11-11T21:03:16.000Z
|
test/mock_status.py
|
qarnot/computing-python-sdk
|
cfee10a16bfdaf9e894317b04efd5563933b0ce4
|
[
"Apache-2.0"
] | 3
|
2020-01-20T23:00:32.000Z
|
2021-04-26T08:09:05.000Z
|
test/mock_status.py
|
qarnot/computing-python-sdk
|
cfee10a16bfdaf9e894317b04efd5563933b0ce4
|
[
"Apache-2.0"
] | 5
|
2017-09-26T14:58:58.000Z
|
2021-02-24T15:23:08.000Z
|
default_json_status = {
"timestamp": "0001-01-01T00:00:00Z",
"lastUpdateTimestamp": "0001-01-01T00:00:00Z",
"downloadProgress": 0,
"executionProgress": 100,
"uploadProgress": 100,
"instanceCount": 0,
"downloadTime": "00:00:00",
"downloadTimeSec": 0,
"environmentTime": "00:01:02",
"environmentTimeSec": 62,
"executionTime": "00:28:04",
"executionTimeSec": 1684,
"executionTimeByCpuModel": [
{
"model": "AMD Ryzen 7 2700 Eight-Core Processor",
"time": 1684,
"core": 16
}
],
"executionTimeGhzByCpuModel": [
{
"model": "AMD Ryzen 7 2700 Eight-Core Processor",
"timeGhz": 5385.8447265625,
"clockRatio": 0.999,
"core": 16
}
],
"uploadTime": "00:00:04",
"uploadTimeSec": 4,
"wallTime": "00:30:27",
"wallTimeSec": 1827,
"succeededRange": "0",
"executedRange": "0",
"failedRange": "",
"startedOnceRange": "0",
"runningInstancesInfo": {
"perRunningInstanceInfo": [
],
"snapshotResults": [
"snap1"
],
"timestamp": "0001-01-01T00:00:00Z",
"averageFrequencyGHz": 0,
"maxFrequencyGHz": 0,
"minFrequencyGHz": 0,
"averageMaxFrequencyGHz": 0,
"averageCpuUsage": 0,
"clusterPowerIndicator": 1,
"averageMemoryUsage": 0,
"averageNetworkInKbps": 0,
"averageNetworkOutKbps": 0,
"totalNetworkInKbps": 0,
"totalNetworkOutKbps": 0,
"runningCoreCountByCpuModel": [
]
}
}
| 28.254237
| 62
| 0.527894
|
89645c558eb36442b2daa779b0d35c7def365a0a
| 777
|
py
|
Python
|
src/tests/docker_classif/docker_classif.py
|
terencebeauj/mle_project
|
6270860c4161f259be5663c15c4ff7153fec6639
|
[
"MIT"
] | 1
|
2022-03-25T03:18:19.000Z
|
2022-03-25T03:18:19.000Z
|
src/tests/docker_classif/docker_classif.py
|
terencebeauj/mle_project
|
6270860c4161f259be5663c15c4ff7153fec6639
|
[
"MIT"
] | null | null | null |
src/tests/docker_classif/docker_classif.py
|
terencebeauj/mle_project
|
6270860c4161f259be5663c15c4ff7153fec6639
|
[
"MIT"
] | null | null | null |
import requests
import os
api_address = "fastapi_container"
api_port = "8000"
patient = "10"
username = os.environ.get("username")
password = os.environ.get("password")
params = {"user": username, "password": password}
r = requests.get(url=f"http://{api_adddress}:{api_port}/decision/{patient}", params=params)
status_code = r.status_code
if status_code == 200:
test_status = "SUCCESS"
else:
test_status = "FAILURE"
response = r.json()["score"]
output = f"""
==========
Decision Test
==========
requests done at "/decision/{patient}"
==> {test_status}
expected result must be == 0 or == 1 or == 2
actual result is: {response}
"""
print(output)
if os.environ.get("LOG") == "1":
with open("/home/logs/api_test.log", "a") as file:
file.write(output)
| 20.447368
| 91
| 0.658945
|
f2dd53e48223c9ce6f747e180f2481e2fa345ad9
| 705
|
py
|
Python
|
homework(november)/homeworkAssigment2/greatest_common _divisor.py
|
tkanicka/python_learning
|
67fc0e8ca6333571f8b0d30f835b759d670a8643
|
[
"Unlicense"
] | null | null | null |
homework(november)/homeworkAssigment2/greatest_common _divisor.py
|
tkanicka/python_learning
|
67fc0e8ca6333571f8b0d30f835b759d670a8643
|
[
"Unlicense"
] | null | null | null |
homework(november)/homeworkAssigment2/greatest_common _divisor.py
|
tkanicka/python_learning
|
67fc0e8ca6333571f8b0d30f835b759d670a8643
|
[
"Unlicense"
] | null | null | null |
x = int(input("first number: "))
y = int(input("second number: "))
def GCD(x,y):
smallerNumber = min(abs(x),abs(y))
for divisor in range(1,smallerNumber + 1):
if x % divisor == 0 and y % divisor == 0:
gcd = divisor
return gcd
def GCD_recursion(x,y):
higher_number = max(abs(x), abs(y))
smaller_number = min(abs(x), abs(y))
if higher_number % smaller_number == 0:
return smaller_number # example(15;9): 15%9 = 6 -> 9%6 = 3 -> 6%3 = 0 => 15,9,6,3 % 3 = 0
else:
return GCD_recursion(smaller_number, higher_number % smaller_number)
print(" greatest common divisor of your imputs is: ", GCD(x,y),",", GCD_recursion(x,y))
| 28.2
| 115
| 0.588652
|
945576316526e1ee81b60817d8c4047793cf902b
| 258
|
py
|
Python
|
UnityEngine/AudioClip/PCMSetPositionCallback/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
UnityEngine/AudioClip/PCMSetPositionCallback/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
UnityEngine/AudioClip/PCMSetPositionCallback/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class PCMSetPositionCallback:
def __new__(cls, arg1=None):
'''
:returns: PCMSetPositionCallback
:rtype: UnityEngine.PCMSetPositionCallback
'''
pass
| 21.5
| 50
| 0.670543
|
7426002b592bd5172d7f12c0252cf0e490de9927
| 3,331
|
py
|
Python
|
recorded_future/komand_recorded_future/actions/search_urls/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | null | null | null |
recorded_future/komand_recorded_future/actions/search_urls/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | 1
|
2021-02-23T23:57:37.000Z
|
2021-02-23T23:57:37.000Z
|
recorded_future/komand_recorded_future/actions/search_urls/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | null | null | null |
import komand
import json
from .schema import SearchUrlsInput, SearchUrlsOutput, Component, Input
from komand.exceptions import PluginException
# Custom imports below
class SearchUrls(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='search_urls',
description=Component.DESCRIPTION,
input=SearchUrlsInput(),
output=SearchUrlsOutput())
def run(self, params={}):
riskRuleMap = {
"Historically Reported by Insikt Group": "analystNote",
"C&C URL": "cncUrl",
"Compromised URL": "compromisedUrl",
"Historically Reported as a Defanged URL": "defangedURL",
"Historically Reported by DHS AIS": "dhsAis",
"Historically Reported Fraudulent Content": "fraudulentContent",
"Historically Reported in Threat List": "historicalThreatListMembership",
"Historically Detected Malicious Browser Exploits": "maliciousSiteDetected",
"Historically Detected Malware Distribution": "malwareSiteDetected",
"Historically Detected Cryptocurrency Mining Techniques": "miningSiteDetected",
"Historically Detected Phishing Techniques": "phishingSiteDetected",
"Active Phishing URL": "phishingUrl",
"Positive Malware Verdict": "positiveMalwareVerdict",
"Ransomware Distribution URL": "ransomwareDistribution",
"Recently Reported by Insikt Group": "recentAnalystNote",
"Recently Reported as a Defanged URL": "recentDefangedURL",
"Recently Reported by DHS AIS": "recentDhsAis",
"Recently Reported Fraudulent Content": "recentFraudulentContent",
"Recently Detected Malicious Browser Exploits": "recentMaliciousSiteDetected",
"Recently Detected Malware Distribution": "recentMalwareSiteDetected",
"Recently Detected Cryptocurrency Mining Techniques": "recentMiningSiteDetected",
"Recently Detected Phishing Techniques": "recentPhishingSiteDetected",
"Recently Referenced by Insikt Group": "recentRelatedNote",
"Recently Reported Spam or Unwanted Content": "recentSpamSiteDetected",
"Recently Detected Suspicious Content": "recentSuspiciousSiteDetected",
"Recently Active URL on Weaponized Domain": "recentWeaponizedURL",
"Historically Referenced by Insikt Group": "relatedNote",
"Historically Reported Spam or Unwanted Content": "spamSiteDetected",
"Historically Detected Suspicious Content": "suspiciousSiteDetected"
}
risk_rule = riskRuleMap.get(params.get(Input.RISKRULE))
if risk_rule:
params[Input.RISKRULE] = risk_rule
params["fields"] = "analystNotes,counts,enterpriseLists,entity,metrics,relatedEntities,risk,timestamps,sightings"
if not params.get(Input.RISKSCORE):
params[Input.RISKSCORE] = None
try:
results = self.connection.client.search_urls(**params)
return json.loads(results._req_response._content.decode("utf-8"))
except Exception as e:
self.logger.error("Error: " + str(e))
raise PluginException(preset=PluginException.Preset.UNKNOWN, data=e)
| 52.046875
| 121
| 0.67157
|
74341e0c8c449fabc46b5e251c04639070857c8a
| 1,530
|
py
|
Python
|
pystock_crawler/exporters.py
|
breakhearts/pystock-crawler
|
8b803c8944f36af46daf04c6767a74132e37a101
|
[
"MIT"
] | 320
|
2015-01-01T01:44:54.000Z
|
2022-01-03T15:04:45.000Z
|
pystock_crawler/exporters.py
|
breakhearts/pystock-crawler
|
8b803c8944f36af46daf04c6767a74132e37a101
|
[
"MIT"
] | 8
|
2015-04-23T12:38:36.000Z
|
2018-10-04T16:50:13.000Z
|
pystock_crawler/exporters.py
|
breakhearts/pystock-crawler
|
8b803c8944f36af46daf04c6767a74132e37a101
|
[
"MIT"
] | 112
|
2015-01-06T20:16:13.000Z
|
2022-03-25T05:16:00.000Z
|
from scrapy.conf import settings
from scrapy.contrib.exporter import BaseItemExporter, CsvItemExporter
class CsvItemExporter2(CsvItemExporter):
'''
The standard CsvItemExporter class does not pass the kwargs through to the
CSV writer, resulting in EXPORT_FIELDS and EXPORT_ENCODING being ignored
(EXPORT_EMPTY is not used by CSV).
http://stackoverflow.com/questions/6943778/python-scrapy-how-to-get-csvitemexporter-to-write-columns-in-a-specific-order
'''
def __init__(self, *args, **kwargs):
kwargs['fields_to_export'] = settings.getlist('EXPORT_FIELDS') or None
kwargs['encoding'] = settings.get('EXPORT_ENCODING', 'utf-8')
super(CsvItemExporter2, self).__init__(*args, **kwargs)
def _write_headers_and_set_fields_to_export(self, item):
# HACK: Override this private method to filter fields that are in
# fields_to_export but not in item
if self.include_headers_line:
item_fields = item.fields.keys()
if self.fields_to_export:
self.fields_to_export = filter(lambda a: a in item_fields, self.fields_to_export)
else:
self.fields_to_export = item_fields
self.csv_writer.writerow(self.fields_to_export)
class SymbolListExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs, dont_fail=True)
self.file = file
def export_item(self, item):
self.file.write('%s\t%s\n' % (item['symbol'], item['name']))
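# Illustrative settings sketch (hedged): the exporter above reads the two
# EXPORT_* Scrapy settings; the field names below are placeholders rather
# than a list this project requires, and the exporter is typically wired in
# through Scrapy's FEED_EXPORTERS setting.
# EXPORT_FIELDS = ['symbol', 'end_date', 'revenues', 'eps_basic']
# EXPORT_ENCODING = 'utf-8'
# FEED_EXPORTERS = {'csv': 'pystock_crawler.exporters.CsvItemExporter2'}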
| 38.25
| 124
| 0.69281
|
8b5a97999c26f77632fe246ad595df4852f9dfac
| 411
|
py
|
Python
|
Gecko_web_app_API/wsgi.py
|
urosjevremovic/Github-API-web-app
|
beb784c814c920852a8e2d7ffd6259e8bb58a8f3
|
[
"MIT"
] | null | null | null |
Gecko_web_app_API/wsgi.py
|
urosjevremovic/Github-API-web-app
|
beb784c814c920852a8e2d7ffd6259e8bb58a8f3
|
[
"MIT"
] | 6
|
2020-02-11T23:05:41.000Z
|
2021-09-08T00:05:14.000Z
|
Gecko_web_app_API/wsgi.py
|
urosjevremovic/Github-API-web-app
|
beb784c814c920852a8e2d7ffd6259e8bb58a8f3
|
[
"MIT"
] | null | null | null |
"""
WSGI config for Gecko_web_app_API project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Gecko_web_app_API.settings")
application = get_wsgi_application()
| 24.176471
| 78
| 0.79562
|
d7b5c50f1f6e37912ecbf5882290c16882638bac
| 69,537
|
py
|
Python
|
tensorflow/python/eager/context.py
|
leike666666/tensorflow
|
a3fd0ddfcb716be124e95b51e96e6c1e4507ef64
|
[
"Apache-2.0"
] | 1
|
2019-12-31T02:05:11.000Z
|
2019-12-31T02:05:11.000Z
|
tensorflow/python/eager/context.py
|
leike666666/tensorflow
|
a3fd0ddfcb716be124e95b51e96e6c1e4507ef64
|
[
"Apache-2.0"
] | 2
|
2021-08-25T15:57:35.000Z
|
2022-02-10T01:09:32.000Z
|
tensorflow/python/eager/context.py
|
leike666666/tensorflow
|
a3fd0ddfcb716be124e95b51e96e6c1e4507ef64
|
[
"Apache-2.0"
] | 1
|
2017-11-27T02:55:11.000Z
|
2017-11-27T02:55:11.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""State management for eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import random
import threading
from absl import logging
import numpy as np
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python import tf2
from tensorflow.python.eager import eager_util as c_api_util
from tensorflow.python.eager import executor
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import device as pydev
from tensorflow.python.util import compat
from tensorflow.python.util import is_in_graph_mode
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
GRAPH_MODE = 0
EAGER_MODE = 1
default_execution_mode = EAGER_MODE if tf2.enabled() else GRAPH_MODE
# Cache from (old_device_name, partial_new_device_name) -> (new_device_name,
# new_device_spec).
# Note that we do not protect this with a lock and instead rely on python's GIL
# and the idempotent nature of writes to provide thread safety.
_device_parsing_cache = {}
_starting_device_spec = pydev.DeviceSpec.from_string("")
_MAXINT32 = 2**31 - 1
DEVICE_PLACEMENT_EXPLICIT = pywrap_tfe.TFE_DEVICE_PLACEMENT_EXPLICIT
DEVICE_PLACEMENT_WARN = pywrap_tfe.TFE_DEVICE_PLACEMENT_WARN
DEVICE_PLACEMENT_SILENT = pywrap_tfe.TFE_DEVICE_PLACEMENT_SILENT
DEVICE_PLACEMENT_SILENT_FOR_INT32 = (
pywrap_tfe.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32)
SYNC = 0
ASYNC = 1
MIRRORING_NONE = pywrap_tfe.TFE_MIRRORING_NONE
MIRRORING_ALL = pywrap_tfe.TFE_MIRRORING_ALL
_KEEP_ALIVE_SECS = 600
_python_eager_context_create_counter = monitoring.Counter(
"/tensorflow/api/python/eager_context_create_counter",
"Counter for number of eager contexts created in Python.")
class _EagerTensorCache(object):
"""Simple cache which evicts items based on length in a FIFO manner."""
def __init__(self, max_items=256, max_tensor_size=10000):
self._data = collections.OrderedDict()
self._max_items = max_items
self._max_tensor_size = max_tensor_size
def put(self, key, value):
if value._num_elements() > self._max_tensor_size: # pylint: disable=protected-access
return
self._data[key] = value
if len(self._data) > self._max_items:
self._data.popitem(last=False)
def get(self, key):
return self._data.get(key, None)
def flush(self):
self._data = {}
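# Illustrative sketch (not part of the original TensorFlow source): a minimal
# demonstration of _EagerTensorCache's FIFO eviction and size cap, using a
# hypothetical stand-in object that only provides _num_elements().
def _eager_tensor_cache_demo():
  class _FakeTensor(object):
    def __init__(self, num_elements):
      self._num = num_elements
    def _num_elements(self):
      return self._num
  cache = _EagerTensorCache(max_items=2, max_tensor_size=10)
  cache.put("a", _FakeTensor(1))
  cache.put("b", _FakeTensor(1))
  cache.put("c", _FakeTensor(1))        # oldest entry "a" is evicted (FIFO)
  assert cache.get("a") is None
  cache.put("big", _FakeTensor(100))    # exceeds max_tensor_size, never stored
  assert cache.get("big") is None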
class FunctionCallOptions(object):
"""Options applied at call sites of eager functions.
Eager functions are functions decorated with tf.contrib.eager.defun.
"""
def __init__(self, executor_type=None, config_proto=None):
"""Constructor.
Args:
executor_type: (optional) name of the executor to be used to execute the
eager function. If None or an empty string, the default Tensorflow
executor will be used.
config_proto: (optional) a `config_pb2.ConfigProto` proto or
a serialized string of that proto.
The config used by Grappler when optimizing the function graph.
Each concrete function is optimized the first time it is called. Changing
config_proto after the first call has no effect.
If config_proto is None, an empty RewriterConfig will be used.
"""
self.config_proto_serialized = config_proto
self.executor_type = executor_type
@property
def executor_type(self):
return self._executor_type
@executor_type.setter
def executor_type(self, executor_type):
self._executor_type = executor_type
@property
def config_proto_serialized(self):
return self._config_proto_serialized
@config_proto_serialized.setter
def config_proto_serialized(self, config):
if isinstance(config, config_pb2.ConfigProto):
self._config_proto_serialized = config.SerializeToString()
elif isinstance(config, str):
self._config_proto_serialized = config
elif config is None:
self._config_proto_serialized = (
config_pb2.ConfigProto().SerializeToString())
else:
raise ValueError("the rewriter config must be either a "
"config_pb2.ConfigProto, or a serialized string of that "
"proto or None. got: {}".format(type(config)))
# Map from context_id (an int) to _TensorCaches.
# Dicts are thread safe in CPython.
# TODO(iga): Remove this once TensorCaches are moved to C++.
_tensor_caches_map = {}
class _TensorCaches(threading.local):
"""Thread local tensor caches."""
def __init__(self):
super(_TensorCaches, self).__init__()
self._ones_rank_cache = None
self._zeros_cache = None
@property
def ones_rank_cache(self):
if not self._ones_rank_cache:
self._ones_rank_cache = _EagerTensorCache()
return self._ones_rank_cache
@property
def zeros_cache(self):
if not self._zeros_cache:
self._zeros_cache = _EagerTensorCache()
return self._zeros_cache
class _ThreadLocalData(threading.local):
"""Thread local storage for the eager context."""
def __init__(self):
super(_ThreadLocalData, self).__init__()
self.device_spec = _starting_device_spec
self.device_name = ""
self.is_eager = default_execution_mode == EAGER_MODE
self.scope_name = ""
self.function_call_options = None
self.executor = None
self.op_callbacks = []
self.invoking_op_callbacks = False
ContextSwitch = collections.namedtuple(
"ContextSwitch", ["is_building_function", "enter_context_fn",
"device_stack"])
# `_ContextSwitchStack` is a `threading.local` to match the semantics of
# `_DefaultGraphStack`, which is also a `threading.local`.
class _ContextSwitchStack(threading.local):
"""A thread-local stack of context switches."""
def __init__(self, eager):
super(_ContextSwitchStack, self).__init__()
self.stack = []
if eager:
# Initialize the stack with a pointer to enter the eager context; this
# ensures that the fact that eager execution was enabled is propagated
# across threads, since (1) `enable_eager_execution` modifies a
# process-level flag (`default_execution_mode`) and (2) `__init__` is
# called each time a threading.local object is used in a separate thread.
self.push(is_building_function=False, enter_context_fn=eager_mode,
device_stack=None)
def push(self, is_building_function, enter_context_fn, device_stack):
"""Push metadata about a context switch onto the stack.
A context switch can take any one of the two forms: installing a graph as
the default graph, or entering the eager context. For each context switch,
we record whether or not the entered context is building a function.
Args:
is_building_function: (bool.) Whether the context is building a function.
enter_context_fn: (function.) A callable that executes the context switch.
For example, `graph.as_default` or `eager_mode`.
device_stack: If applicable, the device function stack for this
graph. When breaking out of graphs in init_scope, the innermost nonempty
device stack is used. Eager contexts put `None` here and the value is
never used.
"""
self.stack.append(
ContextSwitch(is_building_function, enter_context_fn, device_stack))
def pop(self):
"""Pop the stack."""
self.stack.pop()
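# Illustrative sketch (not part of the original source): each push records a
# ContextSwitch entry; eager_mode (defined later in this module) is the
# enter_context_fn used when the eager context is entered.
def _context_switch_stack_demo():
  switches = _ContextSwitchStack(eager=False)
  switches.push(is_building_function=False, enter_context_fn=eager_mode,
                device_stack=None)
  assert not switches.stack[-1].is_building_function
  switches.pop()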
@tf_export("config.LogicalDevice")
class LogicalDevice(
collections.namedtuple("LogicalDevice", ["name", "device_type"])):
"""Abstraction for a logical device initialized by the runtime.
A `tf.config.LogicalDevice` corresponds to an initialized logical device on a
`tf.config.PhysicalDevice` or a remote device visible to the cluster. Tensors
and operations can be placed on a specific logical device by calling
`tf.device` with a specified `tf.config.LogicalDevice`.
Fields:
name: The fully qualified name of the device. Can be used for Op or function
placement.
device_type: String declaring the type of device such as "CPU" or "GPU".
"""
pass
@tf_export("config.LogicalDeviceConfiguration",
"config.experimental.VirtualDeviceConfiguration")
class LogicalDeviceConfiguration(
collections.namedtuple("LogicalDeviceConfiguration", ["memory_limit"])):
"""Configuration class for a logical devices.
The class specifies the parameters to configure a `tf.config.PhysicalDevice`
as it is initialized to a `tf.config.LogicalDevice` during runtime
initialization. Not all fields are valid for all device types.
See `tf.config.get_logical_device_configuration` and
`tf.config.set_logical_device_configuration` for usage examples.
Fields:
memory_limit: (optional) Maximum memory (in MB) to allocate on the virtual
device. Currently only supported for GPUs.
"""
def __new__(cls, memory_limit=None):
return super(LogicalDeviceConfiguration, cls).__new__(cls, memory_limit)
@tf_export("config.PhysicalDevice")
class PhysicalDevice(
collections.namedtuple("PhysicalDevice", ["name", "device_type"])):
"""Abstraction for a locally visible physical device.
TensorFlow can utilize various devices such as the CPU or multiple GPUs
for computation. Before initializing a local device for use, the user can
customize certain properties of the device such as its visibility or memory
configuration.
Once a visible `tf.config.PhysicalDevice` is initialized one or more
`tf.config.LogicalDevice` objects are created. Use
`tf.config.set_visible_devices` to configure the visibility of a physical
device and `tf.config.set_logical_device_configuration` to configure multiple
`tf.config.LogicalDevice` objects for a `tf.config.PhysicalDevice`. This is
useful when separation between models is needed or to simulate a multi-device
environment.
Fields:
name: Unique identifier for device.
device_type: String declaring the type of device such as "CPU" or "GPU".
"""
pass
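# Illustrative sketch (not part of the original source): PhysicalDevice and
# LogicalDeviceConfiguration are plain namedtuples and can be constructed
# directly; the device name below is hypothetical.
def _device_abstraction_demo():
  gpu = PhysicalDevice(name="/physical_device:GPU:0", device_type="GPU")
  # Split the hypothetical GPU into two logical devices of 1024 MB each.
  virtual = [LogicalDeviceConfiguration(memory_limit=1024),
             LogicalDeviceConfiguration(memory_limit=1024)]
  return gpu, virtual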
class _AtomicCounter(object):
"""A simple atomic counter."""
def __init__(self):
self._value = 0
self._lock = threading.Lock()
def increment_and_get(self):
with self._lock:
self._value += 1
return self._value
_context_id_counter = _AtomicCounter()
class _TensorCacheDeleter(object):
"""Deletes tensor caches for a given context."""
def __init__(self, context_id):
self._context_id = context_id
def __del__(self):
if _tensor_caches_map is None:
return
if self._context_id in _tensor_caches_map:
del _tensor_caches_map[self._context_id]
# TODO(agarwal): rename to EagerContext / EagerRuntime ?
# TODO(agarwal): consider keeping the corresponding Graph here.
class Context(object):
"""Environment in which eager operations execute."""
# TODO(agarwal): create and link in some documentation for `execution_mode`.
# pylint: disable=redefined-outer-name
def __init__(self,
config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Creates a new Context.
Args:
config: (Optional.) A `ConfigProto` protocol buffer with configuration
options for the Context. Note that a lot of these options may be
currently unimplemented or irrelevant when eager execution is enabled.
device_policy: (Optional.) What policy to use when trying to run an
operation on a device with inputs which are not on that device.
When set to None, an appropriate value will be picked automatically.
The value picked may change between TensorFlow releases.
Defaults to DEVICE_PLACEMENT_SILENT.
Valid values:
- DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is
not correct.
- DEVICE_PLACEMENT_WARN: copies the tensors which are not on the
right device but raises a warning.
- DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might
hide performance problems.
- DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors,
raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched
are actually executed. When set to None, an appropriate value will be
picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- SYNC: executes each operation synchronously.
- ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
server_def: (Optional.) A tensorflow::ServerDef proto.
Enables execution on remote devices. GrpcServers need to be started by
creating an identical server_def to this, and setting the appropriate
task_indexes, so that the servers can communicate. It will then be
possible to execute operations on remote devices.
Raises:
ValueError: If execution_mode is not valid.
"""
# This _id is used only to index the tensor caches.
# TODO(iga): Remove this when tensor caches are moved to C++.
self._id = _context_id_counter.increment_and_get()
self._tensor_cache_deleter = _TensorCacheDeleter(self._id)
_tensor_caches_map[self._id] = _TensorCaches()
self._config = config
self._thread_local_data = _ThreadLocalData()
self._context_switches = _ContextSwitchStack(self.executing_eagerly())
self._context_handle = None
self._context_devices = None
self._seed = None
self._initialize_lock = threading.Lock()
self._initialized = False
if device_policy is None:
device_policy = DEVICE_PLACEMENT_SILENT
self._device_policy = device_policy
self._mirroring_policy = None
if execution_mode not in (None, SYNC, ASYNC):
raise ValueError(
"execution_mode should be None/SYNC/ASYNC. Got %s" % execution_mode)
if execution_mode is None:
execution_mode = SYNC
self._default_is_async = execution_mode == ASYNC
self._lazy_remote_inputs_copy = None
self._server_def = server_def
self._collective_ops_server_def = None
self._collective_leader = None
self._collective_scoped_allocator_enabled_ops = None
self._collective_use_nccl_communication = None
self._collective_device_filters = None
self._device_lock = threading.Lock()
self._physical_devices = None
self._visible_device_list = []
self._memory_growth_map = None
self._virtual_device_map = {}
# Values set after construction
self._optimizer_jit = None
self._intra_op_parallelism_threads = None
self._inter_op_parallelism_threads = None
self._soft_device_placement = None
self._log_device_placement = None
self._enable_mlir_bridge = None
self._optimizer_experimental_options = {}
_python_eager_context_create_counter.get_cell().increase_by(1)
# pylint: enable=redefined-outer-name
def _set_global_seed(self, seed):
"""Set a global eager mode seed for random ops."""
self._seed = seed
# `random.Random(seed)` needs `seed` to be hashable, while values of type
# e.g. `np.int64` or `np.ndarray` are not. We use `int(...)` to convert them
# to int.
try:
hash(seed)
except TypeError:
seed = int(np.array(seed))
self._rng = random.Random(seed)
# Also clear the kernel cache, to reset any existing seeds
if self._context_handle is not None:
pywrap_tfe.TFE_ContextClearCaches(self._context_handle)
def _internal_operation_seed(self):
"""Returns a fake operation seed.
In eager mode, users shouldn't set or depend on the operation seed.
Here, we generate a random seed based on the global seed so that each
operation's randomness differs while still depending on the global seed.
Returns:
A fake operation seed based on global seed.
"""
return self._rng.randint(0, _MAXINT32)
def _initialize_logical_devices(self):
"""Helper to initialize devices."""
# Store list of devices
logical_devices = []
context_devices = []
device_list = pywrap_tfe.TFE_ContextListDevices(self._context_handle)
try:
self._num_gpus = 0
for i in range(pywrap_tfe.TF_DeviceListCount(device_list)):
dev_name = pywrap_tfe.TF_DeviceListName(device_list, i)
context_devices.append(pydev.canonical_name(dev_name))
spec = pydev.DeviceSpec.from_string(dev_name)
# If the job is localhost, we assume that the cluster has not yet been
# configured and thus clear the job, replica & task.
if spec.job == "localhost":
spec = spec.replace(job=None, replica=None, task=None)
logical_devices.append(
LogicalDevice(name=spec.to_string(), device_type=spec.device_type))
dev_type = pywrap_tfe.TF_DeviceListType(device_list, i)
if dev_type == "GPU":
self._num_gpus += 1
finally:
self._logical_devices = logical_devices
self._context_devices = context_devices
pywrap_tfe.TF_DeleteDeviceList(device_list)
def ensure_initialized(self):
"""Initialize handle and devices if not already done so."""
if self._initialized:
return
with self._initialize_lock:
if self._initialized:
return
assert self._context_devices is None
opts = pywrap_tfe.TFE_NewContextOptions()
try:
config_str = self.config.SerializeToString()
pywrap_tfe.TFE_ContextOptionsSetConfig(opts, config_str)
if self._device_policy is not None:
pywrap_tfe.TFE_ContextOptionsSetDevicePlacementPolicy(
opts, self._device_policy)
if self._mirroring_policy is not None:
pywrap_tfe.TFE_ContextOptionsSetMirroringPolicy(
opts, self._mirroring_policy)
if self._default_is_async == ASYNC:
pywrap_tfe.TFE_ContextOptionsSetAsync(opts, True)
if self._lazy_remote_inputs_copy is not None:
pywrap_tfe.TFE_ContextOptionsSetLazyRemoteInputsCopy(
opts, self._lazy_remote_inputs_copy)
context_handle = pywrap_tfe.TFE_NewContext(opts)
finally:
pywrap_tfe.TFE_DeleteContextOptions(opts)
assert not (self._server_def and self._collective_ops_server_def), (
"Cannot enable remote execution as well as collective ops at the "
"moment. If this is important to you, please file an issue.")
if self._server_def is not None:
server_def_str = self._server_def.SerializeToString()
pywrap_tfe.TFE_ContextSetServerDef(context_handle, _KEEP_ALIVE_SECS,
server_def_str)
elif self._collective_ops_server_def is not None:
server_def_str = self._collective_ops_server_def.SerializeToString()
pywrap_tfe.TFE_EnableCollectiveOps(context_handle, server_def_str)
self._context_handle = context_handle
self._initialize_logical_devices()
self._initialized = True
def _clear_caches(self):
self.ones_rank_cache().flush()
self.zeros_cache().flush()
pywrap_tfe.TFE_ClearScalarCache()
def get_server_def(self):
return self._server_def
def set_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):
"""Allow setting a server_def on the context.
When a server def is replaced, it effectively clears a bunch of caches
within the context. If you attempt to use a tensor object that was pointing
to a tensor on the remote device, it will raise an error.
Args:
server_def: A tensorflow::ServerDef proto.
Enables execution on remote devices.
keep_alive_secs: Num. seconds after which the remote end will hang up.
As long as the client is still alive, the server state for the context
will be kept alive. If the client is killed (or there is some failure),
the server will clean up its context keep_alive_secs after the final RPC
it receives.
Raises:
ValueError: if server_def is None.
"""
if not server_def:
raise ValueError("server_def is None.")
self._server_def = server_def
if self._context_handle:
server_def_str = server_def.SerializeToString()
pywrap_tfe.TFE_ContextSetServerDef(self._context_handle, keep_alive_secs,
server_def_str)
self._initialize_logical_devices()
# Clear all the caches in case there are remote tensors in them.
self._clear_caches()
def update_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):
"""Update a server_def on the context.
Args:
server_def: A tensorflow::ServerDef proto. Enables execution on remote
devices.
keep_alive_secs: Num. seconds after which the remote end will hang up. As
long as the client is still alive, the server state for the context will
be kept alive. If the client is killed (or there is some failure), the
server will clean up its context keep_alive_secs after the final RPC it
receives.
Raises:
ValueError: if server_def is None.
"""
if not server_def:
raise ValueError("server_def is None.")
self._server_def = server_def
if self._context_handle:
server_def_str = server_def.SerializeToString()
pywrap_tfe.TFE_ContextUpdateServerDef(self._context_handle,
keep_alive_secs, server_def_str)
self._initialize_logical_devices()
self._clear_caches()
def check_alive(self, worker_name):
"""Checks whether a remote worker is alive or not.
Args:
worker_name: a string representing the remote worker. It must be a fully
specified name like "/job:worker/replica:0/task:0".
Returns:
a boolean indicating whether the remote worker is alive or not.
Raises:
ValueError: if context is not initialized.
"""
# TODO(yuefengz): support checking multiple workers.
if self._context_handle:
return pywrap_tfe.TFE_ContextCheckAlive(self._context_handle, worker_name)
else:
raise ValueError("Context is not initialized.")
def enable_collective_ops(self, server_def):
"""Enable distributed collective ops with an appropriate server_def.
Args:
server_def: A tensorflow::ServerDef proto. Enables execution on remote
devices.
Raises:
ValueError: if server_def is None.
RuntimeError: if this method is not called at program startup.
"""
if not server_def:
raise ValueError("server_def is None.")
# TODO(b/129298253): Allow creating datasets/tensors before enabling
# collective ops.
if self._context_handle is not None:
logging.warning("Enabling collective ops after program startup may cause "
"error when accessing previously created tensors.")
self._collective_ops_server_def = server_def
def configure_collective_ops(
self,
collective_leader="",
scoped_allocator_enabled_ops=("CollectiveReduce",),
use_nccl_communication=False,
device_filters=None):
"""Configure collective ops.
A collective group leader is necessary for collective ops to run; other
configurations are mainly for performance.
Args:
collective_leader: a device string for collective leader, e.g.
"/job:worker/replica:0/task:0"; empty string means local execution of
collective ops.
scoped_allocator_enabled_ops: a tuple or a list of op names for scoped
allocator to run with.
use_nccl_communication: whether to use nccl communication for collective
ops.
device_filters: a tuple or a list of device strings. If set, corresponding
task can only see the devices filtered by these device filters.
Raises:
RuntimeError: if this method is not called at program startup.
"""
if self._collective_leader is not None:
if (self._collective_leader != collective_leader or
self._collective_scoped_allocator_enabled_ops !=
scoped_allocator_enabled_ops or
self._collective_use_nccl_communication != use_nccl_communication or
self._collective_device_filters != device_filters):
raise ValueError("Collective ops are already configured.")
else:
return
if self._context_handle is not None:
raise RuntimeError("Collective ops must be configured at program startup")
self._collective_leader = collective_leader
self._collective_scoped_allocator_enabled_ops = scoped_allocator_enabled_ops
self._collective_use_nccl_communication = use_nccl_communication
self._collective_device_filters = device_filters
@property
def _handle(self):
if self._context_handle is None:
raise AssertionError("Context must be initialized first.")
return self._context_handle
@property
def _devices(self):
if self._context_devices is None:
raise AssertionError("Context must be initialized first.")
return self._context_devices
def __str__(self):
if self._context_handle is None:
return "Eager TensorFlow Context. Devices currently uninitialized."
else:
devices = self._devices
lines = ["Eager TensorFlow Context with %d devices" % (len(devices))]
for i, d in enumerate(devices):
lines.append(" Device %d: %s" % (i, d))
return "\n".join(lines)
@tf_contextlib.contextmanager
def _mode(self, mode):
"""A context manager to allow setting the mode to EAGER/GRAPH."""
ctx = self._thread_local_data
old_is_eager = ctx.is_eager
ctx.is_eager = mode == EAGER_MODE
if mode == EAGER_MODE:
# Entering graph mode does not provide us with sufficient information to
# record a context switch; graph-based context switches are only logged
# when a graph is registered as the default graph.
self.context_switches.push(False, eager_mode, None)
try:
yield
finally:
ctx.is_eager = old_is_eager
if mode == EAGER_MODE:
self.context_switches.pop()
def executing_eagerly(self):
"""Returns True if current thread has eager executing enabled."""
return self._thread_local_data.is_eager
def ones_rank_cache(self):
"""Per-device cache for scalars."""
return _tensor_caches_map[self._id].ones_rank_cache
def zeros_cache(self):
"""Per-device cache for scalars."""
return _tensor_caches_map[self._id].zeros_cache
@property
def scope_name(self):
"""Returns scope name for the current thread."""
return self._thread_local_data.scope_name
@scope_name.setter
def scope_name(self, s):
"""Sets scope name for the current thread."""
self._thread_local_data.scope_name = s
@property
def device_name(self):
"""Returns the device name for the current thread."""
return self._thread_local_data.device_name
@property
def device_spec(self):
"""Returns the device spec for the current thread."""
return self._thread_local_data.device_spec
def _set_device(self, device_name, device_spec):
self._thread_local_data.device_name = device_name
self._thread_local_data.device_spec = device_spec
def device(self, name):
"""Context-manager to force placement of operations and Tensors on a device.
Args:
name: Name of the device or None to get default placement.
Returns:
Context manager that forces device placement.
Raises:
ValueError: If name is not a string or is an invalid device name.
RuntimeError: If device scopes are not properly nested.
"""
if isinstance(name, LogicalDevice):
name = name.name
elif pydev.is_device_spec(name):
name = name.to_string()
return _EagerDeviceContext(self, name)
def devices(self):
"""List of the names of devices available to execute operations."""
return self._devices
# TODO(fishx): remove this property.
@property
def execution_mode(self):
"""Gets execution mode for current thread."""
return ASYNC if self.is_async() else SYNC
@execution_mode.setter
def execution_mode(self, mode):
"""Sets execution mode for current thread."""
if mode not in (None, SYNC, ASYNC):
raise ValueError(
"Execution mode should be None/SYNC/ASYNC. Got %s" % mode)
if mode is None:
mode = SYNC
enable_async = (mode == ASYNC)
if self.is_async() != enable_async:
# Only set the execution mode if the context has already been initialized
if self._context_handle is not None:
self.executor.wait()
executor_new = executor.new_executor(enable_async)
self._thread_local_data.executor = executor_new
pywrap_tfe.TFE_ContextSetExecutorForThread(self._context_handle,
executor_new.handle())
else:
self._default_is_async = enable_async
def is_async(self):
if self._context_handle is not None:
return self.executor.is_async()
else:
return self._default_is_async
@property
def executor(self):
ensure_initialized()
return executor.Executor(
pywrap_tfe.TFE_ContextGetExecutorForThread(self._context_handle))
@executor.setter
def executor(self, e):
ensure_initialized()
pywrap_tfe.TFE_ContextSetExecutorForThread(self._context_handle, e.handle())
@property
def config(self):
"""Return the ConfigProto with all runtime deltas applied."""
# Ensure physical devices have been discovered and config has been imported
self._initialize_physical_devices()
config = config_pb2.ConfigProto()
if self._config is not None:
config.CopyFrom(self._config)
if self._optimizer_jit is not None:
config.graph_options.optimizer_options.global_jit_level = (
config_pb2.OptimizerOptions.ON_1
if self._optimizer_jit else config_pb2.OptimizerOptions.OFF)
if self._intra_op_parallelism_threads is not None:
config.intra_op_parallelism_threads = self._intra_op_parallelism_threads
if self._inter_op_parallelism_threads is not None:
config.inter_op_parallelism_threads = self._inter_op_parallelism_threads
if self._soft_device_placement is not None:
config.allow_soft_placement = self._soft_device_placement
else:
config.allow_soft_placement = self.executing_eagerly()
if self._log_device_placement is not None:
config.log_device_placement = self._log_device_placement
if self._enable_mlir_bridge is not None:
config.experimental.enable_mlir_bridge = self._enable_mlir_bridge
def rewriter_toggle(option):
toggle = self._optimizer_experimental_options.get(option, None)
if toggle is None:
return
setattr(config.graph_options.rewrite_options,
option,
(rewriter_config_pb2.RewriterConfig.ON
if toggle else rewriter_config_pb2.RewriterConfig.OFF))
def rewriter_bool(option):
toggle = self._optimizer_experimental_options.get(option, None)
if toggle is None:
return
setattr(config.graph_options.rewrite_options,
option,
toggle)
rewriter_toggle("layout_optimizer")
rewriter_toggle("constant_folding")
rewriter_toggle("shape_optimization")
rewriter_toggle("remapping")
rewriter_toggle("arithmetic_optimization")
rewriter_toggle("dependency_optimization")
rewriter_toggle("loop_optimization")
rewriter_toggle("function_optimization")
rewriter_toggle("debug_stripper")
rewriter_bool("disable_model_pruning")
rewriter_toggle("scoped_allocator_optimization")
rewriter_toggle("pin_to_host_optimization")
rewriter_toggle("implementation_selector")
rewriter_toggle("auto_mixed_precision")
rewriter_bool("disable_meta_optimizer")
nodes = self._optimizer_experimental_options.get("min_graph_nodes", None)
if nodes is not None:
config.graph_options.rewrite_options.min_graph_nodes = nodes
# Compute device counts
config.device_count["CPU"] = 0
config.device_count["GPU"] = 0
for dev in self._physical_devices:
if dev not in self._visible_device_list:
continue
virtual_devices = self._virtual_device_map.get(dev)
if virtual_devices is None:
config.device_count[dev.device_type] += 1
else:
config.device_count[dev.device_type] += len(virtual_devices)
# Configure gpu_options
gpu_options = self._compute_gpu_options()
config.gpu_options.MergeFrom(gpu_options)
# Configure collective ops
if self._collective_leader:
config.experimental.collective_group_leader = self._collective_leader
if self._collective_scoped_allocator_enabled_ops:
rewrite_options = config.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
del rewrite_options.scoped_allocator_opts.enable_op[:]
for op in self._collective_scoped_allocator_enabled_ops:
rewrite_options.scoped_allocator_opts.enable_op.append(op)
if self._collective_use_nccl_communication:
config.experimental.collective_nccl = True
if self._collective_device_filters:
del config.device_filters[:]
for f in self._collective_device_filters:
config.device_filters.append(f)
return config
def _compute_gpu_options(self):
"""Build the GPUOptions proto."""
visible_device_list = []
virtual_devices = []
gpu_index = -1
memory_growths = set()
for dev in self.list_physical_devices("GPU"):
gpu_index += 1
if dev not in self._visible_device_list:
continue
growth = self._memory_growth_map[dev]
memory_growths.add(growth)
visible_device_list.append(str(gpu_index))
if self._virtual_device_map:
vdevs = self._virtual_device_map.get(dev, [])
device_limits = []
for virt_dev in vdevs:
device_limits.append(virt_dev.memory_limit)
virtual_devices.append(
config_pb2.GPUOptions.Experimental.VirtualDevices(
memory_limit_mb=device_limits))
# Only compute growth if virtual devices have not been configured and we
# have GPUs
if not virtual_devices and memory_growths:
if len(memory_growths) > 1:
raise ValueError("Memory growth cannot differ between GPU devices")
allow_growth = memory_growths.pop()
else:
allow_growth = None
return config_pb2.GPUOptions(
allow_growth=allow_growth,
visible_device_list=",".join(visible_device_list),
experimental=config_pb2.GPUOptions.Experimental(
virtual_devices=virtual_devices))
@property
def function_call_options(self):
"""Returns function call options for current thread.
Note that the returned object is still referenced by the eager context.
Returns: the FunctionCallOptions for current thread.
"""
if self._thread_local_data.function_call_options is None:
config = self.config
# Default to soft placement for functions unless specified
if self._soft_device_placement is None:
config.allow_soft_placement = True
self._thread_local_data.function_call_options = FunctionCallOptions(
config_proto=config)
return self._thread_local_data.function_call_options
@function_call_options.setter
def function_call_options(self, options):
"""Returns function call options for current thread."""
self._thread_local_data.function_call_options = options
def num_gpus(self):
"""The number of GPUs available to execute operations."""
self.ensure_initialized()
return self._num_gpus
def add_function(self, fn):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fn: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).
"""
self.ensure_initialized()
pywrap_tfe.TFE_ContextAddFunction(self._handle, fn)
def add_function_def(self, fdef):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fdef: A FunctionDef protocol buffer message.
"""
self.ensure_initialized()
fdef_string = fdef.SerializeToString()
pywrap_tfe.TFE_ContextAddFunctionDef(self._handle, fdef_string,
len(fdef_string))
def remove_function(self, name):
"""Remove a function from the context.
Once removed, the function cannot be executed anymore.
Args:
name: function signature name.
"""
self.ensure_initialized()
pywrap_tfe.TFE_ContextRemoveFunction(self._handle, name)
def has_function(self, name):
"""Check if a function `name` is registered."""
self.ensure_initialized()
return bool(pywrap_tfe.TFE_ContextHasFunction(self._handle, name))
def add_op_callback(self, callback):
"""Add a post-op callback to the context.
A post-op callback is invoked immediately after an eager operation or
function has finished execution or after an op has been added to a graph,
providing access to the op's type, name, input and output tensors. Multiple
op callbacks can be added, in which case the callbacks will be invoked in
the order in which they are added.
Args:
callback: a callable of the signature
`f(op_type, inputs, attrs, outputs, op_name=None, graph=None)`.
See doc strings in `op_callbacks.py` for details on the function
signature and its semantics.
"""
if callback not in self._thread_local_data.op_callbacks:
self._thread_local_data.op_callbacks.append(callback)
def remove_op_callback(self, callback):
"""Remove an already-registered op callback.
Args:
callback: The op callback to be removed.
Raises:
KeyError: If `callback` is not already registered.
"""
if callback not in self._thread_local_data.op_callbacks:
raise KeyError(
"The specified op callback has not been registered, "
"and hence cannot be removed.")
del self._thread_local_data.op_callbacks[
self._thread_local_data.op_callbacks.index(callback)]
@property
def op_callbacks(self):
return self._thread_local_data.op_callbacks
@property
def invoking_op_callbacks(self):
return self._thread_local_data.invoking_op_callbacks
@invoking_op_callbacks.setter
def invoking_op_callbacks(self, value):
self._thread_local_data.invoking_op_callbacks = value
def _initialize_physical_devices(self):
"""Get local devices visible to the system."""
# We lazily initialize self._physical_devices since we do not want to do this
# in the constructor, as the backend may not be initialized yet.
with self._device_lock:
if self._physical_devices is not None:
return
devs = pywrap_tfe.TF_ListPhysicalDevices()
self._physical_devices = [
PhysicalDevice(name=d.decode(),
device_type=d.decode().split(":")[1]) for d in devs]
# Construct the visible device list from all physical devices but ignore
# XLA devices
self._visible_device_list = [
d for d in self._physical_devices
if not d.device_type.startswith("XLA")
]
self._memory_growth_map = {
d: None for d in self._physical_devices if d.device_type == "GPU"
}
# Import device settings that may have been passed into the constructor
self._import_config()
def list_physical_devices(self, device_type=None):
"""List local devices visible to the system.
This API allows a client to query the devices before they have been
initialized by the eager runtime. Additionally a user can filter by device
type, to get only CPUs or GPUs.
Args:
device_type: Optional device type to limit results to
Returns:
List of PhysicalDevice objects.
"""
self._initialize_physical_devices()
if device_type is None:
return list(self._physical_devices)
return [d for d in self._physical_devices if d.device_type == device_type]
def _import_config(self):
"""Import config if passed in during construction.
If Context was created with a ConfigProto such as when calling
tf.compat.v1.enable_eager_execution(), then we need to pull out the
various pieces we might be replacing and import them into our internal
class representation.
"""
if self._config is None:
return
num_cpus = self._config.device_count.get("CPU", 1)
if num_cpus != 1:
cpus = [d for d in self._physical_devices if d.device_type == "CPU"]
if num_cpus == 0:
self.set_visible_devices([], "CPU")
elif num_cpus > 1:
self.set_logical_device_configuration(
cpus[0], [LogicalDeviceConfiguration() for _ in range(num_cpus)])
# Parse GPU options
gpus = [d for d in self._physical_devices if d.device_type == "GPU"]
# If there are no GPUs detected, simply ignore all the GPU options passed in
# rather than doing any validation checks.
if not gpus:
return
gpu_count = self._config.device_count.get("GPU", None)
visible_gpus = []
# TODO(gjn): Handle importing existing virtual GPU configuration
visible_indices = self._config.gpu_options.visible_device_list
if visible_indices:
for index in visible_indices.split(","):
if int(index) >= len(gpus):
raise ValueError("Invalid visible device index: %s" % index)
visible_gpus.append(gpus[int(index)])
else:
visible_gpus = gpus
if gpu_count is not None:
visible_gpus = visible_gpus[:gpu_count]
self.set_visible_devices(visible_gpus, "GPU")
def list_logical_devices(self, device_type=None):
"""Return logical devices."""
self.ensure_initialized()
if device_type is None:
return list(self._logical_devices)
return [d for d in self._logical_devices if d.device_type == device_type]
def get_visible_devices(self, device_type=None):
"""Get the list of visible devices."""
self._initialize_physical_devices()
if device_type is None:
return list(self._visible_device_list)
return [
d for d in self._visible_device_list if d.device_type == device_type
]
def set_visible_devices(self, devices, device_type=None):
"""Set the list of visible devices."""
self._initialize_physical_devices()
if not isinstance(devices, list):
devices = [devices]
for d in devices:
if d not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(d))
if device_type is not None and d.device_type != device_type:
raise ValueError("Unrecognized device: %s" % repr(d))
visible_device_list = []
if device_type is not None:
visible_device_list = [
d for d in self._visible_device_list if d.device_type != device_type
]
visible_device_list += devices
if self._visible_device_list == visible_device_list:
return
if self._context_handle is not None:
raise RuntimeError(
"Visible devices cannot be modified after being initialized")
self._visible_device_list = visible_device_list
def get_memory_growth(self, dev):
"""Get if memory growth is enabled for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
return self._memory_growth_map[dev]
def set_memory_growth(self, dev, enable):
"""Set if memory growth should be enabled for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
if dev in self._virtual_device_map:
raise ValueError(
"Cannot set memory growth on device when virtual devices configured")
if dev.device_type != "GPU":
raise ValueError("Cannot set memory growth on non-GPU devices")
if self._memory_growth_map.get(dev) == enable:
return
if self._context_handle is not None:
raise RuntimeError(
"Physical devices cannot be modified after being initialized")
self._memory_growth_map[dev] = enable
def get_logical_device_configuration(self, dev):
"""Get the virtual device configuration for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
return self._virtual_device_map.get(dev)
def set_logical_device_configuration(self, dev, virtual_devices):
"""Set the virtual device configuration for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
if dev.device_type == "CPU":
for vdev in virtual_devices:
if vdev.memory_limit is not None:
raise ValueError("Setting memory limit on CPU virtual devices is "
"currently not supported")
elif dev.device_type == "GPU":
for vdev in virtual_devices:
if vdev.memory_limit is None:
raise ValueError(
"Setting memory limit is required for GPU virtual devices")
else:
raise ValueError("Virtual devices are not supported for %s" %
dev.device_type)
if self._virtual_device_map.get(dev) == virtual_devices:
return
if self._context_handle is not None:
raise RuntimeError(
"Virtual devices cannot be modified after being initialized")
self._virtual_device_map[dev] = virtual_devices
@property
def enable_mlir_bridge(self):
return self._enable_mlir_bridge
@enable_mlir_bridge.setter
def enable_mlir_bridge(self, enabled):
self._enable_mlir_bridge = enabled
self._thread_local_data.function_call_options = None
@property
def optimizer_jit(self):
level = self.config.graph_options.optimizer_options.global_jit_level
return (level == config_pb2.OptimizerOptions.ON_1 or
level == config_pb2.OptimizerOptions.ON_2)
@optimizer_jit.setter
def optimizer_jit(self, enabled):
self._optimizer_jit = enabled
self._thread_local_data.function_call_options = None
def get_optimizer_experimental_options(self):
"""Get experimental options for the optimizer.
Returns:
Dictionary of current option values
"""
rewrite_options = self.config.graph_options.rewrite_options
options = {}
def rewriter_toggle(option):
attr = getattr(rewrite_options, option)
if attr != 0:
options[option] = (attr == rewriter_config_pb2.RewriterConfig.ON)
def rewriter_bool(option):
options[option] = getattr(rewrite_options, option)
rewriter_toggle("layout_optimizer")
rewriter_toggle("constant_folding")
rewriter_toggle("shape_optimization")
rewriter_toggle("remapping")
rewriter_toggle("arithmetic_optimization")
rewriter_toggle("dependency_optimization")
rewriter_toggle("loop_optimization")
rewriter_toggle("function_optimization")
rewriter_toggle("debug_stripper")
rewriter_bool("disable_model_pruning")
rewriter_toggle("scoped_allocator_optimization")
rewriter_toggle("pin_to_host_optimization")
rewriter_toggle("implementation_selector")
rewriter_toggle("auto_mixed_precision")
rewriter_bool("disable_meta_optimizer")
if rewrite_options.min_graph_nodes != 0:
options["min_graph_nodes"] = rewrite_options.min_graph_nodes
return options
def set_optimizer_experimental_options(self, options):
"""Set experimental options for the optimizer.
Args:
options: Dictionary of options to modify
"""
self._optimizer_experimental_options.update(options)
self._thread_local_data.function_call_options = None
@property
def intra_op_parallelism_threads(self):
return self.config.intra_op_parallelism_threads
@intra_op_parallelism_threads.setter
def intra_op_parallelism_threads(self, num_threads):
if self._intra_op_parallelism_threads == num_threads:
return
if self._context_handle is not None:
raise RuntimeError(
"Intra op parallelism cannot be modified after initialization.")
self._intra_op_parallelism_threads = num_threads
@property
def inter_op_parallelism_threads(self):
return self.config.inter_op_parallelism_threads
@inter_op_parallelism_threads.setter
def inter_op_parallelism_threads(self, num_threads):
if self._inter_op_parallelism_threads == num_threads:
return
if self._context_handle is not None:
raise RuntimeError(
"Inter op parallelism cannot be modified after initialization.")
self._inter_op_parallelism_threads = num_threads
@property
def soft_device_placement(self):
return self.config.allow_soft_placement
@soft_device_placement.setter
def soft_device_placement(self, enabled):
self._soft_device_placement = enabled
self._thread_local_data.function_call_options = None
@property
def log_device_placement(self):
return self.config.log_device_placement
@log_device_placement.setter
def log_device_placement(self, enabled):
if self._log_device_placement == enabled:
return
if self._context_handle is not None:
raise RuntimeError(
"Device placement logging must be set at program startup")
self._log_device_placement = enabled
self._thread_local_data.function_call_options = None
@property
def device_policy(self):
# Only get the policy from the context if it has already been initialized
if self._context_handle is not None:
return pywrap_tfe.TFE_ContextGetDevicePlacementPolicy(self._handle)
return self._device_policy
@device_policy.setter
def device_policy(self, policy):
if policy is None:
policy = DEVICE_PLACEMENT_SILENT
if self._device_policy != policy:
self._device_policy = policy
# Only set the policy if the context has already been initialized
if self._context_handle is not None:
pywrap_tfe.TFE_ContextSetThreadLocalDevicePlacementPolicy(
self._handle, self._device_policy)
@property
def mirroring_policy(self):
# Only get the policy from the context if it has already been initialized
if self._context_handle is not None:
return pywrap_tfe.TFE_ContextGetMirroringPolicy(self._handle)
return self._mirroring_policy
@mirroring_policy.setter
def mirroring_policy(self, policy):
if policy is None:
policy = MIRRORING_NONE
if self._mirroring_policy is None or self._mirroring_policy != policy:
self._mirroring_policy = policy
# Only set the policy if the context has already been initialized
if self._context_handle is not None:
pywrap_tfe.TFE_ContextSetThreadLocalMirroringPolicy(
self._handle, self._mirroring_policy)
@property
def lazy_remote_inputs_copy(self):
return self._lazy_remote_inputs_copy
@lazy_remote_inputs_copy.setter
def lazy_remote_inputs_copy(self, lazy_copy):
"""Sets whether to copy remote inputs lazily for functions."""
if not isinstance(lazy_copy, bool):
raise ValueError("Expecting a boolean but got %s" % type(lazy_copy))
if self._lazy_remote_inputs_copy != lazy_copy:
if self._initialized:
raise ValueError(
"lazy_remote_inputs_copy should be set before being initialized.")
self._lazy_remote_inputs_copy = lazy_copy
def enable_run_metadata(self):
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
self.ensure_initialized()
pywrap_tfe.TFE_ContextEnableRunMetadata(self._handle)
def disable_run_metadata(self):
"""Disables tracing of op execution via RunMetadata."""
if not self._context_handle:
return
pywrap_tfe.TFE_ContextDisableRunMetadata(self._context_handle)
def enable_graph_collection(self):
"""Enables graph collection of executed functions.
To retrieve the accumulated graphs call context.export_run_metadata()
and to stop collecting graphs call context.disable_graph_collection().
"""
self.ensure_initialized()
pywrap_tfe.TFE_ContextEnableGraphCollection(self._handle)
def disable_graph_collection(self):
"""Disables graph collection of executed functions."""
if not self._context_handle:
return
pywrap_tfe.TFE_ContextDisableGraphCollection(self._context_handle)
def export_run_metadata(self):
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer. Or None if not enabled.
"""
if not self._context_handle:
return None
with c_api_util.tf_buffer() as buffer_:
pywrap_tfe.TFE_ContextExportRunMetadata(self._context_handle, buffer_)
proto_data = pywrap_tfe.TF_GetBuffer(buffer_)
run_metadata = config_pb2.RunMetadata()
run_metadata.ParseFromString(compat.as_bytes(proto_data))
return run_metadata
@property
def context_switches(self):
"""Returns a stack of context switches."""
return self._context_switches
def start_step(self):
pywrap_tfe.TFE_ContextStartStep(self._handle)
def end_step(self):
pywrap_tfe.TFE_ContextEndStep(self._handle)
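# Illustrative sketch (not part of the original source): typical interaction
# with the Context singleton through the module-level helpers defined below.
def _context_usage_demo():
  ctx = context()                      # the singleton Context
  ctx.ensure_initialized()             # creates the underlying TFE context
  with ctx.device("/device:CPU:0"):    # force placement for ops created here
    pass
  return ctx.executing_eagerly()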
class _EagerDeviceContext(object):
"""Context-manager forcing placement of ops and Tensors on a device."""
def __init__(self, ctx, device_name):
self._device_name = device_name
self._ctx = ctx
self._stack = []
def __enter__(self):
ctx = self._ctx
old_device_name = ctx.device_name
old_device_spec = ctx.device_spec
new_device_name = self._device_name
cache_key = (old_device_name, new_device_name)
try:
new_device_name, new_device_spec = _device_parsing_cache[cache_key]
except TypeError:
# Error while trying to compute the cache key.
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(new_device_name), new_device_name))
except KeyError:
# Handle a cache miss.
if new_device_name is not None:
if not isinstance(new_device_name, six.string_types):
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(new_device_name), new_device_name))
device_spec = pydev.DeviceSpec.from_string(new_device_name)
if old_device_name:
new_device_spec = copy.copy(old_device_spec)
else:
ctx.ensure_initialized()
new_device_spec = pydev.DeviceSpec.from_string(
ctx._context_devices[0]) # pylint: disable=protected-access
new_device_spec = new_device_spec.make_merged_spec(device_spec)
else:
new_device_spec = pydev.DeviceSpec.from_string("")
new_device_name = new_device_spec.to_string()
_device_parsing_cache[cache_key] = (new_device_name, new_device_spec)
ctx._set_device(new_device_name, new_device_spec) # pylint: disable=protected-access
self._stack.append((old_device_name, old_device_spec, new_device_spec))
def __exit__(self, *ex_info):
ctx = self._ctx
old_device_name, old_device_spec, new_device_spec = self._stack[-1]
if ctx.device_spec is not new_device_spec:
raise RuntimeError(
"Exiting device scope without proper scope nesting")
del self._stack[-1]
ctx._set_device(old_device_name, old_device_spec) # pylint: disable=protected-access
# Do not set directly. Use _set_context.
_context = None
_context_lock = threading.Lock()
def _set_context_locked(ctx):
global _context
pywrap_tfe.TFE_Py_SetEagerContext(ctx)
_context = ctx
def _set_context(ctx):
with _context_lock:
_set_context_locked(ctx)
def _create_context():
with _context_lock:
if _context is None:
ctx = Context()
_set_context_locked(ctx)
def _reset_context():
"""Clears and re-initializes the singleton context.
Should only be used for testing.
"""
global _context
with _context_lock:
if _context is not None:
_context = None
_create_context()
def context():
"""Returns a singleton context object."""
if _context is None:
_create_context()
return _context
def context_safe():
"""Returns current context (or None if one hasn't been initialized)."""
return _context
def ensure_initialized():
"""Initialize the context."""
context().ensure_initialized()
def set_global_seed(seed):
"""Sets the eager mode seed."""
context()._set_global_seed(seed) # pylint: disable=protected-access
def global_seed():
"""Returns the eager mode seed."""
return context()._seed # pylint: disable=protected-access
def internal_operation_seed():
"""Returns the operation seed generated based on global seed."""
return context()._internal_operation_seed() # pylint: disable=protected-access
@tf_export("executing_eagerly", v1=[])
def executing_eagerly():
"""Checks whether the current thread has eager execution enabled.
Eager execution is enabled by default and this API returns `True`
in most cases. However, this API might return `False` in the following use
cases.
* Executing inside `tf.function`, unless under `tf.init_scope` or
`tf.config.experimental_run_functions_eagerly(True)` is previously called.
* Executing inside a transformation function for `tf.dataset`.
* `tf.compat.v1.disable_eager_execution()` is called.
General case:
>>> print(tf.executing_eagerly())
True
Inside `tf.function`:
>>> @tf.function
... def fn():
... with tf.init_scope():
... print(tf.executing_eagerly())
... print(tf.executing_eagerly())
>>> fn()
True
False
Inside `tf.function` after
`tf.config.experimental_run_functions_eagerly(True)` is called:
>>> tf.config.experimental_run_functions_eagerly(True)
>>> @tf.function
... def fn():
... with tf.init_scope():
... print(tf.executing_eagerly())
... print(tf.executing_eagerly())
>>> fn()
True
True
>>> tf.config.experimental_run_functions_eagerly(False)
Inside a transformation function for `tf.dataset`:
>>> def data_fn(x):
... print(tf.executing_eagerly())
... return x
>>> dataset = tf.data.Dataset.range(100)
>>> dataset = dataset.map(data_fn)
False
Returns:
`True` if the current thread has eager execution enabled.
"""
ctx = context_safe()
if ctx is None:
return default_execution_mode == EAGER_MODE
return ctx.executing_eagerly()
@tf_export(v1=["executing_eagerly"])
def executing_eagerly_v1():
"""Checks whether the current thread has eager execution enabled.
Eager execution is typically enabled via
`tf.compat.v1.enable_eager_execution`, but may also be enabled within the
context of a Python function via tf.contrib.eager.py_func.
When eager execution is enabled, returns `True` in most cases. However,
this API might return `False` in the following use cases.
* Executing inside `tf.function`, unless under `tf.init_scope` or
`tf.config.experimental_run_functions_eagerly(True)` is previously called.
* Executing inside a transformation function for `tf.dataset`.
* `tf.compat.v1.disable_eager_execution()` is called.
>>> tf.compat.v1.enable_eager_execution()
General case:
>>> print(tf.executing_eagerly())
True
Inside `tf.function`:
>>> @tf.function
... def fn():
... with tf.init_scope():
... print(tf.executing_eagerly())
... print(tf.executing_eagerly())
>>> fn()
True
False
Inside `tf.function`
after `tf.config.experimental_run_functions_eagerly(True)` is called:
>>> tf.config.experimental_run_functions_eagerly(True)
>>> @tf.function
... def fn():
... with tf.init_scope():
... print(tf.executing_eagerly())
... print(tf.executing_eagerly())
>>> fn()
True
True
>>> tf.config.experimental_run_functions_eagerly(False)
Inside a transformation function for `tf.dataset`:
>>> def data_fn(x):
... print(tf.executing_eagerly())
... return x
>>> dataset = tf.data.Dataset.range(100)
>>> dataset = dataset.map(data_fn)
False
Returns:
`True` if the current thread has eager execution enabled.
"""
return executing_eagerly()
def in_eager_mode():
"""Use executing_eagerly() instead. This function will be removed."""
return executing_eagerly()
def shared_name(name=None):
"""Returns the anonymous shared name GUID if no shared name is specified.
In eager mode we need to use a unique shared name to avoid spurious sharing
issues. The runtime generates a unique name on our behalf when the reserved
GUID is used as a shared name.
Args:
name: Optional shared name
Returns:
Eager compatible shared name.
"""
if name or not executing_eagerly():
return name
# Ensure a unique name when eager execution is enabled to avoid spurious
# sharing issues.
return "cd2c89b7-88b7-44c8-ad83-06c2a9158347"
def graph_mode():
"""Context-manager to disable eager execution for the current thread."""
return context()._mode(GRAPH_MODE) # pylint: disable=protected-access
def eager_mode():
"""Context-manager to enable eager execution for the current thread."""
return context()._mode(EAGER_MODE) # pylint: disable=protected-access
def scope_name():
"""Name of the current scope."""
return context().scope_name
def device(name):
"""Context-manager to force placement of operations and Tensors on a device.
Example:
```python
with tf.device('gpu:0'):
with tf.device('cpu:0'):
shape = tf.constant([], dtype=tf.int32)
x = tf.random.truncated_normal(shape, tf.float32)
```
will ensure that the `shape` Tensor is on CPU but the `truncated_normal`
operation runs on GPU 0.
Args:
name: Name of the device (see context().devices()), or None to
perform automatic placement.
Returns:
Context manager for setting the device.
"""
ensure_initialized()
return context().device(name)
@tf_export("debugging.get_log_device_placement")
def get_log_device_placement():
"""Get if device placements are logged.
Returns:
If device placements are logged.
"""
return context().log_device_placement
@tf_export("debugging.set_log_device_placement")
def set_log_device_placement(enabled):
"""Set if device placements should be logged.
Args:
enabled: Whether to enable device placement logging.
"""
context().log_device_placement = enabled
@tf_contextlib.contextmanager
def device_policy(policy):
"""Context manager for setting device placement policy for current thread."""
ctx = context()
old_policy = ctx.device_policy
try:
ctx.device_policy = policy
yield
finally:
ctx.device_policy = old_policy
@tf_contextlib.contextmanager
def mirroring_policy(policy):
"""Context manager for setting mirroring policy for current thread."""
ctx = context()
old_policy = ctx.mirroring_policy
try:
ctx.mirroring_policy = policy
yield
finally:
ctx.mirroring_policy = old_policy
def set_execution_mode(mode):
"""Sets execution mode for the current thread."""
context().execution_mode = mode
# TODO(fishx): remove this method.
@tf_contextlib.contextmanager
def execution_mode(mode):
"""Context manager for setting execution mode for current thread."""
if mode is None:
yield
else:
ctx = context()
executor_new = executor.new_executor(mode == ASYNC)
executor_old = ctx.executor
try:
executor_old.wait()
ctx.executor = executor_new
yield
finally:
ctx.executor = executor_old
executor_new.wait()
@tf_contextlib.contextmanager
def executor_scope(e):
"""Context manager for changing executor for current thread.
Args:
e: An Executor to execute eager ops under this scope. Setting it to None will
switch back to use the default executor for the context.
Yields:
Context manager for setting the executor for current thread.
"""
ctx = context()
executor_old = ctx.executor
try:
ctx.executor = e
yield
finally:
ctx.executor = executor_old
@tf_export("experimental.function_executor_type")
@tf_contextlib.contextmanager
def function_executor_type(executor_type):
"""Context manager for setting the executor of eager defined functions.
Eager defined functions are functions decorated by tf.contrib.eager.defun.
Args:
executor_type: a string for the name of the executor to be used to execute
functions defined by tf.contrib.eager.defun.
Yields:
Context manager for setting the executor of eager defined functions.
"""
current_options = context().function_call_options
old_options = copy.copy(current_options)
try:
current_options.executor_type = executor_type
yield
finally:
context().function_call_options = old_options
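# Illustrative sketch (not part of the original source): selecting a named
# executor for functions created under the scope; "SINGLE_THREADED_EXECUTOR"
# is used here as an assumed example of a registered executor type.
def _function_executor_type_demo():
  with function_executor_type("SINGLE_THREADED_EXECUTOR"):
    pass  # functions called in this scope use the named executor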
def is_async():
"""Returns true if current thread is in async mode."""
return context().is_async()
def async_wait():
"""Waits for ops dispatched in ASYNC mode to finish."""
return context().executor.wait()
def async_clear_error():
"""Clears errors raised during ASYNC execution mode."""
return context().executor.clear_error()
def num_gpus():
"""Get the number of available GPU devices.
Returns:
The number of available GPU devices.
"""
return context().num_gpus()
def enable_run_metadata():
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
context().enable_run_metadata()
def disable_run_metadata():
"""Disables tracing of op execution via RunMetadata."""
context().disable_run_metadata()
def enable_graph_collection():
"""Enables graph collection of executed functions.
To retrieve the accumulated graphs call context.export_run_metadata()
and to stop collecting graphs call context.disable_graph_collection().
"""
context().enable_graph_collection()
def disable_graph_collection():
"""Disables graph collection of executed functions."""
context().disable_graph_collection()
def export_run_metadata():
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer.
"""
return context().export_run_metadata()
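# Illustrative workflow sketch for the RunMetadata helpers above (not part of the
# module API): `run_ops` is a hypothetical callable that executes some eager ops.
def _run_metadata_example(run_ops):
  enable_run_metadata()
  try:
    run_ops()
    return export_run_metadata()
  finally:
    disable_run_metadata()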
@contextlib.contextmanager
def collect_optimized_graphs():
"""Collects a flat list of post-optimization graphs.
The collected graphs include device placements, which can be useful for
testing.
Usage:
```
@def_function.function
def f(x):
return x + constant_op.constant(1.)
with context.collect_optimized_graphs() as graphs:
with ops.device("CPU:0"):
f(constant_op.constant(1.))
graph, = graphs # `graph` contains a single GraphDef for inspection
```
Yields:
A list of GraphDefs, populated when the context manager exits.
"""
ctx = context()
ctx.enable_graph_collection()
try:
graphs = []
yield graphs
metadata = ctx.export_run_metadata()
finally:
ctx.disable_graph_collection()
for graph in metadata.function_graphs:
graphs.append(graph.post_optimization_graph)
def get_server_def():
return context().get_server_def()
def set_server_def(server_def):
context().set_server_def(server_def)
def update_server_def(server_def):
context().update_server_def(server_def)
def check_alive(worker_name):
return context().check_alive(worker_name)
def add_function(fdef):
"""Add a function definition to the context."""
context().add_function(fdef)
def remove_function(name):
"""Remove a function from the context."""
context().remove_function(name)
# Not every user creates a Context via context.context()
# (for example, enable_eager_execution in python/framework/ops.py),
# but they do all import this file. Note that IS_IN_GRAPH_MODE and
# in_graph_mode are both parameterless functions.
def _tmp_in_graph_mode():
if context_safe() is None:
# Context not yet initialized. Assume graph mode following the
# default implementation in `is_in_graph_mode`.
return True
return not executing_eagerly()
is_in_graph_mode.IS_IN_GRAPH_MODE = _tmp_in_graph_mode
| 33.034204
| 89
| 0.719789
|
93b087456c44717bf74c6e9f9a7f13f4c4405411
| 2,603
|
py
|
Python
|
devicemanager.py
|
ToddFranks-TM/tinkerAccess
|
f875772f0cd452586d08d4c41c5390aed0f5aea6
|
[
"MIT"
] | 15
|
2016-01-29T21:52:58.000Z
|
2021-04-07T06:21:47.000Z
|
devicemanager.py
|
ToddFranks-TM/tinkerAccess
|
f875772f0cd452586d08d4c41c5390aed0f5aea6
|
[
"MIT"
] | 26
|
2015-07-18T00:04:25.000Z
|
2021-09-24T13:21:27.000Z
|
devicemanager.py
|
ToddFranks-TM/tinkerAccess
|
f875772f0cd452586d08d4c41c5390aed0f5aea6
|
[
"MIT"
] | 16
|
2015-07-18T00:12:04.000Z
|
2022-03-28T07:23:31.000Z
|
#!/usr/bin/python
import sqlite3
import optparse
import sys
parser = optparse.OptionParser()
parser.add_option("-a", "--add", default=False, help='add a device', dest="addDevice", action="store_true")
parser.add_option("-n", "--name", default=False, help='name of device', dest="deviceName", action="store")
parser.add_option("-i", "--id", default=False, help='id of device', dest="deviceId", action="store")
parser.add_option("-d", "--delete", default=False, help='delete device', dest="delDevice", action="store_true")
parser.add_option("-l", "--list", default=False , help='list devices', dest="deviceList", action="store_true")
parser.add_option("-r", "--rename", default=False , help='rename device id(-i) to name(-n)', dest="rename", action="store_true")
parser.add_option("-u", "--setallusers", default=False , help='set allUsers of device id(-i)', dest="setAllUsers", action="store_true")
parser.add_option("-c", "--clrallusers", default=False , help='clear allUsers of device id(-i)', dest="clrAllUsers", action="store_true")
(opts, args) = parser.parse_args()
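# Example invocations (a sketch based on the options defined above; the device
# names and ids shown are hypothetical):
#   python devicemanager.py --list
#   python devicemanager.py --add --name "laser cutter"
#   python devicemanager.py --rename -i 3 -n "new name"
#   python devicemanager.py --setallusers -i 3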
db = sqlite3.connect('db.db')
if opts.deviceList:
cur = db.execute("select * from device")
print("{: >5} {: >30} {: >12}".format("ID", "Device Name", "All Users"))
for rec in cur.fetchall():
print("{: >5} {: >30} {: >12}".format(rec[0], rec[1], rec[2]))
if opts.addDevice:
if not opts.deviceName:
print("Required name use -n option")
sys.exit()
db.cursor().execute("insert into device (name, allUsers) values ('%s', 0)" % opts.deviceName )
db.commit()
if opts.delDevice:
if not opts.deviceId:
print("Required ID of device -i option")
sys.exit()
db.cursor().execute("delete from device where id=%s" % opts.deviceId)
db.cursor().execute("delete from deviceAccess where device=%s" % opts.deviceId)
db.commit()
if opts.rename:
if not opts.deviceId:
print("Required ID of device to rename -i option")
sys.exit()
if not opts.deviceName:
print("Required name to rename to -n option")
        sys.exit()
db.cursor().execute("update device set name='%s' where id=%s" % (opts.deviceName, opts.deviceId) )
db.commit()
if opts.setAllUsers:
if not opts.deviceId:
print("Required ID of device to set allUsers, need -i option")
sys.exit()
db.cursor().execute("update device set allUsers=1 where id=%s" % opts.deviceId)
db.commit()
if opts.clrAllUsers:
if not opts.deviceId:
print("Required ID of device to clear allUsers, need -i option")
sys.exit()
db.cursor().execute("update device set allUsers=0 where id=%s" % opts.deviceId)
db.commit()
| 38.279412
| 137
| 0.674606
|
b87ba305755efeb5865fd30577adc924bc13c6cc
| 2,736
|
py
|
Python
|
tests/pytorch_pfn_extras_tests/profiler_tests/test_time_summary.py
|
kmaehashi/pytorch-pfn-extras
|
70b5db0dad8a8e342cc231e8a18c6f32ce250d1c
|
[
"MIT"
] | null | null | null |
tests/pytorch_pfn_extras_tests/profiler_tests/test_time_summary.py
|
kmaehashi/pytorch-pfn-extras
|
70b5db0dad8a8e342cc231e8a18c6f32ce250d1c
|
[
"MIT"
] | null | null | null |
tests/pytorch_pfn_extras_tests/profiler_tests/test_time_summary.py
|
kmaehashi/pytorch-pfn-extras
|
70b5db0dad8a8e342cc231e8a18c6f32ce250d1c
|
[
"MIT"
] | null | null | null |
import multiprocessing as mp
import subprocess
import sys
import time
import pytest
from pytorch_pfn_extras.profiler import TimeSummary, time_summary
def test_report():
summary = TimeSummary()
with summary.report("foo"):
pass
summary.synchronize()
with summary.summary() as s:
assert "foo" in s[0].compute_mean()
assert "foo.min" in s[1]
assert "foo.max" in s[1]
summary.finalize()
def test_report_async():
summary = TimeSummary()
with summary.report("afoo") as notification:
notification.defer()
time.sleep(0.5)
# Explicitly call object completion
notification.complete()
summary.synchronize()
with summary.summary() as s:
stats = s[0].compute_mean()
assert "afoo" in stats
assert abs(0.5 - stats["afoo"]) < 2e-2
assert abs(0.5 - s[1]["afoo.min"]) < 2e-2
assert abs(0.5 - s[1]["afoo.max"]) < 2e-2
summary.finalize()
def worker(summary):
with summary.report("foo"):
pass
@pytest.mark.skipif(
sys.platform == 'win32',
reason='Multiprocessing not fully supported on Windows')
def test_report_from_other_process():
summary = TimeSummary()
p = mp.Process(target=worker, args=(summary,))
p.start()
p.join()
summary.synchronize()
with summary.summary() as s:
assert "foo" in s[0].compute_mean()
assert "foo.min" in s[1]
assert "foo.max" in s[1]
summary.finalize()
def worker1():
with time_summary.report("foo"):
pass
@pytest.mark.skipif(
sys.platform == 'win32',
reason='Multiprocessing not fully supported on Windows')
def test_global_summary():
time_summary.initialize()
p = mp.Process(target=worker1)
p.start()
p.join()
time_summary.synchronize()
with time_summary.summary() as s:
assert "foo" in s[0].compute_mean()
assert "foo.min" in s[1]
assert "foo.max" in s[1]
def test_clear():
summary = TimeSummary()
summary.add("foo", 10)
summary.add("foo", 5)
summary.add("foo", 15)
summary.synchronize()
with summary.summary(clear=True) as s:
assert s[0].compute_mean() == {"foo": 10}
assert s[1] == {"foo.min": 5, "foo.max": 15}
with summary.summary(clear=True) as s:
assert s[0].compute_mean() == {}
assert s[1] == {}
summary.finalize()
def test_multiprocessing_start_method():
# Ensure that importing PPE does not initialize multiprocessing context.
# See #238 for the context.
subprocess.check_call([
sys.executable,
'-c',
('import multiprocessing as mp; '
+ 'import pytorch_pfn_extras; '
+ 'mp.set_start_method("spawn"); ')
])
| 25.811321
| 76
| 0.621711
|
4677c0c3806d91a34a20159e9b5ec9e51d4e1cce
| 1,641
|
py
|
Python
|
iPokeMon/ipokemon/EdgeManager/terminater.py
|
yalhaizaey/Dreich
|
9528856c3879d4c9d3ced453f223785a71188808
|
[
"Apache-2.0"
] | 25
|
2019-05-09T19:03:37.000Z
|
2022-02-06T20:47:37.000Z
|
Experiments/iPokeMon/ipokemon/EdgeManager/terminater.py
|
jonathanmcchesney/DeFog
|
bc314d41471d00b9d605bb4519f31a465e0a6b75
|
[
"Apache-2.0"
] | null | null | null |
Experiments/iPokeMon/ipokemon/EdgeManager/terminater.py
|
jonathanmcchesney/DeFog
|
bc314d41471d00b9d605bb4519f31a465e0a6b75
|
[
"Apache-2.0"
] | 9
|
2019-08-19T19:00:41.000Z
|
2021-12-09T04:46:07.000Z
|
import os
import argparse
import redis
def connect_redis(conn_dict):
conn = redis.StrictRedis(host=conn_dict['host'],
port=conn_dict['port'],
db=conn_dict['db'])
return conn
def conn_string_type(string):
format = '<host>:<port>/<db>'
try:
host, portdb = string.split(':')
port, db = portdb.split('/')
db = int(db)
except ValueError:
raise argparse.ArgumentTypeError('incorrect format, should be: %s' % format)
return {'host': host,
'port': port,
'db': db}
def migrate_redis(lxc):
src = connect_redis(conn_string_type(lxc['lxcIP']))
dst = connect_redis(conn_string_type(lxc['cloudIP']))
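    # Copy every key from the edge node's redis to the cloud redis using
    # DUMP/RESTORE, preserving each key's TTL where one is set.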
for key in src.keys('*'):
ttl = src.ttl(key)
# we handle TTL command returning -1 (no expire) or -2 (no key)
if ttl < 0:
ttl = 0
print "Dumping key: %s" % key
value = src.dump(key)
print "Restoring key: %s" % key
try:
dst.restore(key, ttl * 1000, value)
except redis.exceptions.ResponseError:
print "Failed to restore key: %s" % key
pass
return
def release_ports(lxc):
"""
Release ports from Edge node
"""
for p in lxc['Ports']:
os.system('iptables -t nat -D PREROUTING -i eth0 -p tcp --dport %d -j DNAT --to %s:%d' % (p, lxc['IP'], p))
print "Done port releasing."
def terminate(lxc):
migrate_redis(lxc)
release_ports(lxc)
os.system('lxc-stop -n %s' % lxc['App'])
os.system('lxc-destroy -n %s' % lxc['App'])
print '%s is terminated.' % lxc['App']
| 29.303571
| 115
| 0.553931
|
0015cf7b1b2dad91f575c7d44becd6d694c854e8
| 5,499
|
py
|
Python
|
bioinfo_sec3/week2/No3_edit_distance.py
|
maqueredkop/Bioinformatics_Specialization_Coursea_Exercise
|
286fb1420994e78612e4dc419420548e3b8f86e7
|
[
"MIT"
] | null | null | null |
bioinfo_sec3/week2/No3_edit_distance.py
|
maqueredkop/Bioinformatics_Specialization_Coursea_Exercise
|
286fb1420994e78612e4dc419420548e3b8f86e7
|
[
"MIT"
] | null | null | null |
bioinfo_sec3/week2/No3_edit_distance.py
|
maqueredkop/Bioinformatics_Specialization_Coursea_Exercise
|
286fb1420994e78612e4dc419420548e3b8f86e7
|
[
"MIT"
] | null | null | null |
'''
Edit Distance Problem: Find the edit distance between two strings.
Input: Two strings.
Output: The edit distance between these strings.
'''
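# For reference, a minimal sketch of the plain Levenshtein recurrence that the
# scoring-matrix alignment below builds on (hypothetical helper, not used by this
# script): D[i][j] = min(D[i-1][j]+1, D[i][j-1]+1, D[i-1][j-1]+[v[i-1] != w[j-1]])
def _plain_edit_distance(v, w):
    dp = [[0] * (len(w) + 1) for _ in range(len(v) + 1)]
    for i in range(len(v) + 1):
        dp[i][0] = i
    for j in range(len(w) + 1):
        dp[0][j] = j
    for i in range(1, len(v) + 1):
        for j in range(1, len(w) + 1):
            cost = 0 if v[i - 1] == w[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost)
    return dp[len(v)][len(w)]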
import sys
sys.setrecursionlimit(1500)
score_matrix = dict()
with open('BLOSUM62.txt','r') as f:
#with open('PAM250.txt','r') as f:
amino_acid = next(f).strip().split()
for line in f:
line = line.strip().split()
score = [-1 for x in line[1:]]
score_matrix[line[0]] = score
for i in score_matrix:
index = amino_acid.index(i)
score_matrix[i][index] = 0
#print(score_matrix)
own_score = dict()
for i in amino_acid:
score_list = score_matrix[i]
index = amino_acid.index(i)
score = score_list[index]
own_score[i] = 0
#v = 'PLEASANTLY'
#w = 'MEANLY'
#v = 'GGACRNQMSEVNMWGCWWASVWVSWCEYIMPSGWRRMKDRHMWHWSVHQQSSPCAKSICFHETKNQWNQDACGPKVTQHECMRRRLVIAVKEEKSRETKMLDLRHRMSGRMNEHNVTLRKSPCVKRIMERTTYHRFMCLFEVVPAKRQAYNSCDTYTMMACVAFAFVNEADWWKCNCAFATVPYYFDDSCRMVCGARQCYRLWQWEVNTENYVSIEHAEENPFSKLKQQWCYIPMYANFAWSANHMFWAYIANELQLDWQHPNAHPIKWLQNFLMRPYHPNCGLQHKERITPLHKSFYGMFTQHHLFCKELDWRIMAHANRYYCIQHGWHTNNPMDPIDTRHCCMIQGIPKRDHHCAWSTCDVAPLQGNWMLMHHCHHWNRVESMIQNQHEVAAGIKYWRLNRNGKLPVHTADNYGVLFQRWWFLGWYNFMMWHYSLHFFAVNFYFPELNAGQMPRFQDDQNRDDVYDTCIWYFAWSNTEFMEVFGNMMMYSRPMTKMGFHGMMLPYIAINGLRSISHVNKGIGPISGENCNLSTGLHHYGQLRMVMCGYCTPYRTEVKNQREMISAVHCHQHIDWRWIWCSGHWFGSNKCDLRIEDLQNYEPAKNKSNWPYMKECRKTEPYQDNIETMFFHQHDLARDSGYIANGWHENCRQHQDFSNTFAGGHKGTPKGEHMRRSLYVWDTDCVEKCQWVPELFALCWWTPLPDGVPVMLGTYRQYMFGLVVLYWFEVKYSCHNSWDYYNFHEGTMKDSDPENWCFWGMQIIQFHDHGKPEFFQDPMKQIIKTECTAYNSFMMGHIGKTTIVYLVSYIGRLWMKSCCLTWPPYATAPIKWAEETLLDFGQGPHPKYACHFTHQNMIRLAKLPMYWLWKLMFHE'
#w = 'GMWGFVQVSTQSRFRHMWHWSVHQQSSECAKSICHHEWKNQWNQDACGPKVTQHECMANMPMHKCNNWFWRLVIAVKEEKVRETKMLDLIHRHWLVLNQGRMNEHNVTLRKSPCVKRIMHKWKSRTTFHRFMCLMASEVVPAKRGAQCWRQLGTYATYTVYTMMACVAFAFEYQQDNDNEADWWKCNCAFVPVYFDDSCRPVVGAFQCYRLGLPFGTGWNYAEENPFSKLKQQMHRKTMGECKNMMIWAYCANELQLPIKWGSMYHEHDFQLPPYHPNRFHKIRITILHKSFYGMFTQHHLFCKELDWRIMAWANRYYCIQHGWHTNNPDDPITRHKCMIQGGQNSRNADIRHMPVQCGNWGHAIGLEMPMPMHHCHHANRVESMIQTQHYWGPKLNRNADWWFLGWQNFEIFRMPILRWMGAYEWHYSLHFFAVNFYFPELNAGQMPRFQDDQNNNACYDVWAWSNTEFMEVNGIKKLRFGNMMMYSRPMTKMGFHGMMKSRSISHVNKGIGPISGENCSTGLHHYGQLTEVKNQREMISAVHCHQHIWCKCDLRIEPAKNKGYWPYQKEFCWRKQINSRKTEPYQVAPVINIETMFFDFWYIANGMHENCRRTGHKPNPDCVEKCQWVPELFALCWWRAMPDGVPVMLGTMFGLVVYWFEVKYSCHNSLYRRVTDYYNFHEGTMKDHEVPWNWDNEHCHDHGKAEFFFQMLKIPICDPMKAIIPSTEMVNTPWHPFSFMMGHDGKTTIVYSGSYIGRLWVPSRWKPYAPANWKMPIKWAEETLLMVPHPHFTHQQLWGTTLRLAKLPMYWLWKLMFHHLFGVK'
a = 1
with open('dataset_248_3.txt','r') as f:
v = next(f).strip()
w = next(f).strip()
m = len(v)
n = len(w)
s1 = [0]*(len(w)+1)
s = []
for i in range(len(v)+1):
s.append(s1.copy())
s2 = ['']*(len(w)+1)
backtrack = []
for i in range(len(v)+1):
backtrack.append(s2.copy())
backtrack[0][0] = 'start'
for i in range(1,len(v)+1):
backtrack[i][0] = 'down'
for i in range(1,len(w)+1):
backtrack[0][i] = 'hor'
def LCS_backtrack(v,w):
s[0][0] = 0
for i in range(1,len(v)+1):
s[i][0] = s[i-1][0] - a
for j in range(1,len(w)+1):
s[0][j] = s[0][j-1] - a
for i in range(1,len(v)+1):
for j in range(1,len(w)+1):
if v[i-1] == w[j-1]:
score_list = score_matrix[v[i-1]]
index = amino_acid.index(v[i-1])
score = score_list[index]
s[i][j] = max(s[i-1][j]-a, s[i][j-1]-a, s[i-1][j-1] + score)
else:
score_list = score_matrix[v[i-1]]
index = amino_acid.index(w[j-1])
                penalty = score_list[index]
                s[i][j] = max(s[i-1][j]-a, s[i][j-1]-a, s[i-1][j-1] + penalty)
if s[i][j] == s[i-1][j] - a:
backtrack[i][j] = 'down'
elif s[i][j] == s[i][j-1] -a:
backtrack[i][j] = 'hor'
elif (s[i][j] == s[i-1][j-1] + own_score[v[i-1]]) and (v[i-1] == w[j-1]):
backtrack[i][j] = 'digo'
else:
backtrack[i][j] = 'digo_mis'
#print(s[len(v)][len(w)])
return backtrack
backtrack = LCS_backtrack(v,w)
#for i in backtrack:
# print(i)
align_v = []
align_w = []
v_list = list(v)
w_list = list(w)
def output_LCS(backtrack,v,w,v_list,w_list,m,n,lcs=[]):
if m == 0 and n == 0:
return ''
if backtrack[m][n] == 'down':
align_v.append(v_list[-1])
v_list.pop()
align_w.append('-')
output_LCS(backtrack,v,w,v_list,w_list,m-1,n)
elif backtrack[m][n] == 'hor':
align_v.append('-')
align_w.append(w_list[-1])
w_list.pop()
output_LCS(backtrack,v,w,v_list,w_list,m,n-1)
elif backtrack[m][n] == 'digo_mis':
align_v.append(v_list[-1])
v_list.pop()
align_w.append(w_list[-1])
w_list.pop()
output_LCS(backtrack,v,w,v_list,w_list,m-1,n-1)
else:
align_v.append(v_list[-1])
v_list.pop()
align_w.append(w_list[-1])
w_list.pop()
output_LCS(backtrack,v,w,v_list,w_list,m-1,n-1)
lcs.append(v[m-1])
return [align_v,align_w]
align_result = output_LCS(backtrack,v,w,v_list,w_list,m,n)
align_v = align_result[0]
align_w = align_result[1]
#print(align_v)
#print(align_w)
align_v.reverse()
align_w.reverse()
v_string = ''.join(align_v)
w_string = ''.join(align_w)
#print(v_string)
#print(w_string)
hamming_dist = 0
for i in range(len(v_string)):
if v_string[i] != w_string[i]:
hamming_dist += 1
print(hamming_dist)
| 34.584906
| 882
| 0.681942
|
10476ae5d1789a3f8162c2c3179d8ace85408e30
| 7,156
|
py
|
Python
|
pretrained-model/tts/tacotron2/test/tacotron2-male.py
|
ishine/malaya-speech
|
fd34afc7107af1656dff4b3201fa51dda54fde18
|
[
"MIT"
] | 111
|
2020-08-31T04:58:54.000Z
|
2022-03-29T15:44:18.000Z
|
pretrained-model/tts/tacotron2/test/tacotron2-male.py
|
ishine/malaya-speech
|
fd34afc7107af1656dff4b3201fa51dda54fde18
|
[
"MIT"
] | 14
|
2020-12-16T07:27:22.000Z
|
2022-03-15T17:39:01.000Z
|
pretrained-model/tts/tacotron2/test/tacotron2-male.py
|
ishine/malaya-speech
|
fd34afc7107af1656dff4b3201fa51dda54fde18
|
[
"MIT"
] | 29
|
2021-02-09T08:57:15.000Z
|
2022-03-12T14:09:19.000Z
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import tensorflow as tf
from glob import glob
from itertools import cycle
import tensorflow as tf
import malaya_speech
import malaya_speech.train
from malaya_speech.train.model import tacotron2
import malaya_speech.config
import numpy as np
import json
from malaya_speech.train.loss import calculate_2d_loss, calculate_3d_loss
import malaya_speech.train as train
with open('mels-male.json') as fopen:
files = json.load(fopen)
import random
reduction_factor = 2
maxlen = 550
def generate(files):
file_cycle = cycle(files)
while True:
f = next(file_cycle).decode()
mel = np.load(f)
mel_length = len(mel)
if mel_length > maxlen:
continue
remainder = mel_length % reduction_factor
if remainder != 0:
new_mel_length = mel_length + reduction_factor - remainder
mel = np.pad(mel, [[0, new_mel_length - mel_length], [0, 0]])
mel_length = new_mel_length
text_ids = np.load(f.replace('mels', 'text_ids'), allow_pickle=True)[
1
]
len_mel = [len(mel)]
len_text_ids = [len(text_ids)]
yield {
'mel': mel,
'text_ids': text_ids,
'len_mel': len_mel,
'len_text_ids': len_text_ids,
}
def parse(example):
mel_len = example['len_mel'][0]
input_len = example['len_text_ids'][0]
g = tacotron2.generate_guided_attention(
mel_len, input_len, reduction_factor=reduction_factor
)
example['g'] = g
return example
def get_dataset(files, batch_size=32, shuffle_size=32, thread_count=24):
def get():
dataset = tf.data.Dataset.from_generator(
generate,
{
'mel': tf.float32,
'text_ids': tf.int32,
'len_mel': tf.int32,
'len_text_ids': tf.int32,
},
output_shapes={
'mel': tf.TensorShape([None, 80]),
'text_ids': tf.TensorShape([None]),
'len_mel': tf.TensorShape([1]),
'len_text_ids': tf.TensorShape([1]),
},
args=(files,),
)
dataset = dataset.map(parse, num_parallel_calls=thread_count)
dataset = dataset.shuffle(batch_size)
dataset = dataset.padded_batch(
shuffle_size,
padded_shapes={
'mel': tf.TensorShape([None, 80]),
'text_ids': tf.TensorShape([None]),
'len_mel': tf.TensorShape([1]),
'len_text_ids': tf.TensorShape([1]),
'g': tf.TensorShape([None, None]),
},
padding_values={
'mel': tf.constant(0, dtype=tf.float32),
'text_ids': tf.constant(0, dtype=tf.int32),
'len_mel': tf.constant(0, dtype=tf.int32),
'len_text_ids': tf.constant(0, dtype=tf.int32),
'g': tf.constant(-1.0, dtype=tf.float32),
},
)
return dataset
return get
_pad = 'pad'
_eos = 'eos'
_punctuation = "!'(),.:;? "
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Export all symbols:
MALAYA_SPEECH_SYMBOLS = (
[_pad] + list(_special) + list(_punctuation) + list(_letters) + [_eos]
)
learning_rate = 0.001
num_train_steps = 200000
num_warmup_steps = 3000
end_learning_rate = 0.00001
weight_decay_rate = 0.01
def model_fn(features, labels, mode, params):
tacotron2_config = malaya_speech.config.tacotron2_config
tacotron2_config['reduction_factor'] = reduction_factor
c = tacotron2.Config(
vocab_size=len(MALAYA_SPEECH_SYMBOLS) + 1, **tacotron2_config
)
model = tacotron2.Model(c)
input_ids = features['text_ids']
input_lengths = features['len_text_ids'][:, 0]
speaker_ids = tf.constant([0], dtype=tf.int32)
mel_outputs = features['mel']
mel_lengths = features['len_mel'][:, 0]
mel_actuals = features['mel']
guided = features['g']
r = model(
input_ids,
input_lengths,
speaker_ids,
mel_outputs,
mel_lengths,
training=True,
)
binary_crossentropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
mae = tf.keras.losses.MeanAbsoluteError()
decoder_output, post_mel_outputs, stop_token_predictions, alignment_histories = (
r
)
mel_loss_before = calculate_3d_loss(
mel_actuals, decoder_output, loss_fn=mae
)
mel_loss_after = calculate_3d_loss(
mel_actuals, post_mel_outputs, loss_fn=mae
)
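    # Build the stop-token targets: 1.0 for frame positions at or beyond each
    # utterance's mel length (i.e. padded frames), 0.0 for real frames.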
max_mel_length = tf.reduce_max(mel_lengths)
stop_gts = tf.expand_dims(
tf.range(tf.reduce_max(max_mel_length), dtype=tf.int32), 0
)
stop_gts = tf.tile(stop_gts, [tf.shape(mel_lengths)[0], 1])
stop_gts = tf.cast(
tf.math.greater_equal(stop_gts, tf.expand_dims(mel_lengths, 1)),
tf.float32,
)
stop_token_loss = calculate_2d_loss(
stop_gts, stop_token_predictions, loss_fn=binary_crossentropy
)
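    # Guided-attention loss: penalise alignment mass that falls far from the
    # diagonal prior in `guided`; padded positions (filled with -1.0) are masked out.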
attention_masks = tf.cast(tf.math.not_equal(guided, -1.0), tf.float32)
loss_att = tf.reduce_sum(
tf.abs(alignment_histories * guided) * attention_masks, axis=[1, 2]
)
loss_att /= tf.reduce_sum(attention_masks, axis=[1, 2])
loss_att = tf.reduce_mean(loss_att)
loss = stop_token_loss + mel_loss_before + mel_loss_after + loss_att
tf.identity(loss, 'loss')
tf.identity(stop_token_loss, name='stop_token_loss')
tf.identity(mel_loss_before, name='mel_loss_before')
tf.identity(mel_loss_after, name='mel_loss_after')
tf.identity(loss_att, name='loss_att')
tf.summary.scalar('stop_token_loss', stop_token_loss)
tf.summary.scalar('mel_loss_before', mel_loss_before)
tf.summary.scalar('mel_loss_after', mel_loss_after)
tf.summary.scalar('loss_att', loss_att)
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = train.optimizer.adamw.create_optimizer(
loss=loss,
init_lr=learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
end_learning_rate=end_learning_rate,
weight_decay_rate=weight_decay_rate,
)
estimator_spec = tf.estimator.EstimatorSpec(
mode=mode, loss=loss, train_op=train_op
)
elif mode == tf.estimator.ModeKeys.EVAL:
estimator_spec = tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL, loss=loss
)
return estimator_spec
train_hooks = [
tf.train.LoggingTensorHook(
[
'loss',
'stop_token_loss',
'mel_loss_before',
'mel_loss_after',
'loss_att',
],
every_n_iter=1,
)
]
train_dataset = get_dataset(files['train'])
dev_dataset = get_dataset(files['test'])
train.run_training(
train_fn=train_dataset,
model_fn=model_fn,
model_dir='tacotron2-male',
num_gpus=1,
log_step=1,
save_checkpoint_step=5000,
max_steps=num_train_steps,
eval_fn=dev_dataset,
train_hooks=train_hooks,
)
| 29.816667
| 85
| 0.627306
|
370a9d3cb22cae1a74e9d23d2e9de2fb700334a0
| 2,672
|
py
|
Python
|
Script/componentList.py
|
bveenema/KiCad-Cabling
|
792b7c613fa57ea3990c3fc3ca3e3535edaf95d2
|
[
"MIT"
] | null | null | null |
Script/componentList.py
|
bveenema/KiCad-Cabling
|
792b7c613fa57ea3990c3fc3ca3e3535edaf95d2
|
[
"MIT"
] | null | null | null |
Script/componentList.py
|
bveenema/KiCad-Cabling
|
792b7c613fa57ea3990c3fc3ca3e3535edaf95d2
|
[
"MIT"
] | null | null | null |
def getComponentList(net):
nets = net.nets;
components = net.getInterestingComponents()
componentList = [];
# [
# {
# ref: "HOUSING1",
# Connections: [
# {pin: 3, net: "Net-(HOUSING1-Pad3)"},
# {pin: 2, net: "Net-(HOUSING1-Pad2)"},
# {pin: 1, net: "Net-(HOUSING1-Pad1)"}
# ],
# isAnchor: True,
# KiCadComponent: <comp>,
# },
# ...
# ]
# initialize componentList with refs
for component in components:
newComponent = {};
newComponent['ref'] = component.getRef();
newComponent['connections'] = [];
newComponent['isAnchor'] = (str.lower(component.getField("Anchor")) == "yes");
newComponent['KiCadComponent'] = component;
componentList.append(newComponent);
# add nets to components
for thisNet in nets:
netName = thisNet.attributes["name"];
nodes = thisNet.getChildren();
for node in nodes:
ref = node.attributes["ref"];
pin = node.attributes['pin'];
for component in componentList:
if(component['ref'] == ref):
component['connections'].append({'net': netName, 'pin': pin});
for component in componentList:
component['connections'] = sorted(component['connections'], key=lambda k: k['net'])
return componentList;
def printComponentList(componentList):
print("COMPONENT LIST");
print("-------------");
for component in componentList:
print("Ref: ", component['ref']);
print('Nets', component['connections']);
print('isAnchor', component['isAnchor']);
print("-------------");
def removeComponent(component, componentList):
# theList = [{'id': 1, 'name': 'paul'},{'id': 2, 'name': 'john'}]
# thelist[:] = [d for d in thelist if d.get('id') != 2]
componentList[:] = [d for d in componentList if d.get('ref') != component];
return componentList;
def getComponent(ref, componentList):
for component in componentList:
if(ref == component['ref']):
return component;
return None;
def getKiCadComponent(ref, components):
for component in components:
if(ref == component.getRef()):
return component;
return None;
def getPosition(ref, netName, netList):
for net in netList:
if(netName == net['netName']):
for node in net['nodes']:
if(ref == node['ref']):
return node['pin'];
return None;
| 33.4
| 95
| 0.534805
|
7d0a1fa488282b218638b5ae6a7664e2247a2ccf
| 11,505
|
py
|
Python
|
build/ninja/android.py
|
JohannesLorenz/rpmalloc
|
8d790d2b45e1818e531c61bf649c5225556dd07a
|
[
"Unlicense",
"MIT"
] | 811
|
2016-11-18T12:57:43.000Z
|
2019-05-23T17:07:27.000Z
|
build/ninja/android.py
|
JohannesLorenz/rpmalloc
|
8d790d2b45e1818e531c61bf649c5225556dd07a
|
[
"Unlicense",
"MIT"
] | 108
|
2019-05-24T21:46:54.000Z
|
2022-03-28T07:48:40.000Z
|
build/ninja/android.py
|
JohannesLorenz/rpmalloc
|
8d790d2b45e1818e531c61bf649c5225556dd07a
|
[
"Unlicense",
"MIT"
] | 84
|
2019-06-01T05:26:43.000Z
|
2022-03-31T21:17:27.000Z
|
#!/usr/bin/env python
"""Ninja toolchain abstraction for Android platform"""
import os
import subprocess
import toolchain
def make_target(toolchain, host, target):
return Android(toolchain, host, target)
class Android(object):
def __init__(self, toolchain, host, target):
self.host = host
if host.is_windows():
self.exe_suffix = '.exe'
else:
self.exe_suffix = ''
self.javaccmd = toolchain.mkdircmd('$outpath') + ' && $javac -d $outpath -classpath $outpath -sourcepath $sourcepath -target 1.5 -bootclasspath $androidjar -g -source 1.5 -Xlint:-options $in'
self.dexcmd = '$dex --dex --output $out $in'
self.aaptcmd = toolchain.cdcmd('$apkbuildpath') + ' && $aapt p -f -m -M AndroidManifest.xml -F $apk -I $androidjar -S res --debug-mode --no-crunch -J gen $aaptflags'
self.aaptdeploycmd = toolchain.cdcmd('$apkbuildpath') + ' && $aapt c -S res -C bin/res && $aapt p -f -m -M AndroidManifest.xml -F $apk -I $androidjar -S bin/res -S res -J gen $aaptflags'
self.aaptaddcmd = toolchain.cdcmd('$apkbuildpath') + ' && ' + toolchain.copycmd('$apksource', '$apk' ) + ' && $aapt a $apk $apkaddfiles'
self.zipcmd = '$zip -r -9 $out $in $implicitin'
self.zipaligncmd = '$zipalign -f 4 $in $out'
self.codesigncmd = 'build/ninja/codesign.py --target $target --prefs codesign.json --zipfile $in --config $config --jarsigner $jarsigner $out'
if host.is_windows():
self.codesigncmd = 'python ' + self.codesigncmd
def initialize_toolchain(self):
self.ndkpath = os.getenv('NDK_HOME', '')
self.sdkpath = os.getenv('ANDROID_HOME', '')
self.sysroot = ''
self.platformversion = '21'
self.gcc_toolchainversion = '4.9'
self.javasdk = ''
self.archname = dict()
self.archname['x86'] = 'x86'
self.archname['x86-64'] = 'x86_64'
self.archname['arm6'] = 'arm'
self.archname['arm7'] = 'arm'
self.archname['arm64'] = 'arm64'
self.archname['mips'] = 'mips'
self.archname['mips64'] = 'mips64'
self.archpath = dict()
self.archpath['x86'] = 'x86'
self.archpath['x86-64'] = 'x86-64'
self.archpath['arm6'] = 'armeabi'
self.archpath['arm7'] = 'armeabi-v7a'
self.archpath['arm64'] = 'arm64-v8a'
self.archpath['mips'] = 'mips'
self.archpath['mips64'] = 'mips64'
self.gcc_toolchainname = dict()
self.gcc_toolchainname['x86'] = 'x86-' + self.gcc_toolchainversion
self.gcc_toolchainname['x86-64'] = 'x86_64-' + self.gcc_toolchainversion
self.gcc_toolchainname['arm6'] = 'arm-linux-androideabi-' + self.gcc_toolchainversion
self.gcc_toolchainname['arm7'] = 'arm-linux-androideabi-' + self.gcc_toolchainversion
self.gcc_toolchainname['arm64'] = 'aarch64-linux-android-' + self.gcc_toolchainversion
self.gcc_toolchainname['mips'] = 'mipsel-linux-android-' + self.gcc_toolchainversion
self.gcc_toolchainname['mips64'] = 'mips64el-linux-android-' + self.gcc_toolchainversion
self.gcc_toolchainprefix = dict()
self.gcc_toolchainprefix['x86'] = 'i686-linux-android-'
self.gcc_toolchainprefix['x86-64'] = 'x86_64-linux-android-'
self.gcc_toolchainprefix['arm6'] = 'arm-linux-androideabi-'
self.gcc_toolchainprefix['arm7'] = 'arm-linux-androideabi-'
self.gcc_toolchainprefix['arm64'] = 'aarch64-linux-android-'
self.gcc_toolchainprefix['mips'] = 'mipsel-linux-android-'
self.gcc_toolchainprefix['mips64'] = 'mips64el-linux-android-'
if self.host.is_windows():
if os.getenv('PROCESSOR_ARCHITECTURE', 'AMD64').find('64') != -1:
self.hostarchname = 'windows-x86_64'
else:
self.hostarchname = 'windows-x86'
elif self.host.is_linux():
localarch = toolchain.check_output(['uname', '-m'])
if localarch == 'x86_64':
self.hostarchname = 'linux-x86_64'
else:
self.hostarchname = 'linux-x86'
elif self.host.is_macos():
self.hostarchname = 'darwin-x86_64'
def build_toolchain(self):
buildtools_path = os.path.join(self.sdkpath, 'build-tools')
buildtools_list = [item for item in os.listdir(buildtools_path) if os.path.isdir(os.path.join(buildtools_path, item))]
buildtools_list.sort(key = lambda s: map(int, s.split('-')[0].split('.')))
self.buildtools_path = os.path.join(self.sdkpath, 'build-tools', buildtools_list[-1])
self.android_jar = os.path.join(self.sdkpath, 'platforms', 'android-' + self.platformversion, 'android.jar')
self.javac = 'javac'
self.jarsigner = 'jarsigner'
if self.javasdk != '':
self.javac = os.path.join(self.javasdk, 'bin', self.javac)
self.jarsigner = os.path.join(self.javasdk, 'bin', self.jarsigner)
if self.host.is_windows():
self.dex = os.path.join(self.buildtools_path, 'dx.bat')
else:
self.dex = os.path.join(self.buildtools_path, 'dx' + self.exe_suffix)
if not os.path.isfile(self.dex):
self.dex = os.path.join(self.sdkpath, 'tools', 'dx' + self.exe_suffix)
self.aapt = os.path.join(self.buildtools_path, 'aapt' + self.exe_suffix)
self.zipalign = os.path.join(self.buildtools_path, 'zipalign' + self.exe_suffix)
if not os.path.isfile( self.zipalign ):
self.zipalign = os.path.join(self.sdkpath, 'tools', 'zipalign' + self.exe_suffix)
def parse_prefs(self, prefs):
if 'android' in prefs:
androidprefs = prefs['android']
if 'ndkpath' in androidprefs:
self.ndkpath = os.path.expanduser(androidprefs['ndkpath'])
if 'sdkpath' in androidprefs:
self.sdkpath = os.path.expanduser(androidprefs['sdkpath'])
if 'platformversion' in androidprefs:
self.platformversion = androidprefs['platformversion']
if 'gccversion' in androidprefs:
self.gcc_toolchainversion = androidprefs['gccversion']
if 'javasdk' in androidprefs:
self.javasdk = androidprefs['javasdk']
def write_variables(self, writer):
writer.variable('ndk', self.ndkpath)
writer.variable('sdk', self.sdkpath)
writer.variable('sysroot', self.sysroot)
writer.variable('androidjar', self.android_jar )
writer.variable('apkbuildpath', '')
writer.variable('apk', '')
writer.variable('apksource', '')
writer.variable('apkaddfiles', '')
writer.variable('javac', self.javac)
writer.variable('dex', self.dex)
writer.variable('aapt', self.aapt)
writer.variable('zipalign', self.zipalign)
writer.variable('jarsigner', self.jarsigner)
writer.variable('aaptflags', '')
def write_rules(self, writer):
writer.rule('aapt', command = self.aaptcmd, description = 'AAPT $out')
writer.rule('aaptdeploy', command = self.aaptdeploycmd, description = 'AAPT $out')
writer.rule('aaptadd', command = self.aaptaddcmd, description = 'AAPT $out')
writer.rule('javac', command = self.javaccmd, description = 'JAVAC $in')
writer.rule('dex', command = self.dexcmd, description = 'DEX $out')
writer.rule('zip', command = self.zipcmd, description = 'ZIP $out')
writer.rule('zipalign', command = self.zipaligncmd, description = 'ZIPALIGN $out')
writer.rule('codesign', command = self.codesigncmd, description = 'CODESIGN $out')
def make_sysroot_path(self, arch):
return os.path.join(self.ndkpath, 'platforms', 'android-' + self.platformversion, 'arch-' + self.archname[arch])
def make_gcc_toolchain_path(self, arch):
return os.path.join(self.ndkpath, 'toolchains', self.gcc_toolchainname[arch], 'prebuilt', self.hostarchname)
def make_gcc_bin_path(self, arch):
return os.path.join(self.make_gcc_toolchain_path(arch), 'bin', self.gcc_toolchainprefix[arch])
def archname(self):
return self.archname
def archpath(self):
return self.archpath
def hostarchname(self):
return self.hostarchname
def apk(self, toolchain, writer, module, archbins, javasources, outpath, binname, basepath, config, implicit_deps, resources):
buildpath = os.path.join('$buildpath', config, 'apk', binname)
baseapkname = binname + ".base.apk"
unsignedapkname = binname + ".unsigned.apk"
unalignedapkname = binname + ".unaligned.apk"
apkname = binname + ".apk"
apkfiles = []
libfiles = []
locallibs = []
resfiles = []
manifestfile = []
writer.comment('Make APK')
for _, value in archbins.iteritems():
for archbin in value:
archpair = os.path.split(archbin)
libname = archpair[1]
arch = os.path.split(archpair[0])[1]
locallibpath = os.path.join('lib', self.archpath[arch], libname)
archpath = os.path.join(buildpath, locallibpath)
locallibs += [locallibpath + ' ']
libfiles += toolchain.copy(writer, archbin, archpath)
for resource in resources:
filename = os.path.split(resource)[1]
if filename == 'AndroidManifest.xml':
manifestfile = toolchain.copy(writer, os.path.join(basepath, module, resource), os.path.join(buildpath, 'AndroidManifest.xml'))
else:
restype = os.path.split(os.path.split(resource)[0])[1]
if restype == 'asset':
pass #todo: implement
else:
resfiles += toolchain.copy(writer, os.path.join(basepath, module, resource), os.path.join(buildpath, 'res', restype, filename))
#Make directories
gendir = toolchain.mkdir(writer, os.path.join(buildpath, 'gen'))
bindir = toolchain.mkdir(writer, os.path.join(buildpath, 'bin'))
binresdir = toolchain.mkdir(writer, os.path.join(buildpath, 'bin', 'res'), order_only = bindir)
alldirs = gendir + bindir + binresdir
aaptvars = [('apkbuildpath', buildpath), ('apk', baseapkname)]
aaptout = os.path.join(buildpath, baseapkname)
if config == 'deploy':
baseapkfile = writer.build(aaptout, 'aaptdeploy', manifestfile, variables = aaptvars, implicit = manifestfile + resfiles, order_only = alldirs)
else:
baseapkfile = writer.build(aaptout, 'aapt', manifestfile, variables = aaptvars, implicit = manifestfile + resfiles, order_only = alldirs)
#Compile java code
javafiles = []
localjava = []
if javasources != []:
#self.javaccmd = '$javac -d $outpath -classpath $outpath -sourcepath $sourcepath -target 1.5 -bootclasspath $androidjar -g -source 1.5 -Xlint:-options $in'
#self.dexcmd = '$dex --dex --output $out $in'
javasourcepath = '.'
if self.host.is_windows():
javasourcepath += ';'
else:
javasourcepath += ':'
javasourcepath += os.path.join(buildpath, 'gen')
classpath = os.path.join(buildpath, 'classes')
javavars = [('outpath', classpath), ('sourcepath', javasourcepath)]
javaclasses = writer.build(classpath, 'javac', javasources, variables = javavars, implicit = baseapkfile)
localjava += ['classes.dex']
javafiles += writer.build(os.path.join(buildpath, 'classes.dex'), 'dex', classpath)
#Add native libraries and java classes to apk
aaptvars = [('apkbuildpath', buildpath), ('apk', unsignedapkname), ('apksource', baseapkname), ('apkaddfiles', toolchain.paths_forward_slash(locallibs + localjava))]
unsignedapkfile = writer.build(os.path.join(buildpath, unsignedapkname), 'aaptadd', baseapkfile, variables = aaptvars, implicit = libfiles + javafiles, order_only = alldirs)
#Sign the APK
codesignvars = [('config', config)]
unalignedapkfile = writer.build(os.path.join(buildpath, unalignedapkname), 'codesign', unsignedapkfile, variables = codesignvars)
#Run zipalign
outfile = writer.build(os.path.join(outpath, config, apkname), 'zipalign', unalignedapkfile)
return outfile
| 46.204819
| 195
| 0.673012
|
385563ddcb6d357d6b5e1600edcedf6c34634312
| 487
|
py
|
Python
|
priv/python/user/pyclass.py
|
heyoka/pythra
|
8776294b060e2a0f25e4cb6a88d3570c04f37d82
|
[
"Apache-2.0"
] | 3
|
2019-11-17T03:03:02.000Z
|
2019-11-21T08:03:42.000Z
|
priv/python/user/pyclass.py
|
heyoka/pythra
|
8776294b060e2a0f25e4cb6a88d3570c04f37d82
|
[
"Apache-2.0"
] | null | null | null |
priv/python/user/pyclass.py
|
heyoka/pythra
|
8776294b060e2a0f25e4cb6a88d3570c04f37d82
|
[
"Apache-2.0"
] | null | null | null |
from erlport.erlterms import Map
class Pyclass:
def __init__(self, value):
print("pyClass __init__")
print(value)
print(type(value))
# if isinstance(value, list):
# myval = list()
# for d in value:
# myval.append(dict(d))
# elif isinstance(value, dict):
# myval = dict(value)
# print("myval", myval)
self.value = value
def get_value(self):
return self.value
| 22.136364
| 39
| 0.529774
|
8c96746e93740316199f82544b37b6a74e5198f0
| 13,072
|
py
|
Python
|
sppas/sppas/src/ui/phoenix/dialogs/messages.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
sppas/sppas/src/ui/phoenix/dialogs/messages.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
sppas/sppas/src/ui/phoenix/dialogs/messages.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
src.ui.phoenix.dialogs.messages.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import logging
import wx
from sppas.src.config import ui_translation
from ..windows import sppasMessageText
from ..windows import sppasPanel
from ..windows import sppasDialog
# ----------------------------------------------------------------------------
MSG_HEADER_ERROR = ui_translation.gettext("Error")
MSG_HEADER_WARNING = ui_translation.gettext("Warning")
MSG_HEADER_QUESTION = ui_translation.gettext("Question")
MSG_HEADER_INFO = ui_translation.gettext("Information")
# ----------------------------------------------------------------------------
class sppasBaseMessageDialog(sppasDialog):
"""Base class to create message dialogs.
:author: Brigitte Bigi
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: develop@sppas.org
:license: GPL, v3
:copyright: Copyright (C) 2011-2019 Brigitte Bigi
"""
def __init__(self, parent, message, title=None, style=wx.ICON_INFORMATION):
"""Create a dialog with a message.
:param parent: (wx.Window)
:param message: (str) the file to display in this frame.
:param title: (str) a title to display in the header. Default is the icon one.
:param style: ONE of wx.ICON_INFORMATION, wx.ICON_ERROR, wx.ICON_EXCLAMATION, wx.YES_NO
"""
super(sppasBaseMessageDialog, self).__init__(
parent=parent,
title="Message",
style=wx.FRAME_TOOL_WINDOW | wx.RESIZE_BORDER | wx.CLOSE_BOX | wx.STAY_ON_TOP) # | wx.DIALOG_NO_PARENT)
self._create_header(style, title)
self._create_content(message)
self._create_buttons()
# Fix frame properties
self.SetMinSize(wx.Size(sppasDialog.fix_size(256),
sppasDialog.fix_size(164)))
self.LayoutComponents()
self.CenterOnParent()
self.GetSizer().Fit(self)
self.FadeIn(deltaN=-10)
# -----------------------------------------------------------------------
def _create_header(self, style, title):
"""Create the header of the message dialog."""
# Create the header
if style == wx.ICON_ERROR:
icon = "error"
if title is None:
title = MSG_HEADER_ERROR
elif style == wx.ICON_WARNING:
icon = "warning"
if title is None:
title = MSG_HEADER_WARNING
elif style == wx.YES_NO:
icon = "question"
if title is None:
title = MSG_HEADER_QUESTION
else:
icon = "information"
if title is None:
title = MSG_HEADER_INFO
self.CreateHeader(title, icon_name=icon)
# -----------------------------------------------------------------------
def _create_content(self, message):
"""Create the content of the message dialog."""
p = sppasPanel(self)
s = wx.BoxSizer(wx.HORIZONTAL)
txt = sppasMessageText(p, message)
s.Add(txt, 1, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, 10)
p.SetSizer(s)
p.SetName("content")
p.SetMinSize(wx.Size(-1, sppasDialog.fix_size(96)))
# -----------------------------------------------------------------------
def _create_buttons(self):
"""Override to create the buttons and bind events."""
raise NotImplementedError
# ---------------------------------------------------------------------------
# Message dialogs
# ---------------------------------------------------------------------------
class sppasYesNoDialog(sppasBaseMessageDialog):
"""Create a message in a wx.Dialog with a yes-no question.
:author: Brigitte Bigi
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: develop@sppas.org
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 Brigitte Bigi
wx.ID_YES or wx.ID_NO is returned if a button is clicked.
wx.ID_CANCEL is returned if the dialog is destroyed.
>>> dialog = sppasYesNoDialog("Really exit?")
>>> response = dialog.ShowModal()
>>> dialog.Destroy()
>>> if response == wx.ID_YES:
>>> # do something here
"""
def __init__(self, message):
super(sppasYesNoDialog, self).__init__(
parent=None,
message=message,
style=wx.YES_NO)
# -----------------------------------------------------------------------
def _create_buttons(self):
self.CreateActions([wx.ID_NO, wx.ID_YES])
self.Bind(wx.EVT_BUTTON, self._process_event)
self.SetAffirmativeId(wx.ID_YES)
# -----------------------------------------------------------------------
def _process_event(self, event):
"""Process any kind of events.
:param event: (wx.Event)
"""
event_obj = event.GetEventObject()
event_id = event_obj.GetId()
if event_id == wx.ID_NO:
self.EndModal(wx.ID_NO)
else:
event.Skip()
# ---------------------------------------------------------------------------
class sppasConfirm(sppasBaseMessageDialog):
"""Create a message in a wx.Dialog to confirm an action after an error.
:author: Brigitte Bigi
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: develop@sppas.org
:license: GPL, v3
:copyright: Copyright (C) 2011-2019 Brigitte Bigi
wx.ID_YES is returned if 'yes' is clicked.
wx.ID_CANCEL is returned if the dialog is destroyed or cancel is clicked.
    >>> dialog = sppasConfirm("Confirm...")
>>> response = dialog.ShowModal()
>>> dialog.Destroy()
>>> if response == wx.ID_YES:
>>> # do something here
"""
def __init__(self, message, title=None):
super(sppasConfirm, self).__init__(
parent=None,
message=message,
title=title,
style=wx.ICON_ERROR)
# -----------------------------------------------------------------------
def _create_buttons(self):
self.CreateActions([wx.ID_CANCEL, wx.ID_YES])
self.Bind(wx.EVT_BUTTON, self._process_event)
self.SetAffirmativeId(wx.ID_YES)
# -----------------------------------------------------------------------
def _process_event(self, event):
"""Process any kind of events.
:param event: (wx.Event)
"""
event_obj = event.GetEventObject()
event_id = event_obj.GetId()
if event_id == wx.ID_CANCEL:
self.EndModal(wx.ID_CANCEL)
else:
event.Skip()
# ---------------------------------------------------------------------------
class sppasInformationDialog(sppasBaseMessageDialog):
"""Create a message in a wx.Dialog with an information.
:author: Brigitte Bigi
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: develop@sppas.org
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 Brigitte Bigi
wx.ID_OK is returned if the button is clicked.
wx.ID_CANCEL is returned if the dialog is destroyed.
>>> dialog = sppasInformationDialog("you are here")
>>> dialog.ShowModal()
>>> dialog.Destroy()
"""
def __init__(self, message):
super(sppasInformationDialog, self).__init__(
parent=None,
message=message,
style=wx.ICON_INFORMATION)
# -----------------------------------------------------------------------
def _create_buttons(self):
self.CreateActions([wx.ID_OK])
self.SetAffirmativeId(wx.ID_OK)
# ---------------------------------------------------------------------------
class sppasErrorDialog(sppasBaseMessageDialog):
"""Create a message in a wx.Dialog with a error message.
:author: Brigitte Bigi
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: develop@sppas.org
:license: GPL, v3
:copyright: Copyright (C) 2011-2019 Brigitte Bigi
wx.ID_OK is returned if the button is clicked.
wx.ID_CANCEL is returned if the dialog is destroyed.
>>> dialog = sppasErrorDialog("an error occurred")
>>> dialog.ShowModal()
>>> dialog.Destroy()
"""
def __init__(self, message, title=None):
super(sppasErrorDialog, self).__init__(
parent=None,
message=message,
title=title,
style=wx.ICON_ERROR)
# -----------------------------------------------------------------------
def _create_buttons(self):
self.CreateActions([wx.ID_OK])
self.SetAffirmativeId(wx.ID_OK)
# ---------------------------------------------------------------------------
# Ready-to-use functions to display messages
# ---------------------------------------------------------------------------
def YesNoQuestion(message):
"""Display a yes-no question.
:author: Brigitte Bigi
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: develop@sppas.org
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 Brigitte Bigi
:param message: (str) The question to ask
:returns: the response
wx.ID_YES or wx.ID_NO is returned if a button is clicked.
wx.ID_CANCEL is returned if the dialog is destroyed.
"""
logging.info(message)
dialog = sppasYesNoDialog(message)
response = dialog.ShowModal()
dialog.Destroy()
logging.info("User clicked yes" if response == wx.ID_YES else "User clicked no")
return response
def Confirm(message, title=None):
"""Display a confirmation after an error.
:author: Brigitte Bigi
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: develop@sppas.org
:license: GPL, v3
:copyright: Copyright (C) 2011-2019 Brigitte Bigi
:param message: (str) The error and confirmation question
:param title: (str) Title of the dialog window
:returns: the response
wx.ID_YES if ok button is clicked.
wx.ID_CANCEL is returned if the dialog is destroyed or cancel clicked.
"""
logging.error(message)
dialog = sppasConfirm(message, title)
response = dialog.ShowModal()
dialog.Destroy()
logging.info("Confirmed by user." if response == wx.ID_YES else "User cancelled.")
return response
def Error(message, title=None):
"""Display a error.
:author: Brigitte Bigi
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: develop@sppas.org
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 Brigitte Bigi
    :param message: (str) The error message to display
:returns: the response
wx.ID_OK is returned if a button is clicked.
wx.ID_CANCEL is returned if the dialog is destroyed.
"""
logging.error(message)
    dialog = sppasErrorDialog(message, title=title)
response = dialog.ShowModal()
dialog.Destroy()
return response
def Information(message):
"""Display an information.
:author: Brigitte Bigi
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: develop@sppas.org
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 Brigitte Bigi
    :param message: (str) The information message to display
:returns: the response
wx.ID_OK is returned if a button is clicked.
wx.ID_CANCEL is returned if the dialog is destroyed.
"""
logging.info(message)
dialog = sppasInformationDialog(message)
response = dialog.ShowModal()
dialog.Destroy()
return response
| 32.039216
| 116
| 0.554697
|
0845b812c7fd2b442414d75629db9e255dbc8dd1
| 1,350
|
py
|
Python
|
pokemonCodeChecker.py
|
DapperDog/PokemonTCGOCodeChecker
|
46f786cba10a6f97970902c860ee0dbac6d7bc05
|
[
"MIT"
] | 5
|
2019-08-26T00:09:51.000Z
|
2021-12-25T04:56:01.000Z
|
pokemonCodeChecker.py
|
DapperDog/PokemonTCGOCodeChecker
|
46f786cba10a6f97970902c860ee0dbac6d7bc05
|
[
"MIT"
] | 3
|
2018-08-28T04:31:52.000Z
|
2020-10-07T08:29:26.000Z
|
pokemonCodeChecker.py
|
DapperDog/PokemonTCGOCodeChecker
|
46f786cba10a6f97970902c860ee0dbac6d7bc05
|
[
"MIT"
] | 2
|
2020-05-06T05:19:14.000Z
|
2021-07-03T00:01:25.000Z
|
import requests
import time
import json
from configparser import ConfigParser
if __name__ == '__main__':
try:
config = ConfigParser()
config.read('settings.ini')
session_id = config.get('Session','session_id')
source_codes = config.get('Global','source_codes')
print session_id
cookies = {
'main_session_id': session_id,
'op_session_id': session_id
}
headers = {
}
with open("CheckedCodes-"+time.strftime("%Y%m%d-%H%M%S")+".txt",'a',0) as cc:
with open(source_codes) as sc:
for cnt, line in enumerate(sc):
data = [
('code', line)
]
response = requests.post('https://www.pokemon.com/us/pokemon-trainer-club/verify_code/', headers=headers, cookies=cookies, data=data)
code_json = json.loads(response.text)
print response.text
if code_json['valid']:
cc.write("{},{},{} \n".format(code_json['valid'],code_json['coupon_code'],code_json['coupon_title'].encode('utf-8')))
else:
cc.write("{},{},{} \n".format(code_json['valid'],code_json['coupon_code'],code_json['error_message'].encode('utf-8')))
sc.close
cc.close
except:
import sys
print sys.exc_info()[0]
import traceback
print traceback.format_exc()
print "Press Enter to continue ..."
raw_input()
| 30.681818
| 139
| 0.619259
|
b16ae1d488e0db54435971ec8cf9f0a58ef71c58
| 6,849
|
py
|
Python
|
tests/components/zha/common.py
|
silvertoken/core
|
9b8688b0fc624e7bfcb6cac81bcdadd9d2b4be79
|
[
"Apache-2.0"
] | 1
|
2021-09-04T14:03:00.000Z
|
2021-09-04T14:03:00.000Z
|
tests/components/zha/common.py
|
silvertoken/core
|
9b8688b0fc624e7bfcb6cac81bcdadd9d2b4be79
|
[
"Apache-2.0"
] | 1
|
2022-03-31T06:35:00.000Z
|
2022-03-31T06:35:00.000Z
|
tests/components/zha/common.py
|
silvertoken/core
|
9b8688b0fc624e7bfcb6cac81bcdadd9d2b4be79
|
[
"Apache-2.0"
] | 1
|
2020-05-18T09:13:52.000Z
|
2020-05-18T09:13:52.000Z
|
"""Common test objects."""
import time
from zigpy.device import Device as zigpy_dev
from zigpy.endpoint import Endpoint as zigpy_ep
import zigpy.profiles.zha
import zigpy.types
import zigpy.zcl
import zigpy.zcl.clusters.general
import zigpy.zcl.foundation as zcl_f
import zigpy.zdo.types
import homeassistant.components.zha.core.const as zha_const
from homeassistant.util import slugify
from tests.async_mock import AsyncMock, Mock
class FakeEndpoint:
"""Fake endpoint for moking zigpy."""
def __init__(self, manufacturer, model, epid=1):
"""Init fake endpoint."""
self.device = None
self.endpoint_id = epid
self.in_clusters = {}
self.out_clusters = {}
self._cluster_attr = {}
self.member_of = {}
self.status = 1
self.manufacturer = manufacturer
self.model = model
self.profile_id = zigpy.profiles.zha.PROFILE_ID
self.device_type = None
self.request = AsyncMock(return_value=[0])
def add_input_cluster(self, cluster_id):
"""Add an input cluster."""
cluster = zigpy.zcl.Cluster.from_id(self, cluster_id, is_server=True)
patch_cluster(cluster)
self.in_clusters[cluster_id] = cluster
if hasattr(cluster, "ep_attribute"):
setattr(self, cluster.ep_attribute, cluster)
def add_output_cluster(self, cluster_id):
"""Add an output cluster."""
cluster = zigpy.zcl.Cluster.from_id(self, cluster_id, is_server=False)
patch_cluster(cluster)
self.out_clusters[cluster_id] = cluster
@property
def __class__(self):
"""Fake being Zigpy endpoint."""
return zigpy_ep
@property
def unique_id(self):
"""Return the unique id for the endpoint."""
return self.device.ieee, self.endpoint_id
FakeEndpoint.add_to_group = zigpy_ep.add_to_group
FakeEndpoint.remove_from_group = zigpy_ep.remove_from_group
def patch_cluster(cluster):
"""Patch a cluster for testing."""
cluster.bind = AsyncMock(return_value=[0])
cluster.configure_reporting = AsyncMock(return_value=[0])
cluster.deserialize = Mock()
cluster.handle_cluster_request = Mock()
cluster.read_attributes = AsyncMock(return_value=[{}, {}])
cluster.read_attributes_raw = Mock()
cluster.unbind = AsyncMock(return_value=[0])
cluster.write_attributes = AsyncMock(return_value=[0])
if cluster.cluster_id == 4:
cluster.add = AsyncMock(return_value=[0])
class FakeDevice:
"""Fake device for mocking zigpy."""
def __init__(self, app, ieee, manufacturer, model, node_desc=None, nwk=0xB79C):
"""Init fake device."""
self._application = app
self.application = app
self.ieee = zigpy.types.EUI64.convert(ieee)
self.nwk = nwk
self.zdo = Mock()
self.endpoints = {0: self.zdo}
self.lqi = 255
self.rssi = 8
self.last_seen = time.time()
self.status = 2
self.initializing = False
self.skip_configuration = False
self.manufacturer = manufacturer
self.model = model
self.node_desc = zigpy.zdo.types.NodeDescriptor()
self.remove_from_group = AsyncMock()
if node_desc is None:
node_desc = b"\x02@\x807\x10\x7fd\x00\x00*d\x00\x00"
self.node_desc = zigpy.zdo.types.NodeDescriptor.deserialize(node_desc)[0]
FakeDevice.add_to_group = zigpy_dev.add_to_group
def get_zha_gateway(hass):
"""Return ZHA gateway from hass.data."""
try:
return hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
except KeyError:
return None
def make_attribute(attrid, value, status=0):
"""Make an attribute."""
attr = zcl_f.Attribute()
attr.attrid = attrid
attr.value = zcl_f.TypeValue()
attr.value.value = value
return attr
def send_attribute_report(hass, cluster, attrid, value):
"""Send a single attribute report."""
return send_attributes_report(hass, cluster, {attrid: value})
async def send_attributes_report(hass, cluster: int, attributes: dict):
"""Cause the sensor to receive an attribute report from the network.
This is to simulate the normal device communication that happens when a
device is paired to the zigbee network.
"""
attrs = [make_attribute(attrid, value) for attrid, value in attributes.items()]
hdr = make_zcl_header(zcl_f.Command.Report_Attributes)
cluster.handle_message(hdr, [attrs])
await hass.async_block_till_done()
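# Illustrative usage sketch from a test body (hypothetical example; `hass` and
# `cluster` are assumed to come from the usual test fixtures):
async def _example_attribute_report(hass, cluster):
    # Simulate the paired device reporting attribute 0x0000 with value 1.
    await send_attributes_report(hass, cluster, {0x0000: 1})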
async def find_entity_id(domain, zha_device, hass):
"""Find the entity id under the testing.
This is used to get the entity id in order to get the state from the state
machine so that we can test state changes.
"""
ieeetail = "".join([f"{o:02x}" for o in zha_device.ieee[:4]])
head = f"{domain}.{slugify(f'{zha_device.name} {ieeetail}')}"
    entity_ids = hass.states.async_entity_ids(domain)
await hass.async_block_till_done()
    for entity_id in entity_ids:
if entity_id.startswith(head):
return entity_id
return None
def async_find_group_entity_id(hass, domain, group):
"""Find the group entity id under test."""
entity_id = f"{domain}.{group.name.lower().replace(' ','_')}_zha_group_0x{group.group_id:04x}"
entity_ids = hass.states.async_entity_ids(domain)
if entity_id in entity_ids:
return entity_id
return None
async def async_enable_traffic(hass, zha_devices):
"""Allow traffic to flow through the gateway and the zha device."""
for zha_device in zha_devices:
zha_device.update_available(True)
await hass.async_block_till_done()
def make_zcl_header(
command_id: int, global_command: bool = True, tsn: int = 1
) -> zcl_f.ZCLHeader:
"""Cluster.handle_message() ZCL Header helper."""
if global_command:
frc = zcl_f.FrameControl(zcl_f.FrameType.GLOBAL_COMMAND)
else:
frc = zcl_f.FrameControl(zcl_f.FrameType.CLUSTER_COMMAND)
return zcl_f.ZCLHeader(frc, tsn=tsn, command_id=command_id)
def reset_clusters(clusters):
"""Reset mocks on cluster."""
for cluster in clusters:
cluster.bind.reset_mock()
cluster.configure_reporting.reset_mock()
cluster.write_attributes.reset_mock()
async def async_test_rejoin(hass, zigpy_device, clusters, report_counts, ep_id=1):
"""Test device rejoins."""
reset_clusters(clusters)
zha_gateway = get_zha_gateway(hass)
await zha_gateway.async_device_initialized(zigpy_device)
await hass.async_block_till_done()
for cluster, reports in zip(clusters, report_counts):
assert cluster.bind.call_count == 1
assert cluster.bind.await_count == 1
assert cluster.configure_reporting.call_count == reports
assert cluster.configure_reporting.await_count == reports
| 32.614286
| 98
| 0.68959
|
65cf6813778a528ab3521436c18bec8b2ec967f7
| 1,790
|
py
|
Python
|
data_preprocessing/main.py
|
SamedHrmn/MachineLearningWorks
|
9253c3d78881ce6085efa8f01b36943320a2f083
|
[
"MIT"
] | 1
|
2021-02-28T14:04:56.000Z
|
2021-02-28T14:04:56.000Z
|
data_preprocessing/main.py
|
SamedHrmn/MachineLearningWorks
|
9253c3d78881ce6085efa8f01b36943320a2f083
|
[
"MIT"
] | null | null | null |
data_preprocessing/main.py
|
SamedHrmn/MachineLearningWorks
|
9253c3d78881ce6085efa8f01b36943320a2f083
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Data preprocessing is the stage where the data is prepared
# for the machine learning algorithms.
# Reading the data.
data = pd.read_csv('veriler.csv')
# Filling in the missing parts of the data, using the loc method.
imputer = SimpleImputer(missing_values=np.nan,strategy="mean")
age = data.loc[:,['yas']]
imputer = imputer.fit(age)
age = imputer.transform(age)
print(age)
# If we extract with the iloc method instead:
'''
age = data.iloc[:,1:4].values
imputer = imputer.fit(age[:,1:4])
age[:,1:4] = imputer.transform(age[:,1:4])
print(age)
'''
# Converting categorical data to numeric data.
country = data.loc[:,['ulke']].values
labelEncoder = preprocessing.LabelEncoder()
country[:,0] = labelEncoder.fit_transform(data.iloc[:,0])
oneHotEncoder = preprocessing.OneHotEncoder()
country = oneHotEncoder.fit_transform(country).toarray()
# Converting the numpy arrays to dataframes.
countryFrame = pd.DataFrame(data=country, index= range(len(data)),columns=['fr','tr','us'])
ageFrame = pd.DataFrame(data=age,index=range(len(data)),columns=['yas'])
data[['yas']] = ageFrame['yas'].values
data = data.drop(columns="ulke")
resultFrame = pd.concat([countryFrame,data],axis=1)
print(resultFrame)
#
x1_frame = resultFrame
y1_frame = resultFrame.iloc[:,-1]
x1_frame = x1_frame.drop(columns='cinsiyet')
print(x1_frame)
print(y1_frame)
# Splitting the data into training and test sets.
x_train,x_test,y_train,y_test = \
train_test_split(x1_frame,y1_frame,test_size=0.33,random_state=0)
# Feature scaling.
sc = StandardScaler()
X_train = sc.fit_transform(x_train)
X_test = sc.transform(x_test)  # apply the scaler fitted on the training data; do not refit on the test set
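# A hedged sketch (not in the original tutorial): the same preprocessing can be
# expressed as one scikit-learn ColumnTransformer/Pipeline, fitted on the raw
# veriler.csv columns ('ulke', 'yas') before any of the manual editing above.
#
# from sklearn.compose import ColumnTransformer
# from sklearn.pipeline import Pipeline
#
# raw = pd.read_csv('veriler.csv')
# preprocess = ColumnTransformer([
#     ('country', preprocessing.OneHotEncoder(), ['ulke']),
#     ('age', Pipeline([
#         ('impute', SimpleImputer(missing_values=np.nan, strategy='mean')),
#         ('scale', StandardScaler()),
#     ]), ['yas']),
# ], remainder='passthrough')
# X_processed = preprocess.fit_transform(raw.drop(columns='cinsiyet'))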
| 27.538462
| 91
| 0.765363
|
2e3250063a5ead30354526ffcb6555612ac9287e
| 2,975
|
py
|
Python
|
pyostie/insights_ext.py
|
rishabhjha708/Pyostie
|
47091748bc746920f386952c1a6c1002340e3224
|
[
"MIT"
] | null | null | null |
pyostie/insights_ext.py
|
rishabhjha708/Pyostie
|
47091748bc746920f386952c1a6c1002340e3224
|
[
"MIT"
] | null | null | null |
pyostie/insights_ext.py
|
rishabhjha708/Pyostie
|
47091748bc746920f386952c1a6c1002340e3224
|
[
"MIT"
] | null | null | null |
import pytesseract
from pytesseract import Output
import cv2
import pandas as pd
import numpy as np
from PIL import Image
df = pd.DataFrame()
class generate_insights:
def __init__(self, filename, data):
"""
Parameters
----------
filename : The file that needs to be processed.
data : Dataframe to process the insights.
"""
self.file = filename
self.data = data
def generate_df(self):
"""
Returns
-------
A dataframe of insights with word coordinates, para num, line num,
word num, word height, word width, image height, image width etc.,
"""
top_plus_height = []
left_plus_width = []
img = cv2.imread(self.file)
image = Image.open(self.file)
w, h = image.size
d = pytesseract.image_to_data(img, output_type=Output.DICT)
self.data = self.data.assign(**d)
self.data.replace("", np.NaN, inplace=True)
self.data.replace(" ", np.NaN, inplace=True)
self.data.dropna(subset=["text"], inplace=True)
self.data = self.data.reset_index()
self.data = self.data.drop(["index", "block_num", "level"], 1)
image_width = [w] * len(self.data)
image_height = [h] * len(self.data)
self.data["conf"] = [i / 100 for i in self.data["conf"]]
self.data["image_width"] = image_width
self.data["image_height"] = image_height
for val in range(len(self.data)):
output = self.data["left"][val] + self.data["width"][val]
left_plus_width.append(output)
for val in range(len(self.data)):
output = self.data["top"][val] + self.data["height"][val]
top_plus_height.append(output)
self.data["top_plus_height"] = top_plus_height
self.data["left_plus_width"] = left_plus_width
self.data['topLeft'] = tuple(self.data[['left', 'top']].
apply(lambda x: ','.join(x.fillna('').map(str)), axis=1))
self.data['bottomLeft'] = tuple(self.data[['left', 'top_plus_height']].
apply(lambda x: ','.join(x.fillna('').map(str)), axis=1))
self.data['bottomRight'] = tuple(self.data[['left_plus_width', 'top_plus_height']].
apply(lambda x: ','.join(x.fillna('').map(str)), axis=1))
self.data['topRight'] = tuple(self.data[['left_plus_width', 'top']].
apply(lambda x: ','.join(x.fillna('').map(str)), axis=1))
self.data['topLeft'] = self.data['topLeft'].str.strip(',')
self.data['bottomLeft'] = self.data['bottomLeft'].str.strip(',')
self.data['bottomRight'] = self.data['bottomRight'].str.strip(',')
self.data['topRight'] = self.data['topRight'].str.strip(',')
self.data = self.data.drop(["left_plus_width", "top_plus_height"], 1)
return self.data
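# Hedged usage sketch (the file name below is illustrative, not a sample
# shipped with the package):
#
#   insights = generate_insights("scanned_page.png", pd.DataFrame())
#   word_df = insights.generate_df()
#   print(word_df[["text", "conf", "topLeft", "bottomRight"]].head())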
| 39.666667
| 98
| 0.563697
|
554ee05c0434555daadd3669c898381d0aace1b2
| 423
|
py
|
Python
|
dronekarta/wsgi.py
|
tlubenov/dronekarta
|
e743f0150ca25e55f1304ef56b2e3b708f9cf04b
|
[
"MIT"
] | null | null | null |
dronekarta/wsgi.py
|
tlubenov/dronekarta
|
e743f0150ca25e55f1304ef56b2e3b708f9cf04b
|
[
"MIT"
] | null | null | null |
dronekarta/wsgi.py
|
tlubenov/dronekarta
|
e743f0150ca25e55f1304ef56b2e3b708f9cf04b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
WSGI config for dronekarta project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dronekarta.settings")
application = get_wsgi_application()
| 22.263158
| 78
| 0.765957
|
72e04a6bc569e6c2d4c7daa90e8c5aae06ef42e0
| 8,191
|
py
|
Python
|
ppfa/selenium_tests/migrations/0005_auto__add_ppfateststeptype.py
|
Goldcap/django-selenium-testing
|
ad6fc09eb4fd8a54d0532c4478add0753ead1d96
|
[
"MIT"
] | null | null | null |
ppfa/selenium_tests/migrations/0005_auto__add_ppfateststeptype.py
|
Goldcap/django-selenium-testing
|
ad6fc09eb4fd8a54d0532c4478add0753ead1d96
|
[
"MIT"
] | null | null | null |
ppfa/selenium_tests/migrations/0005_auto__add_ppfateststeptype.py
|
Goldcap/django-selenium-testing
|
ad6fc09eb4fd8a54d0532c4478add0753ead1d96
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PpfaTestStepType'
db.create_table(u'selenium_tests_ppfateststeptype', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'selenium_tests', ['PpfaTestStepType'])
def backwards(self, orm):
# Deleting model 'PpfaTestStepType'
db.delete_table(u'selenium_tests_ppfateststeptype')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'selenium_tests.ppfatest': {
'Meta': {'object_name': 'PpfaTest'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_run': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'status': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'selenium_tests.ppfatestassertion': {
'Meta': {'object_name': 'PpfaTestAssertion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'ppfa_test': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ppfa_test_assertion_test'", 'to': u"orm['selenium_tests.PpfaTest']"}),
'ppfa_test_run': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ppfa_test_assertion_run'", 'to': u"orm['selenium_tests.PpfaTestRun']"}),
'status': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'selenium_tests.ppfatestrequest': {
'Meta': {'object_name': 'PpfaTestRequest'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ppfa_test': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ppfa_test_queue_test'", 'to': u"orm['selenium_tests.PpfaTest']"}),
'request_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'selenium_tests.ppfatestrun': {
'Meta': {'object_name': 'PpfaTestRun'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ppfa_test': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ppfa_test_runs'", 'to': u"orm['selenium_tests.PpfaTest']"}),
'status': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'selenium_tests.ppfateststep': {
'Meta': {'object_name': 'PpfaTestStep'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'ppfa_test': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ppfa_test_steps'", 'to': u"orm['selenium_tests.PpfaTest']"}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'selenium_tests.ppfateststeptype': {
'Meta': {'object_name': 'PpfaTestStepType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'selenium_tests.profile': {
'Approved': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'DateSubmitted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 3, 0, 0)', 'null': 'True', 'blank': 'True'}),
'FirstName': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'LastName': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'Meta': {'object_name': 'Profile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['selenium_tests']
| 70.008547
| 195
| 0.575266
|
f269c06374fe0321ab849c15b38a370061717143
| 8,673
|
py
|
Python
|
redock/base.py
|
xolox/python-redock
|
12f8282d7e6199468992c5ee558513cc2a5cc536
|
[
"MIT"
] | 8
|
2015-05-20T23:06:54.000Z
|
2020-12-03T03:45:23.000Z
|
redock/base.py
|
xolox/python-redock
|
12f8282d7e6199468992c5ee558513cc2a5cc536
|
[
"MIT"
] | null | null | null |
redock/base.py
|
xolox/python-redock
|
12f8282d7e6199468992c5ee558513cc2a5cc536
|
[
"MIT"
] | 2
|
2016-06-21T20:44:09.000Z
|
2017-08-29T21:11:49.000Z
|
# Initialization of the base image used by Redock.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: September 30, 2013
# URL: https://github.com/xolox/python-redock
"""
The :py:mod:`redock.base` module implements the initialization of the base
image used by Redock. You'll probably never need to use this module directly
because :py:func:`redock.api.Container.start()` calls
:py:func:`find_base_image()` and :py:func:`create_base_image()` as needed.
"""
# Standard library modules.
import pipes
# External dependencies.
from humanfriendly import Timer
from verboselogs import VerboseLogger
# Modules included in our package.
from redock.utils import (RemoteTerminal, get_ssh_public_key,
select_ubuntu_mirror, summarize_id)
# Initialize a logger for this module.
logger = VerboseLogger(__name__)
# The repository and tag of Redock's base image.
BASE_IMAGE_REPO = 'redock'
BASE_IMAGE_TAG = 'base'
BASE_IMAGE_NAME = '%s:%s' % (BASE_IMAGE_REPO, BASE_IMAGE_TAG)
SSHD_LOG_FILE = '/var/log/sshd.log'
APT_CONFIG = '''
# /etc/apt/apt.conf.d/90redock:
# Disable automatic installation of recommended packages. Debian doesn't do
# this; the policy change came from Ubuntu, and I don't like it one bit!
# Fortunately we can still change it :-)
APT::Install-Recommends "false";
# vim: ft=perl
'''
SOURCES_LIST = '''
# /etc/apt/sources.list: Use a local package mirror.
deb {mirror} precise main universe
# vim: ft=debsources
'''
SUPERVISOR_CONFIG = '''
# /etc/supervisor/conf.d/ssh-server.conf:
# Replacement for /etc/init/ssh.conf that doesn't need upstart.
[program:ssh-server]
command = bash -c 'mkdir -p -m0755 /var/run/sshd && /usr/sbin/sshd -eD'
# stdout_logfile = /var/log/supervisor/ssh-server.log
# redirect_stderr = true
autorestart = true
# vim: ft=dosini
'''.format(log_file=SSHD_LOG_FILE)
def find_base_image(client):
"""
Find the id of the base image that's used by Redock to create new
containers. If the image doesn't exist yet it will be created using
:py:func:`create_base_image()`.
:param client: Connection to Docker (instance of :py:class:`docker.Client`)
:returns: The unique id of the base image.
"""
logger.verbose("Looking for base image ..")
image_id = find_named_image(client, BASE_IMAGE_REPO, BASE_IMAGE_TAG)
if image_id:
logger.verbose("Found base image: %s", summarize_id(image_id))
return image_id
else:
logger.verbose("No base image found, creating it ..")
return create_base_image(client)
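# Hedged usage sketch, assuming a local Docker daemon and the docker-py client
# class referenced in the docstrings above:
#
#   import docker
#   client = docker.Client()
#   image_id = find_base_image(client)  # builds the base image on first use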
def create_base_image(client):
"""
Create the base image that's used by Redock to create new containers. This
base image differs from the ubuntu:precise_ image (on which it is based) on
a couple of points:
- Automatic installation of recommended packages is disabled to conserve
disk space.
- The Ubuntu package mirror is set to a geographically close location to
speed up downloading of system packages (see
:py:func:`redock.utils.select_ubuntu_mirror()`).
- The package list is updated to make sure apt-get_ installs the most up to
date packages.
- The following system packages are installed:
language-pack-en-base_
In a base Docker Ubuntu 12.04 image lots of commands complain loudly
about the locale_. This silences the warnings by fixing the problem
(if you want to call it that).
openssh-server_
After creating a new container Redock will connect to it over SSH_,
so having an SSH server installed is a good start :-)
supervisor_
The base Docker Ubuntu 12.04 image has init_ (upstart_) disabled.
Indeed we don't need all of the bagage that comes with init but it is
nice to have a process runner for the SSH_ server (and eventually maybe
more).
- The initscripts_ and upstart_ system packages are marked 'on hold' so
that apt-get_ will not upgrade them. This makes it possible to run
``apt-get dist-upgrade`` inside containers.
- An SSH_ key pair is generated and the SSH public key is installed inside
the base image so that Redock can connect to the container over SSH (you
need ssh-keygen_ installed).
- Supervisor_ is configured to automatically start the SSH_ server.
:param client: Connection to Docker (instance of :py:class:`docker.Client`)
:returns: The unique id of the base image.
.. _apt-get: http://manpages.ubuntu.com/manpages/precise/man8/apt-get.8.html
.. _init: http://manpages.ubuntu.com/manpages/precise/man8/init.8.html
.. _initscripts: http://packages.ubuntu.com/precise/initscripts
.. _language-pack-en-base: http://packages.ubuntu.com/precise/language-pack-en-base
.. _locale: http://en.wikipedia.org/wiki/Locale
.. _openssh-server: http://packages.ubuntu.com/precise/openssh-server
.. _ssh-keygen: http://manpages.ubuntu.com/manpages/precise/man1/ssh-keygen.1.html
.. _supervisor: http://packages.ubuntu.com/precise/supervisor
.. _ubuntu:precise: https://index.docker.io/_/ubuntu/
.. _upstart: http://packages.ubuntu.com/precise/upstart
"""
download_image(client, 'ubuntu', 'precise')
creation_timer = Timer()
logger.info("Initializing base image (this can take a few minutes but you only have to do it once) ..")
command = ' && '.join([
'echo %s > /etc/apt/apt.conf.d/90redock' % pipes.quote(APT_CONFIG.strip()),
'echo %s > /etc/apt/sources.list' % pipes.quote(SOURCES_LIST.format(mirror=select_ubuntu_mirror()).strip()),
'apt-get update',
'DEBIAN_FRONTEND=noninteractive apt-get install -q -y language-pack-en-base openssh-server supervisor',
'apt-get clean', # Don't keep the +/- 20 MB of *.deb archives after installation.
# Make it possible to run `apt-get dist-upgrade'.
# https://help.ubuntu.com/community/PinningHowto#Introduction_to_Holding_Packages
'apt-mark hold initscripts upstart',
# Install the generated SSH public key.
'mkdir -p /root/.ssh',
'echo %s > /root/.ssh/authorized_keys' % pipes.quote(get_ssh_public_key()),
# Create the Supervisor configuration for the SSH server.
'echo %s > /etc/supervisor/conf.d/ssh-server.conf' % pipes.quote(SUPERVISOR_CONFIG.strip())])
logger.debug("Generated command line: %s", command)
result = client.create_container(image='ubuntu:precise',
command='bash -c %s' % pipes.quote(command),
hostname='redock-template',
ports=['22'])
container_id = result['Id']
for text in result.get('Warnings', []):
logger.warn("%s", text)
logger.verbose("Created container %s.", summarize_id(container_id))
client.start(container_id)
with RemoteTerminal(container_id):
logger.info("Waiting for initialization to finish ..")
client.wait(container_id)
logger.info("Finished initialization in %s.", creation_timer)
commit_timer = Timer()
logger.info("Saving initialized container as new base image ..")
result = client.commit(container_id, repository='redock', tag='base')
logger.info("Done! Committed base image as %s in %s.", summarize_id(result['Id']), commit_timer)
return result['Id']
def find_named_image(client, repository, tag):
"""
Find the most recent Docker image with the given repository and tag.
:param repository: The name of the image's repository.
:param tag: The name of the image's tag.
:returns: The unique id of the most recent image available, or ``None`` if
no images were matched.
"""
matches = []
for image in client.images():
if image.get('Repository') == repository and image.get('Tag') == tag:
matches.append(image)
if matches:
matches.sort(key=lambda i: i['Created'])
return matches[-1]['Id']
def download_image(client, repository, tag):
"""
Download the requested image. If the image is already available locally it
won't be downloaded again.
:param client: Connection to Docker (instance of :py:class:`docker.Client`)
:param repository: The name of the image's repository.
:param tag: The name of the image's tag.
"""
if not find_named_image(client, repository, tag):
download_timer = Timer()
logger.info("Downloading image %s:%s (please be patient, this can take a while) ..", repository, tag)
client.pull(repository=repository, tag=tag)
logger.info("Finished downloading image in %s.", download_timer)
# vim: ts=4 sw=4 et
| 41.104265
| 116
| 0.688113
|
62a5ed2e1a57aad92c4658a03f54aeb7f07e7fc2
| 19,724
|
py
|
Python
|
tests/test_setup_chronos_job.py
|
jackchi/paasta
|
0899adcef43cb07c247a36f5af82f09bb6f8db12
|
[
"Apache-2.0"
] | 1
|
2019-05-07T12:01:48.000Z
|
2019-05-07T12:01:48.000Z
|
tests/test_setup_chronos_job.py
|
jackchi/paasta
|
0899adcef43cb07c247a36f5af82f09bb6f8db12
|
[
"Apache-2.0"
] | 4
|
2021-02-08T20:42:08.000Z
|
2021-06-02T00:51:04.000Z
|
tests/test_setup_chronos_job.py
|
eric-erki/An-open-distributed-platform-as-a-service
|
6769c5601685deb1017910ab8d09109e8e998892
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import mock
from pysensu_yelp import Status
from pytest import raises
from paasta_tools import chronos_tools
from paasta_tools import setup_chronos_job
from paasta_tools.utils import compose_job_id
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import NoDeploymentsAvailable
class TestSetupChronosJob:
fake_docker_image = 'test_docker:1.0'
fake_client = mock.MagicMock()
fake_service = 'test_service'
fake_instance = 'test'
fake_cluster = 'fake_test_cluster'
fake_config_dict = {
'name': 'test_service test gitsha config',
'description': 'This is a test Chronos job.',
'command': '/bin/sleep 40',
'bounce_method': 'graceful',
'epsilon': 'PT30M',
'retries': 5,
'owner': 'test@test.com',
'async': False,
'cpus': 5.5,
'mem': 1024.4,
'disk': 2048.5,
'disabled': 'true',
'schedule': 'R/2015-03-25T19:36:35Z/PT5M',
'schedule_time_zone': 'Zulu',
}
fake_branch_dict = {
'docker_image': f'paasta-{fake_service}-{fake_cluster}',
'git_sha': 'fake_sha',
'force_bounce': None,
'desired_state': 'start',
}
fake_chronos_job_config = chronos_tools.ChronosJobConfig(
service=fake_service,
cluster=fake_cluster,
instance=fake_instance,
config_dict=fake_config_dict,
branch_dict=fake_branch_dict,
)
fake_docker_registry = 'remote_registry.com'
fake_args = mock.MagicMock(
service_instance=compose_job_id(fake_service, fake_instance),
soa_dir='no_more',
verbose=False,
)
def test_config_with_historical_stats(self):
with mock.patch(
'paasta_tools.setup_chronos_job.chronos_tools.lookup_chronos_jobs',
autospec=True,
) as mock_lookup_chronos_jobs:
ret = [{
'lastSuccess': '2017-04-01T00:00:00Z',
'lastError': '2017-04-02T00:00:00Z',
'successCount': 1,
'errorCount': 1,
}]
mock_lookup_chronos_jobs.return_value = ret
init_config = {
'name': 'foo bar',
}
expected_merge = {
'name': 'foo bar',
'lastSuccess': '2017-04-01T00:00:00Z',
'lastError': '2017-04-02T00:00:00Z',
'successCount': 1,
'errorCount': 1,
}
actual = setup_chronos_job.config_with_historical_stats(
chronos_client=mock.Mock(),
service='foo',
instance='bar',
job_config=init_config,
)
assert actual == expected_merge
def test_config_with_historical_stats_no_existing(self):
with mock.patch(
'paasta_tools.setup_chronos_job.chronos_tools.lookup_chronos_jobs',
autospec=True,
) as mock_lookup_chronos_jobs:
ret = []
mock_lookup_chronos_jobs.return_value = ret
init_config = {
'name': 'foo bar',
}
expected_merge = {
'name': 'foo bar',
}
actual = setup_chronos_job.config_with_historical_stats(
chronos_client=mock.Mock(),
service='foo',
instance='bar',
job_config=init_config,
)
assert actual == expected_merge
def test_main_success(self):
expected_status = 0
expected_output = 'it_is_finished'
fake_complete_job_config = {'foo': 'bar'}
with mock.patch(
'paasta_tools.setup_chronos_job.parse_args',
return_value=self.fake_args,
autospec=True,
) as parse_args_patch, mock.patch(
'paasta_tools.chronos_tools.load_chronos_config', autospec=True,
) as load_chronos_config_patch, mock.patch(
'paasta_tools.chronos_tools.get_chronos_client',
return_value=self.fake_client,
autospec=True,
) as get_client_patch, mock.patch(
'paasta_tools.chronos_tools.create_complete_config',
return_value=fake_complete_job_config,
autospec=True,
), mock.patch(
'paasta_tools.setup_chronos_job.setup_job',
return_value=(expected_status, expected_output),
autospec=True,
) as setup_job_patch, mock.patch(
'paasta_tools.setup_chronos_job.send_event', autospec=True,
) as send_event_patch, mock.patch(
'paasta_tools.setup_chronos_job.load_system_paasta_config', autospec=True,
) as load_system_paasta_config_patch, mock.patch(
'sys.exit', autospec=True,
) as sys_exit_patch:
load_system_paasta_config_patch.return_value.get_cluster = mock.MagicMock(return_value=self.fake_cluster)
setup_chronos_job.main()
parse_args_patch.assert_called_once_with()
get_client_patch.assert_called_once_with(load_chronos_config_patch.return_value)
setup_job_patch.assert_called_once_with(
service=self.fake_service,
instance=self.fake_instance,
complete_job_config=fake_complete_job_config,
client=self.fake_client,
cluster=self.fake_cluster,
)
send_event_patch.assert_called_once_with(
service=self.fake_service,
instance=self.fake_instance,
soa_dir=self.fake_args.soa_dir,
status=expected_status,
output=expected_output,
)
sys_exit_patch.assert_called_once_with(0)
def test_main_no_deployments(self):
with mock.patch(
'paasta_tools.setup_chronos_job.parse_args',
return_value=self.fake_args,
autospec=True,
), mock.patch(
'paasta_tools.chronos_tools.load_chronos_config', autospec=True,
), mock.patch(
'paasta_tools.chronos_tools.get_chronos_client',
return_value=self.fake_client,
autospec=True,
), mock.patch(
'paasta_tools.chronos_tools.create_complete_config',
return_value={},
autospec=True,
side_effect=NoDeploymentsAvailable,
), mock.patch(
'paasta_tools.setup_chronos_job.setup_job',
return_value=(0, 'it_is_finished'),
autospec=True,
), mock.patch(
'paasta_tools.setup_chronos_job.load_system_paasta_config', autospec=True,
) as load_system_paasta_config_patch, mock.patch(
'paasta_tools.setup_chronos_job.send_event', autospec=True,
):
load_system_paasta_config_patch.return_value.get_cluster = mock.MagicMock(return_value=self.fake_cluster)
with raises(SystemExit) as excinfo:
setup_chronos_job.main()
assert excinfo.value.code == 0
def test_main_bad_chronos_job_config_notifies_user(self):
with mock.patch(
'paasta_tools.setup_chronos_job.parse_args',
return_value=self.fake_args,
autospec=True,
), mock.patch(
'paasta_tools.chronos_tools.load_chronos_config', autospec=True,
), mock.patch(
'paasta_tools.chronos_tools.get_chronos_client',
return_value=self.fake_client,
autospec=True,
), mock.patch(
'paasta_tools.chronos_tools.create_complete_config',
autospec=True,
side_effect=NoConfigurationForServiceError('test bad configuration'),
), mock.patch(
'paasta_tools.setup_chronos_job.setup_job',
return_value=(0, 'it_is_finished'),
autospec=True,
), mock.patch(
'paasta_tools.setup_chronos_job.load_system_paasta_config', autospec=True,
) as load_system_paasta_config_patch, mock.patch(
'paasta_tools.setup_chronos_job.send_event', autospec=True,
) as send_event_patch:
load_system_paasta_config_patch.return_value.get_cluster = mock.MagicMock(return_value=self.fake_cluster)
with raises(SystemExit) as excinfo:
setup_chronos_job.main()
assert excinfo.value.code == 0
expected_error_msg = (
"Could not read chronos configuration file for %s in cluster %s\nError was: test bad configuration"
% (compose_job_id(self.fake_service, self.fake_instance), self.fake_cluster)
)
send_event_patch.assert_called_once_with(
service=self.fake_service,
instance=self.fake_instance,
soa_dir=self.fake_args.soa_dir,
status=Status.CRITICAL,
output=expected_error_msg,
)
def test_setup_job_new_app_with_no_previous_jobs(self):
fake_existing_jobs = []
with mock.patch(
'paasta_tools.setup_chronos_job.bounce_chronos_job', autospec=True, return_value=(0, 'ok'),
) as mock_bounce_chronos_job, mock.patch(
'paasta_tools.chronos_tools.lookup_chronos_jobs',
autospec=True,
), mock.patch(
'paasta_tools.chronos_tools.sort_jobs',
autospec=True,
return_value=fake_existing_jobs,
), mock.patch(
'paasta_tools.utils.load_system_paasta_config', autospec=True,
), mock.patch(
'paasta_tools.chronos_tools.load_system_paasta_config', autospec=True,
) as load_system_paasta_config_patch, mock.patch(
'paasta_tools.chronos_tools.load_chronos_job_config',
autospec=True,
return_value=self.fake_chronos_job_config,
):
load_system_paasta_config_patch.return_value.get_cluster.return_value = self.fake_cluster
load_system_paasta_config_patch.return_value.get_volumes.return_value = []
load_system_paasta_config_patch.return_value.get_deploy_whitelist.return_value = None
load_system_paasta_config_patch.return_value.get_dockercfg_location.return_value = \
'file:///root/.dockercfg'
complete_config = chronos_tools.create_complete_config(
service=self.fake_service,
job_name=self.fake_instance,
soa_dir=self.fake_args.soa_dir,
)
actual = setup_chronos_job.setup_job(
service=self.fake_service,
instance=self.fake_instance,
complete_job_config=complete_config,
client=self.fake_client,
cluster=self.fake_cluster,
)
mock_bounce_chronos_job.assert_called_once_with(
service=self.fake_service,
instance=self.fake_instance,
cluster=self.fake_cluster,
job_to_update=complete_config,
client=self.fake_client,
)
assert actual == mock_bounce_chronos_job.return_value
def test_setup_job_with_previously_enabled_job(self):
fake_existing_job = {
'name': 'fake_job',
'disabled': False,
}
with mock.patch(
'paasta_tools.setup_chronos_job.bounce_chronos_job', autospec=True, return_value=(0, 'ok'),
) as mock_bounce_chronos_job, mock.patch(
'paasta_tools.chronos_tools.lookup_chronos_jobs',
autospec=True,
) as mock_lookup_chronos_jobs, mock.patch(
'paasta_tools.chronos_tools.sort_jobs',
autospec=True,
return_value=[fake_existing_job],
), mock.patch(
'paasta_tools.utils.load_system_paasta_config', autospec=True,
), mock.patch(
'paasta_tools.chronos_tools.load_system_paasta_config', autospec=True,
) as load_system_paasta_config_patch, mock.patch(
'paasta_tools.chronos_tools.load_chronos_job_config',
autospec=True, return_value=self.fake_chronos_job_config,
):
load_system_paasta_config_patch.return_value.get_cluster.return_value = self.fake_cluster
load_system_paasta_config_patch.return_value.get_volumes.return_value = []
load_system_paasta_config_patch.return_value.get_deploy_whitelist.return_value = None
load_system_paasta_config_patch.return_value.get_dockercfg_location.return_value = \
"file:///root/.dockercfg"
complete_config = chronos_tools.create_complete_config(
service=self.fake_service,
job_name=self.fake_instance,
soa_dir=self.fake_args.soa_dir,
)
actual = setup_chronos_job.setup_job(
service=self.fake_service,
instance=self.fake_instance,
complete_job_config=complete_config,
client=self.fake_client,
cluster=self.fake_cluster,
)
mock_bounce_chronos_job.assert_called_once_with(
service=self.fake_service,
instance=self.fake_instance,
cluster=self.fake_cluster,
job_to_update=complete_config,
client=self.fake_client,
)
assert mock_lookup_chronos_jobs.called
assert actual == mock_bounce_chronos_job.return_value
def test_setup_job_does_nothing_with_only_existing_app(self):
fake_existing_job = copy.deepcopy(self.fake_config_dict)
with mock.patch(
'paasta_tools.setup_chronos_job.bounce_chronos_job', autospec=True, return_value=(0, 'ok'),
) as mock_bounce_chronos_job, mock.patch(
'paasta_tools.chronos_tools.lookup_chronos_jobs',
autospec=True, return_value=[fake_existing_job],
) as mock_lookup_chronos_jobs, mock.patch(
'paasta_tools.chronos_tools.load_system_paasta_config', autospec=True,
) as load_system_paasta_config_patch, mock.patch(
'paasta_tools.chronos_tools.load_chronos_job_config',
autospec=True, return_value=self.fake_chronos_job_config,
):
load_system_paasta_config_patch.return_value.get_cluster.return_value = self.fake_cluster
complete_config = copy.deepcopy(self.fake_config_dict)
# Force the complete_config's name to match the return value of
# lookup_chronos_jobs to simulate that they have the same name
complete_config["name"] = fake_existing_job["name"]
actual = setup_chronos_job.setup_job(
service=self.fake_service,
instance=self.fake_instance,
complete_job_config=complete_config,
client=self.fake_client,
cluster=self.fake_cluster,
)
mock_bounce_chronos_job.assert_called_once_with(
service=self.fake_service,
instance=self.fake_instance,
cluster=self.fake_cluster,
job_to_update=None,
client=self.fake_client,
)
assert mock_lookup_chronos_jobs.called
assert actual == mock_bounce_chronos_job.return_value
def test_send_event(self):
fake_status = '42'
fake_output = 'something went wrong'
fake_soa_dir = ''
expected_check_name = 'setup_chronos_job.%s' % compose_job_id(self.fake_service, self.fake_instance)
with mock.patch(
"paasta_tools.monitoring_tools.send_event", autospec=True,
) as mock_send_event, mock.patch(
"paasta_tools.chronos_tools.load_chronos_job_config", autospec=True,
) as mock_load_chronos_job_config, mock.patch(
"paasta_tools.setup_chronos_job.load_system_paasta_config", autospec=True,
) as mock_load_system_paasta_config:
mock_load_system_paasta_config.return_value.get_cluster = mock.Mock(return_value='fake_cluster')
mock_load_chronos_job_config.return_value.get_monitoring.return_value = {}
setup_chronos_job.send_event(
service=self.fake_service,
instance=self.fake_instance,
soa_dir=fake_soa_dir,
status=fake_status,
output=fake_output,
)
mock_send_event.assert_called_once_with(
service=self.fake_service,
check_name=expected_check_name,
overrides={'alert_after': '10m', 'check_every': '10s'},
status=fake_status,
output=fake_output,
soa_dir=fake_soa_dir,
)
mock_load_chronos_job_config.assert_called_once_with(
service=self.fake_service,
instance=self.fake_instance,
cluster=mock_load_system_paasta_config.return_value.get_cluster.return_value,
soa_dir=fake_soa_dir,
load_deployments=False,
)
def test_bounce_chronos_job_takes_actions(self):
fake_job_to_update = {'name': 'job_to_update'}
with mock.patch(
"paasta_tools.setup_chronos_job._log", autospec=True,
) as mock_log, mock.patch(
"paasta_tools.chronos_tools.update_job", autospec=True,
) as mock_update_job:
setup_chronos_job.bounce_chronos_job(
service=self.fake_service,
instance=self.fake_instance,
cluster=self.fake_cluster,
job_to_update=fake_job_to_update,
client=self.fake_client,
)
mock_log.assert_any_call(
line=mock.ANY,
level='debug',
instance=self.fake_instance,
cluster=self.fake_cluster,
component='deploy',
service=self.fake_service,
)
mock_log.assert_any_call(
line="Updated Chronos job: job_to_update",
level='event',
instance=self.fake_instance,
cluster=self.fake_cluster,
component='deploy',
service=self.fake_service,
)
mock_update_job.assert_called_once_with(job=fake_job_to_update, client=self.fake_client)
def test_bounce_chronos_job_doesnt_log_when_nothing_to_do(self):
with mock.patch(
"paasta_tools.setup_chronos_job._log", autospec=True,
) as mock_log, mock.patch(
"paasta_tools.chronos_tools.update_job", autospec=True,
) as mock_update_job:
setup_chronos_job.bounce_chronos_job(
service=self.fake_service,
instance=self.fake_instance,
cluster=self.fake_cluster,
job_to_update=None,
client=self.fake_client,
)
assert not mock_log.called
assert not mock_update_job.called
| 42.971678
| 117
| 0.625482
|
2a17006a7c407c682d9ebca0702c3c2ac2e5dd44
| 5,924
|
py
|
Python
|
pandas/tests/base/test_misc.py
|
BhavarthShah/pandas
|
efb068f25b911ff3009d5692eb831df35bb042e5
|
[
"BSD-3-Clause"
] | 1
|
2021-09-17T14:27:25.000Z
|
2021-09-17T14:27:25.000Z
|
pandas/tests/base/test_misc.py
|
BhavarthShah/pandas
|
efb068f25b911ff3009d5692eb831df35bb042e5
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/base/test_misc.py
|
BhavarthShah/pandas
|
efb068f25b911ff3009d5692eb831df35bb042e5
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import numpy as np
import pytest
from pandas.compat import PYPY
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
)
import pandas as pd
from pandas import DataFrame, Index, IntervalIndex, Series
@pytest.mark.parametrize(
"op_name, op",
[
("add", "+"),
("sub", "-"),
("mul", "*"),
("mod", "%"),
("pow", "**"),
("truediv", "/"),
("floordiv", "//"),
],
)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docstring(klass, op_name, op):
# not using the all_arithmetic_functions fixture with _get_opstr
# as _get_opstr is used internally in the dynamic implementation of the docstring
operand1 = klass.__name__.lower()
operand2 = "other"
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
def test_none_comparison(series_with_simple_index):
series = series_with_simple_index
if isinstance(series.index, IntervalIndex):
# IntervalIndex breaks on "series[0] = np.nan" below
pytest.skip("IntervalIndex doesn't support assignment")
if len(series) < 1:
pytest.skip("Test doesn't make sense on empty data")
# bug brought up by #1079
# changed from TypeError in 0.17.0
series[0] = np.nan
# noinspection PyComparisonWithNone
result = series == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = series != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == series # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != series # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(series.dtype) or is_datetime64tz_dtype(series.dtype):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
msg = "Invalid comparison"
with pytest.raises(TypeError, match=msg):
None > series
with pytest.raises(TypeError, match=msg):
series > None
else:
result = None > series
assert not result.iat[0]
assert not result.iat[1]
result = series < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(index_or_series_obj):
obj = index_or_series_obj
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(obj, p, None) is not None
# deprecated properties
for p in ["strides", "itemsize", "base", "data"]:
assert not hasattr(obj, p)
msg = "can only convert an array of size 1 to a Python scalar"
with pytest.raises(ValueError, match=msg):
obj.item() # len > 1
assert obj.ndim == 1
assert obj.size == len(obj)
assert Index([1]).item() == 1
assert Series([1]).item() == 1
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(index_or_series_obj):
obj = index_or_series_obj
res = obj.memory_usage()
res_deep = obj.memory_usage(deep=True)
is_object = is_object_dtype(obj) or (
isinstance(obj, Series) and is_object_dtype(obj.index)
)
is_categorical = is_categorical_dtype(obj.dtype) or (
isinstance(obj, Series) and is_categorical_dtype(obj.index.dtype)
)
if len(obj) == 0:
expected = 0 if isinstance(obj, Index) else 80
assert res_deep == res == expected
elif is_object or is_categorical:
# only deep will pick them up
assert res_deep > res
else:
assert res == res_deep
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = res_deep - sys.getsizeof(obj)
assert abs(diff) < 100
def test_memory_usage_components_series(series_with_simple_index):
series = series_with_simple_index
total_usage = series.memory_usage(index=True)
non_index_usage = series.memory_usage(index=False)
index_usage = series.index.memory_usage()
assert total_usage == non_index_usage + index_usage
def test_memory_usage_components_narrow_series(narrow_series):
series = narrow_series
total_usage = series.memory_usage(index=True)
non_index_usage = series.memory_usage(index=False)
index_usage = series.index.memory_usage()
assert total_usage == non_index_usage + index_usage
def test_searchsorted(index_or_series_obj):
# numpy.searchsorted calls obj.searchsorted under the hood.
# See gh-12238
obj = index_or_series_obj
if isinstance(obj, pd.MultiIndex):
# See gh-14833
pytest.skip("np.searchsorted doesn't work on pd.MultiIndex")
max_obj = max(obj, default=0)
index = np.searchsorted(obj, max_obj)
assert 0 <= index <= len(obj)
index = np.searchsorted(obj, max_obj, sorter=range(len(obj)))
assert 0 <= index <= len(obj)
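# A brief illustration (not part of the test) of the searchsorted semantics
# exercised above, assuming a plain sorted Index:
#
#   >>> np.searchsorted(pd.Index([1, 3, 5]), 4)
#   2   # insertion position that keeps the values sorted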
def test_access_by_position(index):
if len(index) == 0:
pytest.skip("Test doesn't make sense on empty data")
elif isinstance(index, pd.MultiIndex):
pytest.skip("Can't instantiate Series from MultiIndex")
series = Series(index)
assert index[0] == series.iloc[0]
assert index[5] == series.iloc[5]
assert index[-1] == series.iloc[-1]
size = len(index)
assert index[-1] == index[size - 1]
msg = f"index {size} is out of bounds for axis 0 with size {size}"
with pytest.raises(IndexError, match=msg):
index[size]
msg = "single positional indexer is out-of-bounds"
with pytest.raises(IndexError, match=msg):
series.iloc[size]
| 29.919192
| 85
| 0.664247
|
055ac0c4ad64dc5c7eecefcd0848535784813a81
| 2,082
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/FCESCSWRJT_template.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/FCESCSWRJT_template.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/FCESCSWRJT_template.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FC_ESC_SW_RJT(Base):
__slots__ = ()
_SDM_NAME = 'FCESCSWRJT'
_SDM_ATT_MAP = {
'FC Header': 'fcESCSWRJT.header.fcHeader',
'FC Command': 'fcESCSWRJT.header.fcCmd',
'Reserved1': 'fcESCSWRJT.header.reserved1',
'Reserved2': 'fcESCSWRJT.header.reserved2',
'Reason Code': 'fcESCSWRJT.header.reasonCode',
'Reason Code Explanation': 'fcESCSWRJT.header.reasonExplain',
'Vendor Specific': 'fcESCSWRJT.header.vendor',
}
def __init__(self, parent):
super(FC_ESC_SW_RJT, self).__init__(parent)
@property
def FC_Header(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FC Header']))
@property
def FC_Command(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FC Command']))
@property
def Reserved1(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Reserved1']))
@property
def Reserved2(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Reserved2']))
@property
def Reason_Code(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Reason Code']))
@property
def Reason_Code_Explanation(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Reason Code Explanation']))
@property
def Vendor_Specific(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Vendor Specific']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| 35.896552
| 98
| 0.708934
|
dda3b73e5bd709fa2505bd70f6aa0ca886c9f3c1
| 144
|
py
|
Python
|
statistics/dist/weibull.py
|
Sunhick/ml-tutorials
|
c1b9ee075e43263d99f85d0842e3cfa0820b4432
|
[
"MIT"
] | 1
|
2019-11-17T09:50:22.000Z
|
2019-11-17T09:50:22.000Z
|
statistics/dist/weibull.py
|
Sunhick/ml-tutorials
|
c1b9ee075e43263d99f85d0842e3cfa0820b4432
|
[
"MIT"
] | null | null | null |
statistics/dist/weibull.py
|
Sunhick/ml-tutorials
|
c1b9ee075e43263d99f85d0842e3cfa0820b4432
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""
probability distributions.
"""
import sys
def main(args):
pass
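# Hedged illustration added to this stub: a pure-Python Weibull density,
# f(x; k, lam) = (k/lam) * (x/lam)**(k-1) * exp(-(x/lam)**k) for x >= 0.
# The name `weibull_pdf` is an example here, not an existing API.
import math

def weibull_pdf(x, k, lam):
    """Probability density of the Weibull(shape=k, scale=lam) distribution."""
    if x < 0:
        return 0.0
    return (k / lam) * (x / lam) ** (k - 1) * math.exp(-((x / lam) ** k))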
if __name__ == "__main__":
main(sys.argv[1:])
| 10.285714
| 26
| 0.631944
|
d70ed7942d90b41d1369e28934a0a2f86d205c1c
| 63,700
|
py
|
Python
|
examples/rc2.py
|
thibaultfalque/pysat
|
ed60b2571101bcfd05cd26617bf66c32540cf241
|
[
"MIT"
] | null | null | null |
examples/rc2.py
|
thibaultfalque/pysat
|
ed60b2571101bcfd05cd26617bf66c32540cf241
|
[
"MIT"
] | null | null | null |
examples/rc2.py
|
thibaultfalque/pysat
|
ed60b2571101bcfd05cd26617bf66c32540cf241
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
##
## rc2.py
##
## Created on: Dec 2, 2017
## Author: Alexey S. Ignatiev
## E-mail: aignatiev@ciencias.ulisboa.pt
##
"""
===============
List of classes
===============
.. autosummary::
:nosignatures:
RC2
RC2Stratified
==================
Module description
==================
An implementation of the RC2 algorithm for solving maximum
satisfiability. RC2 stands for *relaxable cardinality constraints*
(alternatively, *soft cardinality constraints*) and represents an
improved version of the OLLITI algorithm, which was described in
[1]_ and [2]_ and originally implemented in the `MSCG MaxSAT
solver <https://reason.di.fc.ul.pt/wiki/doku.php?id=mscg>`_.
Initially, this solver was supposed to serve as an example of a possible
PySAT usage illustrating how a state-of-the-art MaxSAT algorithm could be
implemented in Python and still be efficient. It participated in the
`MaxSAT Evaluations 2018
<https://maxsat-evaluations.github.io/2018/rankings.html>`_ and `2019
<https://maxsat-evaluations.github.io/2019/rankings.html>`_ where,
surprisingly, it was ranked first in two complete categories: *unweighted*
and *weighted*. A brief solver description can be found in [3]_. A more
detailed solver description can be found in [4]_.
.. [1] António Morgado, Carmine Dodaro, Joao Marques-Silva.
*Core-Guided MaxSAT with Soft Cardinality Constraints*. CP
2014. pp. 564-573
.. [2] António Morgado, Alexey Ignatiev, Joao Marques-Silva.
*MSCG: Robust Core-Guided MaxSAT Solving*. JSAT 9. 2014.
pp. 129-134
.. [3] Alexey Ignatiev, António Morgado, Joao Marques-Silva.
*RC2: A Python-based MaxSAT Solver*. MaxSAT Evaluation 2018.
p. 22
.. [4] Alexey Ignatiev, António Morgado, Joao Marques-Silva.
*RC2: An Efficient MaxSAT Solver*. MaxSAT Evaluation 2018.
JSAT 11. 2019. pp. 53-64
The file implements two classes: :class:`RC2` and
:class:`RC2Stratified`. The former class is the basic
implementation of the algorithm, which can be applied to a MaxSAT
formula in the :class:`.WCNF` format. The latter class
additionally implements Boolean lexicographic optimization (BLO)
[5]_ and stratification [6]_ on top of :class:`RC2`.
.. [5] Joao Marques-Silva, Josep Argelich, Ana Graça, Inês Lynce.
*Boolean lexicographic optimization: algorithms &
applications*. Ann. Math. Artif. Intell. 62(3-4). 2011.
pp. 317-343
.. [6] Carlos Ansótegui, Maria Luisa Bonet, Joel Gabàs, Jordi
Levy. *Improving WPM2 for (Weighted) Partial MaxSAT*. CP
2013. pp. 117-132
The implementation can be used as an executable (the list of
available command-line options can be shown using ``rc2.py -h``)
in the following way:
::
$ xzcat formula.wcnf.xz
p wcnf 3 6 4
1 1 0
1 2 0
1 3 0
4 -1 -2 0
4 -1 -3 0
4 -2 -3 0
$ rc2.py -vv formula.wcnf.xz
c formula: 3 vars, 3 hard, 3 soft
c cost: 1; core sz: 2; soft sz: 2
c cost: 2; core sz: 2; soft sz: 1
s OPTIMUM FOUND
o 2
v -1 -2 3
c oracle time: 0.0001
Alternatively, the algorithm can be accessed and invoked through the
standard ``import`` interface of Python, e.g.
.. code-block:: python
>>> from pysat.examples.rc2 import RC2
>>> from pysat.formula import WCNF
>>>
>>> wcnf = WCNF(from_file='formula.wcnf.xz')
>>>
>>> with RC2(wcnf) as rc2:
... for m in rc2.enumerate():
... print('model {0} has cost {1}'.format(m, rc2.cost))
model [-1, -2, 3] has cost 2
model [1, -2, -3] has cost 2
model [-1, 2, -3] has cost 2
model [-1, -2, -3] has cost 3
As can be seen in the example above, the solver can be instructed
either to compute one MaxSAT solution of an input formula, or to
enumerate a given number (or *all*) of its top MaxSAT solutions.
==============
Module details
==============
"""
#
#==============================================================================
from __future__ import print_function
import collections
import getopt
import itertools
from math import copysign
import os
from pysat.formula import CNFPlus, WCNFPlus
from pysat.card import ITotalizer
from pysat.solvers import Solver, SolverNames
import re
import six
from six.moves import range
import sys
#
#==============================================================================
class RC2(object):
"""
Implementation of the basic RC2 algorithm. Given a (weighted)
(partial) CNF formula, i.e. formula in the :class:`.WCNF`
format, this class can be used to compute a given number of
MaxSAT solutions for the input formula. :class:`RC2` roughly
follows the implementation of algorithm OLLITI [1]_ [2]_ of
MSCG and applies a few heuristics on top of it. These include
- *unsatisfiable core exhaustion* (see method :func:`exhaust_core`),
- *unsatisfiable core reduction* (see method :func:`minimize_core`),
- *intrinsic AtMost1 constraints* (see method :func:`adapt_am1`).
:class:`RC2` can use any SAT solver available in PySAT. The
default SAT solver to use is ``g3`` (see
:class:`.SolverNames`). Additionally, if Glucose is chosen,
the ``incr`` parameter controls whether to use the incremental
mode of Glucose [7]_ (turned off by default). Boolean
parameters ``adapt``, ``exhaust``, and ``minz`` control
whether or to apply detection and adaptation of intrinsic
AtMost1 constraints, core exhaustion, and core reduction.
Unsatisfiable cores can be trimmed if the ``trim`` parameter
is set to a non-zero integer. Finally, verbosity level can be
set using the ``verbose`` parameter.
.. [7] Gilles Audemard, Jean-Marie Lagniez, Laurent Simon.
*Improving Glucose for Incremental SAT Solving with
Assumptions: Application to MUS Extraction*. SAT 2013.
pp. 309-317
:param formula: (weighted) (partial) CNF formula
:param solver: SAT oracle name
:param adapt: detect and adapt intrinsic AtMost1 constraints
:param exhaust: do core exhaustion
:param incr: use incremental mode of Glucose
:param minz: do heuristic core reduction
:param trim: do core trimming at most this number of times
:param verbose: verbosity level
:type formula: :class:`.WCNF`
:type solver: str
:type adapt: bool
:type exhaust: bool
:type incr: bool
:type minz: bool
:type trim: int
:type verbose: int
"""
def __init__(self, formula, solver='g3', adapt=False, exhaust=False,
incr=False, minz=False, trim=0, verbose=0):
"""
Constructor.
"""
# saving verbosity level and other options
self.verbose = verbose
self.exhaust = exhaust
self.solver = solver
self.adapt = adapt
self.minz = minz
self.trim = trim
# clause selectors and mapping from selectors to clause ids
self.sels, self.smap, self.sall, self.s2cl, self.sneg = [], {}, [], {}, set([])
# other MaxSAT related stuff
self.topv = formula.nv
self.wght = {} # weights of soft clauses
self.sums = [] # totalizer sum assumptions
self.bnds = {} # a mapping from sum assumptions to totalizer bounds
self.tobj = {} # a mapping from sum assumptions to totalizer objects
self.cost = 0
# mappings between internal and external variables
VariableMap = collections.namedtuple('VariableMap', ['e2i', 'i2e'])
self.vmap = VariableMap(e2i={}, i2e={})
# initialize SAT oracle with hard clauses only
self.init(formula, incr=incr)
# core minimization is going to be extremely expensive
# for large plain formulas, and so we turn it off here
wght = self.wght.values()
if not formula.hard and len(self.sels) > 100000 and min(wght) == max(wght):
self.minz = False
def __del__(self):
"""
Destructor.
"""
self.delete()
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
def init(self, formula, incr=False):
"""
Initialize the internal SAT oracle. The oracle is used
incrementally and so it is initialized only once when
constructing an object of class :class:`RC2`. Given an
input :class:`.WCNF` formula, the method bootstraps the
oracle with its hard clauses. It also augments the soft
clauses with "fresh" selectors and adds them to the oracle
afterwards.
Optional input parameter ``incr`` (``False`` by default)
regulates whether or not Glucose's incremental mode [7]_
is turned on.
:param formula: input formula
:param incr: apply incremental mode of Glucose
:type formula: :class:`.WCNF`
:type incr: bool
"""
# creating a solver object
self.oracle = Solver(name=self.solver, bootstrap_with=formula.hard,
incr=incr, use_timer=True)
# adding native cardinality constraints (if any) as hard clauses
# this can be done only if the Minicard solver is in use
# this cannot be done if RC2 is run from the command line
if isinstance(formula, WCNFPlus) and formula.atms:
assert self.solver in SolverNames.minicard, \
'Only Minicard supports native cardinality constraints. Make sure you use the right type of formula.'
for atm in formula.atms:
self.oracle.add_atmost(*atm)
# adding soft clauses to oracle
for i, cl in enumerate(formula.soft):
selv = cl[0] # if clause is unit, selector variable is its literal
if len(cl) > 1:
self.topv += 1
selv = self.topv
self.s2cl[selv] = cl[:]
cl.append(-self.topv)
self.oracle.add_clause(cl)
if selv not in self.wght:
# record selector and its weight
self.sels.append(selv)
self.wght[selv] = formula.wght[i]
self.smap[selv] = i
else:
# selector is not new; increment its weight
self.wght[selv] += formula.wght[i]
# storing the set of selectors
self.sels_set = set(self.sels)
self.sall = self.sels[:]
# at this point internal and external variables are the same
for v in range(1, formula.nv + 1):
self.vmap.e2i[v] = v
self.vmap.i2e[v] = v
if self.verbose > 1:
print('c formula: {0} vars, {1} hard, {2} soft'.format(formula.nv,
len(formula.hard), len(formula.soft)))
def add_clause(self, clause, weight=None):
"""
The method for adding a new hard or soft clause to the
problem formula. Although the input formula is to be
specified as an argument of the constructor of
:class:`RC2`, adding clauses may be helpful when
*enumerating* MaxSAT solutions of the formula. This way,
the clauses are added incrementally, i.e. *on the fly*.
The clause to add can be any iterable over integer
            literals. The additional integer parameter ``weight`` can
            be set, meaning that the clause being added is soft and
            has the corresponding weight (note that parameter
            ``weight`` is set to ``None`` by default, meaning that the
            clause is hard).
:param clause: a clause to add
:param weight: weight of the clause (if any)
:type clause: iterable(int)
:type weight: int
.. code-block:: python
>>> from pysat.examples.rc2 import RC2
>>> from pysat.formula import WCNF
>>>
>>> wcnf = WCNF()
>>> wcnf.append([-1, -2]) # adding hard clauses
>>> wcnf.append([-1, -3])
>>>
>>> wcnf.append([1], weight=1) # adding soft clauses
>>> wcnf.append([2], weight=1)
>>> wcnf.append([3], weight=1)
>>>
>>> with RC2(wcnf) as rc2:
... rc2.compute() # solving the MaxSAT problem
[-1, 2, 3]
... print(rc2.cost)
1
... rc2.add_clause([-2, -3]) # adding one more hard clause
... rc2.compute() # computing another model
[-1, -2, 3]
... print(rc2.cost)
2
"""
# first, map external literals to internal literals
# introduce new variables if necessary
cl = list(map(lambda l: self._map_extlit(l), clause if not len(clause) == 2 or not type(clause[0]) == list else clause[0]))
if not weight:
if not len(clause) == 2 or not type(clause[0]) == list:
# the clause is hard, and so we simply add it to the SAT oracle
self.oracle.add_clause(cl)
else:
# this should be a native cardinality constraint,
# which can be used only together with Minicard
assert self.solver in SolverNames.minicard, \
'Only Minicard supports native cardinality constraints.'
self.oracle.add_atmost(cl, clause[1])
else:
# soft clauses should be augmented with a selector
selv = cl[0] # for a unit clause, no selector is needed
if len(cl) > 1:
self.topv += 1
selv = self.topv
self.s2cl[selv] = cl[:]
cl.append(-self.topv)
self.oracle.add_clause(cl)
if selv not in self.wght:
# record selector and its weight
self.sels.append(selv)
self.wght[selv] = weight
self.smap[selv] = len(self.sels) - 1
else:
# selector is not new; increment its weight
self.wght[selv] += weight
self.sall.append(selv)
self.sels_set.add(selv)
def delete(self):
"""
Explicit destructor of the internal SAT oracle and all the
            totalizer objects created during the solving process.
"""
if self.oracle:
self.oracle.delete()
self.oracle = None
if self.solver not in SolverNames.minicard: # for minicard, there is nothing to free
for t in six.itervalues(self.tobj):
t.delete()
def compute(self):
"""
This method can be used for computing one MaxSAT solution,
i.e. for computing an assignment satisfying all hard
clauses of the input formula and maximizing the sum of
weights of satisfied soft clauses. It is a wrapper for the
internal :func:`compute_` method, which does the job,
followed by the model extraction.
Note that the method returns ``None`` if no MaxSAT model
exists. The method can be called multiple times, each
being followed by blocking the last model. This way one
can enumerate top-:math:`k` MaxSAT solutions (this can
also be done by calling :meth:`enumerate()`).
:returns: a MaxSAT model
:rtype: list(int)
.. code-block:: python
>>> from pysat.examples.rc2 import RC2
>>> from pysat.formula import WCNF
>>>
>>> rc2 = RC2(WCNF()) # passing an empty WCNF() formula
>>> rc2.add_clause([-1, -2])
>>> rc2.add_clause([-1, -3])
>>> rc2.add_clause([-2, -3])
>>>
>>> rc2.add_clause([1], weight=1)
>>> rc2.add_clause([2], weight=1)
>>> rc2.add_clause([3], weight=1)
>>>
>>> model = rc2.compute()
>>> print(model)
[-1, -2, 3]
>>> print(rc2.cost)
2
>>> rc2.delete()
"""
# simply apply MaxSAT only once
res = self.compute_()
if res:
# extracting a model
self.model = self.oracle.get_model()
self.model = filter(lambda l: abs(l) in self.vmap.i2e, self.model)
self.model = map(lambda l: int(copysign(self.vmap.i2e[abs(l)], l)), self.model)
self.model = sorted(self.model, key=lambda l: abs(l))
return self.model
def enumerate(self, block=0):
"""
Enumerate top MaxSAT solutions (from best to worst). The
method works as a generator, which iteratively calls
:meth:`compute` to compute a MaxSAT model, blocks it
internally and returns it.
An optional parameter can be used to enforce computation of MaxSAT
models corresponding to different maximal satisfiable subsets
(MSSes) or minimal correction subsets (MCSes). To block MSSes, one
should set the ``block`` parameter to ``1``. To block MCSes, set
it to ``-1``. By the default (for blocking MaxSAT models),
``block`` is set to ``0``.
:param block: preferred way to block solutions when enumerating
:type block: int
:returns: a MaxSAT model
:rtype: list(int)
.. code-block:: python
>>> from pysat.examples.rc2 import RC2
>>> from pysat.formula import WCNF
>>>
>>> rc2 = RC2(WCNF()) # passing an empty WCNF() formula
>>> rc2.add_clause([-1, -2]) # adding clauses "on the fly"
>>> rc2.add_clause([-1, -3])
>>> rc2.add_clause([-2, -3])
>>>
>>> rc2.add_clause([1], weight=1)
>>> rc2.add_clause([2], weight=1)
>>> rc2.add_clause([3], weight=1)
>>>
>>> for model in rc2.enumerate():
... print(model, rc2.cost)
[-1, -2, 3] 2
[1, -2, -3] 2
[-1, 2, -3] 2
[-1, -2, -3] 3
>>> rc2.delete()
"""
done = False
while not done:
model = self.compute()
if model != None:
if block == 1:
# to block an MSS corresponding to the model, we add
# a clause enforcing at least one of the MSS clauses
# to be falsified next time
m, cl = set(self.oracle.get_model()), []
for selv in self.sall:
if selv in m:
# clause is satisfied
cl.append(-selv)
# next time we want to falsify one of these
# clauses, i.e. we should encode the negation
# of each of these selectors
if selv in self.s2cl and not selv in self.sneg:
self.sneg.add(selv)
for il in self.s2cl[selv]:
self.oracle.add_clause([selv, -il])
self.oracle.add_clause(cl)
elif block == -1:
# a similar (but simpler) piece of code goes here,
# to block the MCS corresponding to the model
# (this blocking is stronger than MSS blocking above)
m = set(self.oracle.get_model())
self.oracle.add_clause([l for l in filter(lambda l: -l in m, self.sall)])
else:
# here, we simply block a previous MaxSAT model
self.add_clause([-l for l in model])
yield model
else:
done = True
def compute_(self):
"""
Main core-guided loop, which iteratively calls a SAT
oracle, extracts a new unsatisfiable core and processes
it. The loop finishes as soon as a satisfiable formula is
obtained. If specified in the command line, the method
additionally calls :meth:`adapt_am1` to detect and adapt
intrinsic AtMost1 constraints before executing the loop.
:rtype: bool
"""
# trying to adapt (simplify) the formula
# by detecting and using atmost1 constraints
if self.adapt:
self.adapt_am1()
# main solving loop
while not self.oracle.solve(assumptions=self.sels + self.sums):
self.get_core()
if not self.core:
# core is empty, i.e. hard part is unsatisfiable
return False
self.process_core()
if self.verbose > 1:
print('c cost: {0}; core sz: {1}; soft sz: {2}'.format(self.cost,
len(self.core), len(self.sels) + len(self.sums)))
return True
def get_core(self):
"""
Extract unsatisfiable core. The result of the procedure is
stored in variable ``self.core``. If necessary, core
            trimming and also heuristic core reduction are applied
depending on the command-line options. A *minimum weight*
of the core is computed and stored in ``self.minw``.
Finally, the core is divided into two parts:
1. clause selectors (``self.core_sels``),
2. sum assumptions (``self.core_sums``).
"""
# extracting the core
self.core = self.oracle.get_core()
if self.core:
# try to reduce the core by trimming
self.trim_core()
# and by heuristic minimization
self.minimize_core()
# the core may be empty after core minimization
if not self.core:
return
# core weight
self.minw = min(map(lambda l: self.wght[l], self.core))
# dividing the core into two parts
iter1, iter2 = itertools.tee(self.core)
self.core_sels = list(l for l in iter1 if l in self.sels_set)
self.core_sums = list(l for l in iter2 if l not in self.sels_set)
def process_core(self):
"""
The method deals with a core found previously in
:func:`get_core`. Clause selectors ``self.core_sels`` and
sum assumptions involved in the core are treated
            separately from each other. This is handled by calling
methods :func:`process_sels` and :func:`process_sums`,
respectively. Whenever necessary, both methods relax the
core literals, which is followed by creating a new
totalizer object encoding the sum of the new relaxation
variables. The totalizer object can be "exhausted"
depending on the option.
"""
# updating the cost
self.cost += self.minw
# assumptions to remove
self.garbage = set()
if len(self.core_sels) != 1 or len(self.core_sums) > 0:
# process selectors in the core
self.process_sels()
            # process previously introduced sums in the core
self.process_sums()
if len(self.rels) > 1:
                # create a new cardinality constraint
t = self.create_sum()
# apply core exhaustion if required
b = self.exhaust_core(t) if self.exhaust else 1
if b:
# save the info about this sum and
# add its assumption literal
self.set_bound(t, b)
else:
# impossible to satisfy any of these clauses
# they must become hard
for relv in self.rels:
self.oracle.add_clause([relv])
else:
# unit cores are treated differently
# (their negation is added to the hard part)
self.oracle.add_clause([-self.core_sels[0]])
self.garbage.add(self.core_sels[0])
# remove unnecessary assumptions
self.filter_assumps()
def adapt_am1(self):
"""
Detect and adapt intrinsic AtMost1 constraints. Assume
there is a subset of soft clauses
:math:`\\mathcal{S}'\subseteq \\mathcal{S}` s.t.
:math:`\sum_{c\in\\mathcal{S}'}{c\leq 1}`, i.e. at most
one of the clauses of :math:`\\mathcal{S}'` can be
satisfied.
Each AtMost1 relationship between the soft clauses can be
detected in the following way. The method traverses all
soft clauses of the formula one by one, sets one
respective selector literal to true and checks whether
some other soft clauses are forced to be false. This is
checked by testing if selectors for other soft clauses are
unit-propagated to be false. Note that this method for
detection of AtMost1 constraints is *incomplete*, because
in general unit propagation does not suffice to test
whether or not :math:`\\mathcal{F}\wedge l_i\\models
\\neg{l_j}`.
Each intrinsic AtMost1 constraint detected this way is
handled by calling :func:`process_am1`.
"""
# literal connections
conns = collections.defaultdict(lambda: set([]))
confl = []
# prepare connections
for l1 in self.sels:
st, props = self.oracle.propagate(assumptions=[l1], phase_saving=2)
if st:
for l2 in props:
if -l2 in self.sels_set:
conns[l1].add(-l2)
conns[-l2].add(l1)
else:
# propagating this literal results in a conflict
confl.append(l1)
if confl: # filtering out unnecessary connections
ccopy = {}
confl = set(confl)
for l in conns:
if l not in confl:
cc = conns[l].difference(confl)
if cc:
ccopy[l] = cc
conns = ccopy
confl = list(confl)
# processing unit size cores
for l in confl:
self.core, self.minw = [l], self.wght[l]
self.core_sels, self.core_sums = [l], []
self.process_core()
if self.verbose > 1:
print('c unit cores found: {0}; cost: {1}'.format(len(confl),
self.cost))
nof_am1 = 0
len_am1 = []
lits = set(conns.keys())
while lits:
am1 = [min(lits, key=lambda l: len(conns[l]))]
for l in sorted(conns[am1[0]], key=lambda l: len(conns[l])):
if l in lits:
for l_added in am1[1:]:
if l_added not in conns[l]:
break
else:
am1.append(l)
# updating remaining lits and connections
lits.difference_update(set(am1))
for l in conns:
conns[l] = conns[l].difference(set(am1))
if len(am1) > 1:
# treat the new atmost1 relation
self.process_am1(am1)
nof_am1 += 1
len_am1.append(len(am1))
# updating the set of selectors
self.sels_set = set(self.sels)
if self.verbose > 1 and nof_am1:
print('c am1s found: {0}; avgsz: {1:.1f}; cost: {2}'.format(nof_am1,
sum(len_am1) / float(nof_am1), self.cost))
def process_am1(self, am1):
"""
Process an AtMost1 relation detected by :func:`adapt_am1`.
Note that given a set of soft clauses
:math:`\\mathcal{S}'` at most one of which can be
satisfied, one can immediately conclude that the formula
has cost at least :math:`|\\mathcal{S}'|-1` (assuming
*unweighted* MaxSAT). Furthermore, it is safe to replace
all clauses of :math:`\\mathcal{S}'` with a single soft
clause :math:`\sum_{c\in\\mathcal{S}'}{c}`.
Here, input parameter ``am1`` plays the role of subset
:math:`\\mathcal{S}'` mentioned above. The procedure bumps
the MaxSAT cost by ``self.minw * (len(am1) - 1)``.
All soft clauses involved in ``am1`` are replaced by a
single soft clause, which is a disjunction of the
selectors of clauses in ``am1``. The weight of the new
soft clause is set to ``self.minw``.
:param am1: a list of selectors connected by an AtMost1 constraint
:type am1: list(int)
"""
# computing am1's weight
self.minw = min(map(lambda l: self.wght[l], am1))
# pretending am1 to be a core, and the bound is its size - 1
self.core_sels, b = am1, len(am1) - 1
# incrementing the cost
self.cost += b * self.minw
# assumptions to remove
self.garbage = set()
# splitting and relaxing if needed
self.process_sels()
# new selector
self.topv += 1
selv = self.topv
self.oracle.add_clause([-l for l in self.rels] + [-selv])
# integrating the new selector
self.sels.append(selv)
self.wght[selv] = self.minw
self.smap[selv] = len(self.wght) - 1
# removing unnecessary assumptions
self.filter_assumps()
def trim_core(self):
"""
This method trims a previously extracted unsatisfiable
core at most a given number of times. If a fixed point is
reached before that, the method returns.
"""
for i in range(self.trim):
# call solver with core assumption only
# it must return 'unsatisfiable'
self.oracle.solve(assumptions=self.core)
# extract a new core
new_core = self.oracle.get_core()
if len(new_core) == len(self.core):
# stop if new core is not better than the previous one
break
# otherwise, update core
self.core = new_core
def minimize_core(self):
"""
Reduce a previously extracted core and compute an
over-approximation of an MUS. This is done using the
simple deletion-based MUS extraction algorithm.
The idea is to try to deactivate soft clauses of the
unsatisfiable core one by one while checking if the
remaining soft clauses together with the hard part of the
formula are unsatisfiable. Clauses that are necessary for
preserving unsatisfiability comprise an MUS of the input
formula (it is contained in the given unsatisfiable core)
and are reported as a result of the procedure.
During this core minimization procedure, all SAT calls are
dropped after obtaining 1000 conflicts.
"""
if self.minz and len(self.core) > 1:
self.core = sorted(self.core, key=lambda l: self.wght[l])
self.oracle.conf_budget(1000)
i = 0
while i < len(self.core):
to_test = self.core[:i] + self.core[(i + 1):]
if self.oracle.solve_limited(assumptions=to_test) == False:
self.core = to_test
else:
i += 1
def exhaust_core(self, tobj):
"""
Exhaust core by increasing its bound as much as possible.
Core exhaustion was originally referred to as *cover
optimization* in [6]_.
Given a totalizer object ``tobj`` representing a sum of
some *relaxation* variables :math:`r\in R` that augment
soft clauses :math:`\\mathcal{C}_r`, the idea is to
increase the right-hand side of the sum (which is equal to
1 by default) as much as possible, reaching a value
:math:`k` s.t. formula
:math:`\\mathcal{H}\wedge\\mathcal{C}_r\wedge(\sum_{r\in
R}{r\leq k})` is still unsatisfiable while increasing it
further makes the formula satisfiable (here
:math:`\\mathcal{H}` denotes the hard part of the
formula).
The rationale is that calling an oracle incrementally on a
series of slightly modified formulas focusing only on the
recently computed unsatisfiable core and disregarding the
rest of the formula may be practically effective.
"""
# the first case is simpler
if self.oracle.solve(assumptions=[-tobj.rhs[1]]):
return 1
else:
self.cost += self.minw
for i in range(2, len(self.rels)):
# saving the previous bound
self.tobj[-tobj.rhs[i - 1]] = tobj
self.bnds[-tobj.rhs[i - 1]] = i - 1
# increasing the bound
self.update_sum(-tobj.rhs[i - 1])
if self.oracle.solve(assumptions=[-tobj.rhs[i]]):
# the bound should be equal to i
return i
# the cost should increase further
self.cost += self.minw
return None
def process_sels(self):
"""
Process soft clause selectors participating in a new core.
The negation :math:`\\neg{s}` of each selector literal
:math:`s` participating in the unsatisfiable core is added
to the list of relaxation literals, which will be later
used to create a new totalizer object in
:func:`create_sum`.
If the weight associated with a selector is equal to the
            minimal weight of the core, i.e. ``self.minw``, the
selector is marked as garbage and will be removed in
:func:`filter_assumps`. Otherwise, the clause is split as
described in [1]_.
"""
# new relaxation variables
self.rels = []
for l in self.core_sels:
if self.wght[l] == self.minw:
# marking variable as being a part of the core
# so that next time it is not used as an assump
self.garbage.add(l)
# reuse assumption variable as relaxation
self.rels.append(-l)
else:
# do not remove this variable from assumps
# since it has a remaining non-zero weight
self.wght[l] -= self.minw
# it is an unrelaxed soft clause,
# a new relaxed copy of which we add to the solver
self.topv += 1
self.oracle.add_clause([l, self.topv])
self.rels.append(self.topv)
def process_sums(self):
"""
Process cardinality sums participating in a new core.
Whenever necessary, some of the sum assumptions are
removed or split (depending on the value of
``self.minw``). Deleted sums are marked as garbage and are
dealt with in :func:`filter_assumps`.
In some cases, the process involves updating the
right-hand sides of the existing cardinality sums (see the
call to :func:`update_sum`). The overall procedure is
detailed in [1]_.
"""
for l in self.core_sums:
if self.wght[l] == self.minw:
# marking variable as being a part of the core
# so that next time it is not used as an assump
self.garbage.add(l)
else:
# do not remove this variable from assumps
# since it has a remaining non-zero weight
self.wght[l] -= self.minw
# increase bound for the sum
t, b = self.update_sum(l)
# updating bounds and weights
if b < len(t.rhs):
lnew = -t.rhs[b]
if lnew in self.garbage:
self.garbage.remove(lnew)
self.wght[lnew] = 0
if lnew not in self.wght:
self.set_bound(t, b)
else:
self.wght[lnew] += self.minw
# put this assumption to relaxation vars
self.rels.append(-l)
def create_sum(self, bound=1):
"""
Create a totalizer object encoding a cardinality
constraint on the new list of relaxation literals obtained
in :func:`process_sels` and :func:`process_sums`. The
clauses encoding the sum of the relaxation literals are
added to the SAT oracle. The sum of the totalizer object
is encoded up to the value of the input parameter
``bound``, which is set to ``1`` by default.
:param bound: right-hand side for the sum to be created
:type bound: int
:rtype: :class:`.ITotalizer`
Note that if Minicard is used as a SAT oracle, native
cardinality constraints are used instead of
:class:`.ITotalizer`.
"""
if self.solver not in SolverNames.minicard: # standard totalizer-based encoding
# new totalizer sum
t = ITotalizer(lits=self.rels, ubound=bound, top_id=self.topv)
# updating top variable id
self.topv = t.top_id
# adding its clauses to oracle
for cl in t.cnf.clauses:
self.oracle.add_clause(cl)
else:
# for minicard, use native cardinality constraints instead of the
# standard totalizer, i.e. create a new (empty) totalizer sum and
# fill it with the necessary data supported by minicard
t = ITotalizer()
t.lits = self.rels
self.topv += 1 # a new variable will represent the bound
# proper initial bound
t.rhs = [None] * (len(t.lits))
t.rhs[bound] = self.topv
# new atmostb constraint instrumented with
# an implication and represented natively
rhs = len(t.lits)
amb = [[-self.topv] * (rhs - bound) + t.lits, rhs]
# add constraint to the solver
self.oracle.add_atmost(*amb)
return t
def update_sum(self, assump):
"""
The method is used to increase the bound for a given
totalizer sum. The totalizer object is identified by the
input parameter ``assump``, which is an assumption literal
associated with the totalizer object.
The method increases the bound for the totalizer sum,
which involves adding the corresponding new clauses to the
internal SAT oracle.
The method returns the totalizer object followed by the
new bound obtained.
:param assump: assumption literal associated with the sum
:type assump: int
:rtype: :class:`.ITotalizer`, int
Note that if Minicard is used as a SAT oracle, native
cardinality constraints are used instead of
:class:`.ITotalizer`.
"""
# getting a totalizer object corresponding to assumption
t = self.tobj[assump]
# increment the current bound
b = self.bnds[assump] + 1
if self.solver not in SolverNames.minicard: # the case of standard totalizer encoding
# increasing its bound
t.increase(ubound=b, top_id=self.topv)
# updating top variable id
self.topv = t.top_id
# adding its clauses to oracle
if t.nof_new:
for cl in t.cnf.clauses[-t.nof_new:]:
self.oracle.add_clause(cl)
else: # the case of cardinality constraints represented natively
# right-hand side is always equal to the number of input literals
rhs = len(t.lits)
if b < rhs:
# creating an additional bound
if not t.rhs[b]:
self.topv += 1
t.rhs[b] = self.topv
# a new at-most-b constraint
amb = [[-t.rhs[b]] * (rhs - b) + t.lits, rhs]
self.oracle.add_atmost(*amb)
return t, b
def set_bound(self, tobj, rhs):
"""
Given a totalizer sum and its right-hand side to be
enforced, the method creates a new sum assumption literal,
which will be used in the following SAT oracle calls.
:param tobj: totalizer sum
:param rhs: right-hand side
:type tobj: :class:`.ITotalizer`
:type rhs: int
"""
# saving the sum and its weight in a mapping
self.tobj[-tobj.rhs[rhs]] = tobj
self.bnds[-tobj.rhs[rhs]] = rhs
self.wght[-tobj.rhs[rhs]] = self.minw
# adding a new assumption to force the sum to be at most rhs
self.sums.append(-tobj.rhs[rhs])
def filter_assumps(self):
"""
Filter out unnecessary selectors and sums from the list of
assumption literals. The corresponding values are also
removed from the dictionaries of bounds and weights.
Note that assumptions marked as garbage are collected in
the core processing methods, i.e. in :func:`process_core`,
:func:`process_sels`, and :func:`process_sums`.
"""
self.sels = list(filter(lambda x: x not in self.garbage, self.sels))
self.sums = list(filter(lambda x: x not in self.garbage, self.sums))
self.bnds = {l: b for l, b in six.iteritems(self.bnds) if l not in self.garbage}
self.wght = {l: w for l, w in six.iteritems(self.wght) if l not in self.garbage}
self.sels_set.difference_update(set(self.garbage))
self.garbage.clear()
def oracle_time(self):
"""
Report the total SAT solving time.
"""
return self.oracle.time_accum()
def _map_extlit(self, l):
"""
Map an external variable to an internal one if necessary.
This method is used when new clauses are added to the
formula incrementally, which may result in introducing new
variables clashing with the previously used *clause
selectors*. The method makes sure no clash occurs, i.e. it
maps the original variables used in the new problem
clauses to the newly introduced auxiliary variables (see
:func:`add_clause`).
Given an integer literal, a fresh literal is returned. The
returned integer has the same sign as the input literal.
:param l: literal to map
:type l: int
:rtype: int
"""
v = abs(l)
if v in self.vmap.e2i:
return int(copysign(self.vmap.e2i[v], l))
else:
self.topv += 1
self.vmap.e2i[v] = self.topv
self.vmap.i2e[self.topv] = v
return int(copysign(self.topv, l))
#
#==============================================================================
class RC2Stratified(RC2, object):
"""
RC2 augmented with BLO and stratification techniques. Although
class :class:`RC2` can deal with weighted formulas, there are
situations when it is necessary to apply additional heuristics
to improve the performance of the solver on weighted MaxSAT
formulas. This class extends capabilities of :class:`RC2` with
two heuristics, namely
1. Boolean lexicographic optimization (BLO) [5]_
2. stratification [6]_
There is no way to enable only one of them -- both heuristics
are applied at the same time. Except for the aforementioned
additional techniques, every other component of the solver
remains as in the base class :class:`RC2`. Therefore, a user
is referred to the documentation of :class:`RC2` for details.
"""
def __init__(self, formula, solver='g3', adapt=False, exhaust=False,
incr=False, minz=False, nohard=False, trim=0, verbose=0):
"""
Constructor.
"""
# calling the constructor for the basic version
super(RC2Stratified, self).__init__(formula, solver=solver,
adapt=adapt, exhaust=exhaust, incr=incr, minz=minz, trim=trim,
verbose=verbose)
self.levl = 0 # initial optimization level
self.blop = [] # a list of blo levels
# do clause hardening
self.hard = nohard == False
# backing up selectors
self.bckp, self.bckp_set = self.sels, self.sels_set
self.sels = []
# initialize Boolean lexicographic optimization
self.init_wstr()
def init_wstr(self):
"""
Compute and initialize optimization levels for BLO and
stratification. This method is invoked once, from the
constructor of an object of :class:`RC2Stratified`. Given
the weights of the soft clauses, the method divides the
MaxSAT problem into several optimization levels.
"""
# a mapping for stratified problem solving,
# i.e. from a weight to a list of selectors
self.wstr = collections.defaultdict(lambda: [])
for s, w in six.iteritems(self.wght):
self.wstr[w].append(s)
# sorted list of distinct weight levels
self.blop = sorted([w for w in self.wstr], reverse=True)
# diversity parameter for stratification
self.sdiv = len(self.blop) / 2.0
# number of finished levels
self.done = 0
def compute(self):
"""
This method solves the MaxSAT problem iteratively. Each
optimization level is tackled the standard way, i.e. by
calling :func:`compute_`. A new level is started by
calling :func:`next_level` and finished by calling
:func:`finish_level`. Each new optimization level
activates more soft clauses by invoking
:func:`activate_clauses`.
"""
if self.done == 0:
# it is a fresh start of the solver
# i.e. no optimization level is finished yet
# first attempt to get an optimization level
self.next_level()
while self.levl != None and self.done < len(self.blop):
# add more clauses
self.done = self.activate_clauses(self.done)
if self.verbose > 1:
print('c wght str:', self.blop[self.levl])
# call RC2
if self.compute_() == False:
return
# updating the list of distinct weight levels
self.blop = sorted([w for w in self.wstr], reverse=True)
if self.done < len(self.blop):
if self.verbose > 1:
print('c curr opt:', self.cost)
# done with this level
if self.hard:
# harden the clauses if necessary
self.finish_level()
self.levl += 1
# get another level
self.next_level()
if self.verbose > 1:
print('c')
else:
# we seem to be in the model enumeration mode
# with the first model being already computed
# i.e. all levels are finished and so all clauses are present
# thus, we need to simply call RC2 for the next model
self.done = -1 # we are done with stratification, disabling it
if self.compute_() == False:
return
# extracting a model
self.model = self.oracle.get_model()
self.model = filter(lambda l: abs(l) in self.vmap.i2e, self.model)
self.model = map(lambda l: int(copysign(self.vmap.i2e[abs(l)], l)), self.model)
self.model = sorted(self.model, key=lambda l: abs(l))
return self.model
def next_level(self):
"""
Compute the next optimization level (starting from the
current one). The procedure represents a loop, each
iteration of which checks whether or not one of the
conditions holds:
- partial BLO condition
- stratification condition
If any of these holds, the loop stops.
"""
if self.levl >= len(self.blop):
self.levl = None
return
while self.levl < len(self.blop) - 1:
# number of selectors with weight less than current weight
numc = sum([len(self.wstr[w]) for w in self.blop[(self.levl + 1):]])
# sum of their weights
sumw = sum([w * len(self.wstr[w]) for w in self.blop[(self.levl + 1):]])
# partial BLO
if self.blop[self.levl] > sumw and sumw != 0:
break
# stratification
if numc / float(len(self.blop) - self.levl - 1) > self.sdiv:
break
self.levl += 1
def activate_clauses(self, beg):
"""
This method is used for activating the clauses that belong
to optimization levels up to the newly computed level. It
also reactivates previously deactivated clauses (see
:func:`process_sels` and :func:`process_sums` for
details).
"""
end = min(self.levl + 1, len(self.blop))
for l in range(beg, end):
for sel in self.wstr[self.blop[l]]:
if sel in self.bckp_set:
self.sels.append(sel)
else:
self.sums.append(sel)
# updating set of selectors
self.sels_set = set(self.sels)
return end
def finish_level(self):
"""
This method does postprocessing of the current
optimization level after it is solved. This includes
*hardening* some of the soft clauses (depending on their
remaining weights) and also garbage collection.
"""
# assumptions to remove
self.garbage = set()
# sum of weights of the remaining levels
sumw = sum([w * len(self.wstr[w]) for w in self.blop[(self.levl + 1):]])
# trying to harden selectors and sums
for s in self.sels + self.sums:
if self.wght[s] > sumw:
self.oracle.add_clause([s])
self.garbage.add(s)
if self.verbose > 1:
print('c hardened:', len(self.garbage))
# remove unnecessary assumptions
self.filter_assumps()
def process_am1(self, am1):
"""
Due to the solving process involving multiple optimization
levels to be treated individually, new soft clauses for
the detected intrinsic AtMost1 constraints should be
remembered. The method is a slightly modified version of
the base method :func:`RC2.process_am1` taking care of
this.
"""
# computing am1's weight
self.minw = min(map(lambda l: self.wght[l], am1))
# pretending am1 to be a core, and the bound is its size - 1
self.core_sels, b = am1, len(am1) - 1
# incrementing the cost
self.cost += b * self.minw
# assumptions to remove
self.garbage = set()
# splitting and relaxing if needed
self.process_sels()
# new selector
self.topv += 1
selv = self.topv
self.oracle.add_clause([-l for l in self.rels] + [-selv])
# integrating the new selector
self.sels.append(selv)
self.wght[selv] = self.minw
self.smap[selv] = len(self.wght) - 1
        # do not forget this newly added selector!
self.bckp_set.add(selv)
# removing unnecessary assumptions
self.filter_assumps()
def process_sels(self):
"""
A redefined version of :func:`RC2.process_sels`. The only
modification affects the clauses whose weight after
splitting becomes less than the weight of the current
            optimization level. Such clauses are deactivated, to be
reactivated at a later stage.
"""
# new relaxation variables
self.rels = []
# selectors that should be deactivated (but not removed completely)
to_deactivate = set([])
for l in self.core_sels:
if self.wght[l] == self.minw:
# marking variable as being a part of the core
# so that next time it is not used as an assump
self.garbage.add(l)
# reuse assumption variable as relaxation
self.rels.append(-l)
else:
# do not remove this variable from assumps
# since it has a remaining non-zero weight
self.wght[l] -= self.minw
# deactivate this assumption and put at a lower level
                # if self.done != -1, i.e. if stratification is still enabled
if self.done != -1 and self.wght[l] < self.blop[self.levl]:
self.wstr[self.wght[l]].append(l)
to_deactivate.add(l)
# it is an unrelaxed soft clause,
# a new relaxed copy of which we add to the solver
self.topv += 1
self.oracle.add_clause([l, self.topv])
self.rels.append(self.topv)
# deactivating unnecessary selectors
self.sels = list(filter(lambda x: x not in to_deactivate, self.sels))
def process_sums(self):
"""
A redefined version of :func:`RC2.process_sums`. The only
modification affects the clauses whose weight after
splitting becomes less than the weight of the current
            optimization level. Such clauses are deactivated, to be
reactivated at a later stage.
"""
# sums that should be deactivated (but not removed completely)
to_deactivate = set([])
for l in self.core_sums:
if self.wght[l] == self.minw:
# marking variable as being a part of the core
# so that next time it is not used as an assump
self.garbage.add(l)
else:
# do not remove this variable from assumps
# since it has a remaining non-zero weight
self.wght[l] -= self.minw
# deactivate this assumption and put at a lower level
                # if self.done != -1, i.e. if stratification is still enabled
if self.done != -1 and self.wght[l] < self.blop[self.levl]:
self.wstr[self.wght[l]].append(l)
to_deactivate.add(l)
# increase bound for the sum
t, b = self.update_sum(l)
# updating bounds and weights
if b < len(t.rhs):
lnew = -t.rhs[b]
if lnew in self.garbage:
self.garbage.remove(lnew)
self.wght[lnew] = 0
if lnew not in self.wght:
self.set_bound(t, b)
else:
self.wght[lnew] += self.minw
# put this assumption to relaxation vars
self.rels.append(-l)
# deactivating unnecessary sums
self.sums = list(filter(lambda x: x not in to_deactivate, self.sums))
#
#==============================================================================
def parse_options():
"""
        Parses command-line options.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], 'ab:c:e:hilms:t:vx',
['adapt', 'block=', 'comp=', 'enum=', 'exhaust', 'help',
'incr', 'blo', 'minimize', 'solver=', 'trim=', 'verbose',
'vnew'])
except getopt.GetoptError as err:
sys.stderr.write(str(err).capitalize())
usage()
sys.exit(1)
adapt = False
block = 'model'
exhaust = False
cmode = None
to_enum = 1
incr = False
blo = False
minz = False
solver = 'g3'
trim = 0
verbose = 1
vnew = False
for opt, arg in opts:
if opt in ('-a', '--adapt'):
adapt = True
elif opt in ('-b', '--block'):
block = str(arg)
elif opt in ('-c', '--comp'):
cmode = str(arg)
elif opt in ('-e', '--enum'):
to_enum = str(arg)
if to_enum != 'all':
to_enum = int(to_enum)
else:
to_enum = 0
elif opt in ('-h', '--help'):
usage()
sys.exit(0)
elif opt in ('-i', '--incr'):
incr = True
elif opt in ('-l', '--blo'):
blo = True
elif opt in ('-m', '--minimize'):
minz = True
elif opt in ('-s', '--solver'):
solver = str(arg)
elif opt in ('-t', '--trim'):
trim = int(arg)
elif opt in ('-v', '--verbose'):
verbose += 1
elif opt == '--vnew':
vnew = True
elif opt in ('-x', '--exhaust'):
exhaust = True
else:
assert False, 'Unhandled option: {0} {1}'.format(opt, arg)
bmap = {'mcs': -1, 'mcses': -1, 'model': 0, 'models': 0, 'mss': 1, 'msses': 1}
assert block in bmap, 'Unknown solution blocking'
block = bmap[block]
return adapt, blo, block, cmode, to_enum, exhaust, incr, minz, \
solver, trim, verbose, vnew, args
#
#==============================================================================
def usage():
"""
Prints usage message.
"""
print('Usage:', os.path.basename(sys.argv[0]), '[options] dimacs-file')
print('Options:')
print(' -a, --adapt Try to adapt (simplify) input formula')
print(' -b, --block=<string> When enumerating MaxSAT models, how to block previous solutions')
print(' Available values: mcs, model, mss (default = model)')
print(' -c, --comp=<string> Enable one of the MSE18 configurations')
print(' Available values: a, b, none (default = none)')
print(' -e, --enum=<int> Number of MaxSAT models to compute')
print(' Available values: [1 .. INT_MAX], all (default = 1)')
print(' -h, --help Show this message')
print(' -i, --incr Use SAT solver incrementally (only for g3 and g4)')
print(' -l, --blo Use BLO and stratification')
print(' -m, --minimize Use a heuristic unsatisfiable core minimizer')
print(' -s, --solver=<string> SAT solver to use')
print(' Available values: g3, g4, lgl, mcb, mcm, mpl, m22, mc, mgh (default = g3)')
print(' -t, --trim=<int> How many times to trim unsatisfiable cores')
print(' Available values: [0 .. INT_MAX] (default = 0)')
print(' -v, --verbose Be verbose')
print(' --vnew Print v-line in the new format')
print(' -x, --exhaust Exhaust new unsatisfiable cores')
#
#==============================================================================
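# Example invocation (illustrative only; the input file name is hypothetical):
# enable AtMost1 adaptation (-a), core exhaustion (-x), heuristic core
# minimization (-m) and 5 rounds of core trimming (-t 5), with verbose output:
#
#   $ python rc2.py -v -a -x -m -t 5 formula.wcnf.gz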
if __name__ == '__main__':
adapt, blo, block, cmode, to_enum, exhaust, incr, minz, solver, trim, \
verbose, vnew, files = parse_options()
if files:
# parsing the input formula
if re.search('\.wcnf[p|+]?(\.(gz|bz2|lzma|xz))?$', files[0]):
formula = WCNFPlus(from_file=files[0])
else: # expecting '*.cnf[,p,+].*'
formula = CNFPlus(from_file=files[0]).weighted()
# enabling the competition mode
if cmode:
assert cmode in ('a', 'b'), 'Wrong MSE18 mode chosen: {0}'.format(cmode)
adapt, blo, exhaust, solver, verbose = True, True, True, 'g3', 3
if cmode == 'a':
trim = 5 if max(formula.wght) > min(formula.wght) else 0
minz = False
else:
trim, minz = 0, True
# trying to use unbuffered standard output
if sys.version_info.major == 2:
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
# deciding whether or not to stratify
if blo and max(formula.wght) > min(formula.wght):
MXS = RC2Stratified
else:
MXS = RC2
# starting the solver
with MXS(formula, solver=solver, adapt=adapt, exhaust=exhaust,
incr=incr, minz=minz, trim=trim, verbose=verbose) as rc2:
# disable clause hardening in case we enumerate multiple models
if isinstance(rc2, RC2Stratified) and to_enum != 1:
print('c hardening is disabled for model enumeration')
rc2.hard = False
optimum_found = False
for i, model in enumerate(rc2.enumerate(block=block), 1):
optimum_found = True
if verbose:
if i == 1:
print('s OPTIMUM FOUND')
print('o {0}'.format(rc2.cost))
if verbose > 2:
if vnew: # new format of the v-line
print('v', ''.join(str(int(l > 0)) for l in model))
else:
print('v', ' '.join([str(l) for l in model]))
if i == to_enum:
break
else:
# needed for MSE'20
if verbose > 2 and vnew and to_enum != 1 and block == 1:
print('v')
if verbose:
if not optimum_found:
print('s UNSATISFIABLE')
elif to_enum != 1:
print('c models found:', i)
if verbose > 1:
print('c oracle time: {0:.4f}'.format(rc2.oracle_time()))
| 36.863426
| 131
| 0.542339
|
d2864ce15bfb103444564b842f02244626f982fd
| 32,254
|
py
|
Python
|
hiseq/bin/piRNA_pipe_utils.py
|
bakerwm/hiseq
|
f05ed5011377350964ac57874257adb3ef53c172
|
[
"MIT"
] | 1
|
2022-03-24T14:03:22.000Z
|
2022-03-24T14:03:22.000Z
|
hiseq/bin/piRNA_pipe_utils.py
|
bakerwm/hiseq
|
f05ed5011377350964ac57874257adb3ef53c172
|
[
"MIT"
] | null | null | null |
hiseq/bin/piRNA_pipe_utils.py
|
bakerwm/hiseq
|
f05ed5011377350964ac57874257adb3ef53c172
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
piRNA analysis (small RNAseq)
date: 2020-12-23
Author: Ming Wang
# flowchart of piRNA analysis
1. remove structural RNAs (unique + multi)
2. remove miRNAs (unique + multi)
3. remove reads not in [23-29] nt
4. collapse reads: consider 1-23 nt only, allow 1-2 nt at the 3' end to differ (2019-11-26)
5. split into 4 groups: (1U+/-,10A+/-;)
6. map to TE consensus, (unique + multi), only 1U_not_10A
7. map to genome (unique + multi)
Functions:
rename fastq reads: piR0000001-0000001
piR (piRNA), (piRNA number) {reads number}
mapping, unique + multiple/unique
version: 2020-07-28
update:
1. remove temp fastq files
2. gzip fastq files
version: 2020-07-25
update:
1. collapse reads
date: 2020-07-23
in brief:
1. remove 3' adapters
2. remove structural RNAs (tRNAs, rRNAs) (unique + multiple)
3. filt by length, 23-29 nt
4. collapse reads, only compare 1-23 nt (*)
collapse reads, (regular)
trim reads to 23nt from 3' end
5. map to TE consensus (unique, multiple, unique + multiple)
6. map to piRNA clusters (unique, multiple, unique + multiple)
7. map to genome (not-TE, no-piRNAcluster)
8. Overall, map to genome
"""
import os
import sys
import re
import gzip
import fnmatch
import binascii
import shutil
import logging
import argparse
import pybedtools
import pysam
import pandas as pd
import numpy as np
from xopen import xopen
from hiseq.utils.helper import *
from hiseq.utils.seq import Fastx
################################################################################
def collapse_fx(fx_in, fx_out):
"""
collapse fastx by seq
fx_out: could be : fastq/fasta, gz
"""
outdir = os.path.dirname(fx_out)
check_path(outdir)
# check fa/fq
fx_name = os.path.basename(fx_out)
fx_name = fx_name.replace('.gz', '')
if fx_name.endswith('.fq') or fx_name.endswith('.fastq'):
fq_out = True
else:
fq_out = False
if file_exists(fx_out):
log.info('collapse_fx() skipped, file exists: {}'.format(fx_out))
else:
d = {}
with xopen(fx_in, 'rt') as r:
for n, s, q, m in readfq(r):
d[s] = d.get(s, 0) + 1
# sort by value (decreasing)
i = 0
with xopen(fx_out, 'wt') as w:
for k, v in sorted(d.items(), key=lambda item: item[1], reverse=True):
i += 1
out = '>{}-{}\n{}\n'.format(i, v, k)
if fq_out:
out = '@{}-{}\n{}\n+\n{}\n'.format(i, v, k, 'I'*len(k))
w.write(out)
return fx_out
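# Usage sketch for collapse_fx() (paths are hypothetical): identical sequences
# in 'demo.fq.gz' are merged and renamed '<rank>-<count>', sorted by decreasing
# count, so the most frequent sequence becomes e.g. '>1-532' in the collapsed
# fasta output:
#
#   collapse_fx('demo.fq.gz', 'results/demo.collapsed.fa.gz')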
def count_fx(fx, collapsed=False):
"""Count reads
if collapsed
parse the name: id-count
return
counts:
species:
"""
if not isinstance(fx, str):
log.warning('count_fx() skipped, fx expect str, got {}'.format(
type(fx).__name__))
else:
if not file_exists(fx):
log.warning('count_fx() skipped, fx not exists: {}'.format(fx))
else:
fname = filename(fx)
out_toml = os.path.join(os.path.dirname(fx), fname+'.fx_stat.toml')
if file_exists(out_toml):
log.info('count_fx() skipped, file exists: {}'.format(out_toml))
d = Toml().from_toml(out_toml)
else:
n_read = 0
n_species = 0
if collapsed:
with xopen(fx, 'rt') as r:
for n, _, _, _ in readfq(r):
if '-' in n:
name, i = n.split('-', 1)
                                i = int(i)  # the read count is encoded after '-'
else:
i = 1
n_read += i
n_species += 1
else:
n_read = n_species = Fastx(fx).number_of_seq()
# save to file
d = {
'name': fname,
'num_seqs': n_read,
'num_species': n_species
}
Toml(d).to_toml(out_toml)
# return pd.DataFrame.from_dict(d, 'columns')
return {fname: d}
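# Usage sketch for count_fx() (file name is hypothetical): for a collapsed
# fastx whose read names encode counts ('<id>-<count>'), the returned dict
# separates total reads ('num_seqs') from distinct sequences ('num_species');
# a .fx_stat.toml file is also written next to the input:
#
#   stat = count_fx('demo.collapsed.fa.gz', collapsed=True)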
def count_fx_dir(x, collapsed=False):
"""Count reads in directory: x
Concat all the counts
assume all fx files are gzipped
"""
d = {}
if isinstance(x, str):
if os.path.isdir(x):
fx_list = listfile(x, '*.gz', recursive=False)
if len(fx_list) > 0:
for fx in fx_list:
d.update(count_fx(fx, collapsed))
# save to file
out_toml = os.path.join(x, 'fx_stat.toml')
Toml(d).to_toml(out_toml)
else:
log.warning('count_fx_dir() skipped, x not a directory: {}'.format(x))
def stat_fq(fq, outdir=None):
"""
seqkit stat fq
output: fq.fq_stat.txt
"""
if isinstance(fq, str):
if not isinstance(outdir, str):
outdir = os.path.dirname(fq)
fname = fq_name(fq)
        fq_stat_name = '{}.fq_stat.txt'.format(fname)
fq_stat_txt = os.path.join(outdir, fq_stat_name)
if outdir is None:
outdir = os.path.dirname(fq)
check_path(outdir)
# run
cmd = 'seqkit stat {} > {}'.format(fq, fq_stat_txt)
if file_exists(fq_stat_txt):
# log.info('stat_fq() skipped, file exists: {}'.format(fname))
pass
else:
# log.info('Run stat_fq(): {}'.format(fname))
os.system(cmd)
elif isinstance(fq, list):
if len(fq) > 0:
[stat_fq(i) for i in fq]
else:
log.warning('stat_fq() skipped, str, list expected, got {}'.format(
type(fq).__name__))
def stat_fq2(x, recursive=False):
"""Run seqkit stat for fastq files in x
recursive
"""
if isinstance(x, str):
if os.path.isdir(x):
fq_list = list_fq(x, recursive)
stat_fq(fq_list)
# organize
wrap_stat_fq(x, recursive)
def read_seqkit_stat(x):
"""
Read number of reads, in seqkit stat output,
eg:
file format type num_seqs sum_len min_len avg_len max_len
in.fq FASTQ DNA 100,000 2,365,144 18 23.7 36
"""
if isinstance(x, str):
if check_file(x, emptycheck=True):
df = pd.read_csv(x, delim_whitespace=True, thousands=',')
df.file = [fq_name(i) for i in df.file.to_list()]
else:
log.warning('read_seqkit_stat() skipped, empty file: {}'.format(x))
df = None
else:
log.warning('read_seqkit_stat() expect single file, skipped ...')
df = None
return df
def parse_stat_fq(x):
"""Return the seqkit stat files in directory: x
all fq_stat.txt files in the same dir of the x file
"""
s_list = [] # empty
if isinstance(x, str):
if os.path.isdir(x):
s_list = listfile(x, '*.fq_stat.txt')
# reading files
if len(s_list) > 0:
s_frames = list(map(read_seqkit_stat, s_list))
s_frames = [i for i in s_frames if i is not None]
if len(s_frames) > 0:
df = pd.concat(s_frames, axis=0)
df = df[['file', 'num_seqs']] # select columns
else:
log.warning('parse_stat_fq() skipped, empty fq_stat.txt files')
df = pd.DataFrame(columns=['file', 'num_seqs'])
else:
log.warning('parse_stat_fq() skipped, no *.fq_stat.txt found')
df = pd.DataFrame(columns=['file', 'num_seqs'])
# remove 'format' column
df.drop_duplicates(inplace=True) # fa,fq both exists,
return df
def wrap_stat_fq(x, recursive=False):
"""wrap reads stat, in directory: x
level-1: 00.raw_data/*.fq.gz
"""
if isinstance(x, str):
if os.path.isdir(x):
x_dirs = [x]
# subdirs
if recursive:
x_subdirs = [i for i in listdir(x, recursive=True,
include_dir = True) if os.path.isdir(i)]
x_dirs.extend(x_subdirs)
# wrap stat
for d in x_dirs:
# stat_json = os.path.join(d, 'fq_stat.json')
stat_toml = os.path.join(d, 'fq_stat.toml')
df = parse_stat_fq(d)
df = df.set_index('file').to_dict() # .to_json(stat_json)
if len(df.keys()) > 0:
Toml(df).to_toml(stat_toml)
def readfq(fh): # this is a generator function
"""
source: https://github.com/lh3/readfq/blob/master/readfq.py
processing fastq file
"""
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fh: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last: break
[name, _, comment], seqs, last = last[1:].partition(" "), [], None
for l in fh: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None, comment # yield a fasta record
if not last: break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fh: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs), comment; # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None, comment # yield a fasta record instead
break
def split_fq_1u10a(fq, outdir=None, gzipped=True, remove=False):
"""Split fastq file by 1U, 10A
1U +/-
10A +/-
"""
fname = fq_name(fq)
ftype = Fastx(fq).format # fasta/fastq
# output dir
if outdir is None:
outdir = os.path.join(os.path.dirname(fq), '1U_10A')
check_path(outdir)
# output
f_ext = ftype + '.gz' if gzipped else ftype # fastq/fasta
fq_names = ['1U_10A', '1U_not_10A', 'not_1U_10A', 'not_1U_not_10A']
fq_files = [os.path.join(outdir, '{}.{}.{}'.format(fname, i, f_ext)) \
for i in fq_names]
if not check_file(fq, emptycheck=True):
log.error('split_fq_1u10a() skipped, file is empty: {}'.format(fname))
elif all(file_exists(fq_files)):
log.info('split_fq_1u10a() skipped, file exists: {}'.format(fname))
else:
fh = xopen(fq, 'rt')
w1 = xopen(fq_files[0], 'wt')
w2 = xopen(fq_files[1], 'wt')
w3 = xopen(fq_files[2], 'wt')
w4 = xopen(fq_files[3], 'wt')
for n, s, q, m in readfq(fh):
if ftype == 'fasta':
seq = '>{}\n{}\n'.format(n, s)
else:
seq = '@{}\n{}\n+\n{}\n'.format(n, s, q)
# check
if s[0] == 'T':
if s[9] == 'A':
w1.write(seq)
else:
w2.write(seq)
else:
if s[9] == 'A':
w3.write(seq)
else:
w4.write(seq)
# close file
fh.close()
w1.close()
w2.close()
w3.close()
w4.close()
# remove 1U 10A files
if remove:
file_remove(fq_files, ask=False)
return fq_files
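# Usage sketch for split_fq_1u10a() (input path is hypothetical): reads are
# split into four files depending on whether position 1 is U (T in DNA space)
# and position 10 is A, returned in the order
# [1U_10A, 1U_not_10A, not_1U_10A, not_1U_not_10A]:
#
#   fq_groups = split_fq_1u10a('demo.fq.gz', outdir='results/1U_10A')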
def split_bam_1u10a(bam, outdir=None):
"""Check 1U10A for reads in bam file
1. (sam|bam) extract 1U, 10A reads (1U_10A, 1U_not_10A, not_1U_10A, not_1U_not_10A)
"""
fname = filename(bam)
# output dir
if outdir is None:
outdir = os.path.join(os.path.dirname(bam), '1U_10A')
check_path(outdir)
if not file_exists(bam + '.bai'):
pysam.index(bam)
# split BAM
bam_names = ['1U_10A', '1U_not_10A', 'not_1U_10A', 'not_1U_not_10A']
bam_files = [os.path.join(outdir, fname + i + '.bam') for i in bam_names]
if all(file_exists(bam_files)):
log.info('file exists, split_bam_1u10a skipped: {}'.format(fname))
else:
samfile = pysam.AlignmentFile(bam, 'rb')
w1 = pysam.AlignmentFile(bam_files[0], 'wb', template=samfile)
w2 = pysam.AlignmentFile(bam_files[1], 'wb', template=samfile)
w3 = pysam.AlignmentFile(bam_files[2], 'wb', template=samfile)
w4 = pysam.AlignmentFile(bam_files[3], 'wb', template=samfile)
for read in samfile.fetch():
s = read.query_sequence
q = ''.join([chr(c+33) for c in read.query_qualities])
if read.is_reverse:
s = revComp(s)
q = q[::-1]
if s[0] == 'T':
if s[9] == 'A':
w1.write(read)
else:
w2.write(read)
else:
if s[9] == 'A':
w3.write(read)
else:
w4.write(read)
# output
return bam_files
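# Usage sketch for split_bam_1u10a() (path is hypothetical): the alignments are
# split into the same four 1U/10A groups as split_fq_1u10a(), using the read
# sequence (reverse-complemented for minus-strand alignments):
#
#   bam_groups = split_bam_1u10a('results/demo.bam')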
def splilt_fq_size(fq, outdir=None, min=23, max=29, gzipped=True):
"""Split fastq file by the size
23-29 nt
"""
fname = filename(fq)
ftype = Fastx(fq).format
# output dir
if outdir is None:
outdir = os.path.join(os.path.dirname(fq), '1U_10A')
# outfiles
f_ext = ftype + '.gz' if gzipped else ftype
f_name = fname + '.' + f_ext
dir_in = os.path.join(outdir, '{}_{}'.format(min, max))
dir_ex = os.path.join(outdir, 'not_{}_{}'.format(min, max))
f_in = os.path.join(dir_in, f_name)
f_ex = os.path.join(dir_ex, f_name)
check_path([dir_in, dir_ex])
# run
    if not check_file(fq, emptycheck=True):
        log.error('file is empty, split_fq_size() skipped: {}'.format(fname))
    elif all(file_exists([f_in, f_ex])):
        log.info('file exists, split_fq_size() skipped: {}'.format(fname))
else:
with xopen(fq, 'rt') as r, \
xopen(f_in, 'wt') as w1, \
xopen(f_ex, 'wt') as w2:
for n, s, q, m in readfq(r):
if ftype == 'fasta':
seq = '>{}\n{}\n'.format(n, s)
elif ftype == 'fastq':
seq = '@{}\n{}\n+\n{}\n'.format(n, s, q)
else:
continue #
w = w1 if len(s) in range(min, max+1) else w2
w.write(seq)
return [f_in, f_ex]
def split_bam_size(bam, output=None, min=23, max=29):
"""
split bam file by length
"""
    # default output: <bam_prefix>.<min>_<max>.bam
    if output is None:
        output = os.path.splitext(bam)[0] + '.{}_{}.bam'.format(min, max)
    samfile = pysam.AlignmentFile(bam, 'rb')
    fo = pysam.AlignmentFile(output, 'wb', template=samfile)
    # keep reads whose length falls within [min, max]
    # (assumed intent of the original, unfinished loop)
    for read in samfile:
        if read.query_length and min <= read.query_length <= max:
            fo.write(read)
    fo.close()
    samfile.close()
    return output
################################################################################
def revComp(s):
d = {"A": "T", "C": "G", "G": "C", "T": "A", "N": "N"}
s = [d[c] for c in s]
return ''.join(s[::-1])
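# quick illustration: revComp('TTCAA') -> 'TTGAA'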
def bam_to_bed(bam, bed=None):
"""
Convert BAm to bed, using pybedtools
"""
if bed is None:
bed = os.path.splitext(bam)[0] + '.bed'
if file_exists(bed):
log.info('file exists, bam_to_bed() skipped: {}'.format(os.path.basename(bam)))
else:
pybedtools.BedTool(bam).bam_to_bed().saveas(bed)
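# Usage sketch for bam_to_bed() (path is hypothetical): with bed=None the
# output is written next to the input BAM, here 'results/demo.bed':
#
#   bam_to_bed('results/demo.bam')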
def bam_to_fq(bam, fq=None):
"""
Convert bam to fastq
using samtools fastq
Note:
multiple mapping reads
see also: https://github.com/dpryan79/Answers/blob/master/bioinfoSE_2149/convert.py
"""
fname = filename(bam)
if fq is None:
fq = os.path.join(os.path.dirname(bam), fname + '.fastq.gz') # default
# output fmt
fq_suffix = os.path.basename(fq)
if fq_suffix.endswith('.gz'):
fq_suffix = fq_suffix.rstrip('.gz')
fq_ext = os.path.splitext(fq_suffix)[1]
fq_format = 'fasta' if fq_ext.lower() in ['.fa', '.fasta'] else 'fastq'
# read bam
samfile = pysam.AlignmentFile(bam, 'rb')
if file_exists(fq):
log.info('file exists, bam_to_fq() skipped: {}'.format(os.path.basename(fq)))
else:
of = xopen(fq, 'wt')
for read in samfile:
if read.is_unmapped:
continue # skip unmap reads
s = read.query_sequence
q = ''.join([chr(c+33) for c in read.query_qualities])
if read.is_reverse:
s = revComp(s)
q = q[::-1]
if fq_format == 'fasta':
seq = '>{}\n{}\n'.format(read.query_name, s)
else:
seq = '@{}\n{}\n+\n{}\n'.format(read.query_name, s, q)
of.write(seq)
of.close()
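# Usage sketch for bam_to_fq() (paths are hypothetical): mapped reads are
# written back to fasta/fastq (reverse-complemented if aligned to the minus
# strand); the output format follows the file extension:
#
#   bam_to_fq('results/demo.bam', 'results/demo.mapped.fa.gz')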
# unique + multi: k=1
def align_both(fq, index, outdir, k=1, threads=4, gzipped=True, rm_tmp=False):
"""
Align reads to index
could be: -k1
"""
fname = filename(fq)
ftype = Fastx(fq).format # fasta/fastq
check_path(outdir)
# files
bam = os.path.join(outdir, '{}.bam'.format(fname))
align_log = os.path.join(outdir, '{}.bowtie.log'.format(fname))
map_fq = os.path.join(outdir, '{}.{}'.format(fname, ftype))
unmap_fq = os.path.join(outdir, '{}.unmap.{}'.format(fname, ftype))
para = '-f' if ftype == 'fasta' else '-q'
# unique + multiple (-k 1)
cmd = ' '.join([
'bowtie {} -k {} -S -v 2'.format(para, k),
'--best -p {}'.format(threads),
'--un {}'.format(unmap_fq),
'{} {}'.format(index, fq),
'2> {}'.format(align_log),
'| samtools view -bhS -',
'| samtools sort -o {} -'.format(bam)])
cmd_sh = os.path.join(outdir, 'cmd.sh')
with open(cmd_sh, 'wt') as w:
w.write(cmd + '\n')
# run
if os.path.exists(bam):
        log.info('file exists, align() skipped: {}'.format(bam))
else:
os.system(cmd)
# output
if gzipped:
map_fq_gz = map_fq + '.gz'
unmap_fq_gz = unmap_fq + '.gz'
if file_exists(unmap_fq):
gzip_cmd(unmap_fq, unmap_fq_gz, decompress=False)
# convert to fq
bam_to_fq(bam, map_fq_gz)
map_fq_out, unmap_fq_out = [map_fq_gz, unmap_fq_gz]
else:
bam_to_fq(bam, map_fq)
map_fq_out, unmap_fq_out = [map_fq, unmap_fq]
# output
return [bam, map_fq_out, unmap_fq_out] # return gzipped files
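# Usage sketch for align_both() (fastq and bowtie index paths are hypothetical):
# align with 'bowtie -k 1' (unique + multi-mapping reads, one alignment reported
# per read) and get back the sorted BAM plus mapped/unmapped fastq files:
#
#   bam, map_fq, unmap_fq = align_both('demo.fq.gz', 'index/dm6', 'results/align',
#                                      k=1, threads=8)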
# unique: m=1
def align_uniq(fq, index, outdir, threads=4, gzipped=True, rm_tmp=False):
"""
Align reads to index
extract unique reads (-m 1)
unique: -m 1
multiple: -k 2, ids -> bam
"""
fname = filename(fq)
ftype = Fastx(fq).format
check_path(outdir)
# files
bam = os.path.join(outdir, '{}.bam'.format(fname))
align_log = os.path.join(outdir, '{}.bowtie.log'.format(fname))
map_fq = os.path.join(outdir, '{}.{}'.format(fname, ftype))
unmap_fq = os.path.join(outdir, '{}.unmap.{}'.format(fname, ftype))
para = '-f' if ftype == 'fasta' else '-q'
##-------------------##
# unique (-m 1)
cmd = ' '.join([
'bowtie {} -m 1 -S -v 2'.format(para),
'--un {} --best -p {}'.format(unmap_fq, threads),
'{} {}'.format(index, fq),
'2> {}'.format(align_log),
'| samtools view -bhS -',
'| samtools sort -o {} -'.format(bam)])
# save
cmd_sh = os.path.join(outdir, 'cmd.sh')
with open(cmd_sh, 'wt') as w:
w.write(cmd + '\n')
# run
if os.path.exists(bam):
log.info('file exists, align skipped: {}'.format(bam))
else:
os.system(cmd)
# output
if gzipped:
map_fq_gz = map_fq + '.gz'
unmap_fq_gz = unmap_fq + '.gz'
if file_exists(unmap_fq):
gzip_cmd(unmap_fq, unmap_fq_gz, decompress=False)
# convert to fq
bam_to_fq(bam, map_fq_gz)
map_fq_out, unmap_fq_out = [map_fq_gz, unmap_fq_gz]
else:
bam_to_fq(bam, map_fq)
map_fq_out, unmap_fq_out = [map_fq, unmap_fq]
return [bam, map_fq_out, unmap_fq_out] # gzipped output
# multi:
def align_multi(fq, index, outdir, threads=4, gzipped=True):
"""
Align reads to index
extract multiple reads (-k 2)
multiple: -k 2, ids -> bam
"""
fname = filename(fq)
ftype = Fastx(fq).format
check_path(outdir)
##-------------------##
## unique + multi reads
both_bam = os.path.join(outdir, '{}.unique_multi_k2.bam'.format(fname))
both_log = os.path.join(outdir, '{}.unique_multi_k2.bowtie.log'.format(fname))
para = '-f' if ftype == 'fasta' else '-q'
# cmd
both_cmd = ' '.join([
'bowtie {} -k 2 -S -v 2'.format(para),
'--no-unal --best -p {}'.format(threads),
'{} {}'.format(index, fq),
'2> {}'.format(both_log),
'| samtools view -bhS -',
'| samtools sort -o {} -'.format(both_bam)])
# save
cmd_sh = os.path.join(outdir, 'cmd.sh')
with open(cmd_sh, 'wt') as w:
w.write(both_cmd + '\n')
# run
if os.path.exists(both_bam):
log.info('file exists, align skipped: {}'.format(both_bam))
else:
os.system(both_cmd)
##-------------------##
## extract multi fq
multi_ids = os.path.join(outdir, '{}.multi.id.txt'.format(fname))
multi_fq = os.path.join(outdir, '{}.{}'.format(fname, ftype))
unmap_fq = os.path.join(outdir, '{}.unmap.{}'.format(fname, ftype))
multi_fq_gz = multi_fq + '.gz'
unmap_fq_gz = unmap_fq + '.gz'
## cmd
get_multi_cmd = ' '.join([
'samtools view -F 0x4 {}'.format(both_bam),
'| cut -f 1 | sort | uniq -c',
'| awk \'$1>1 {print $2}\'',
'> {}'.format(multi_ids),
'&& seqkit grep -n -f {}'.format(multi_ids),
'{} > {}'.format(fq, multi_fq),
'&& seqkit grep -v -n -f {}'.format(multi_ids),
'{} > {}'.format(fq, unmap_fq)])
# save
cmd_multi_sh = os.path.join(outdir, 'cmd_multi.sh')
with open(cmd_multi_sh, 'wt') as w:
w.write(get_multi_cmd + '\n')
# run
if file_exists(multi_fq) or file_exists(multi_fq_gz):
log.info('file exists, align skipped: {}'.format(multi_fq))
else:
os.system(get_multi_cmd)
# gzip output
if gzipped:
if file_exists(multi_fq): # switch
gzip_cmd(multi_fq, multi_fq_gz, decompress=False)
if file_exists(unmap_fq):
gzip_cmd(unmap_fq, unmap_fq_gz, decompress=False)
multi_fq_out, unmap_fq_out = [multi_fq_gz, unmap_fq_gz]
else:
multi_fq_out, unmap_fq_out = [multi_fq, unmap_fq]
##-------------------##
## multi alignment/multi_bam
## for bam/ -k 1 # only 1 alignment
multi_bam = os.path.join(outdir, '{}.bam'.format(fname))
if not file_exists(multi_bam):
tmp_dir = os.path.join(outdir, 'tmp')
tmp_bam, _, _ = align_both(multi_fq_out, index, tmp_dir, k=1,
threads=threads, gzipped=gzipped) # 100% mapped
shutil.copy(tmp_bam, multi_bam)
shutil.rmtree(tmp_dir)
return [multi_bam, multi_fq_out, unmap_fq_out]
def anno_bam(bam, outdir, genome='dm6'):
"""
    Annotate BAM alignments with HOMER annotatePeaks.pl
"""
fname = filename(bam)
check_path(outdir)
# exe
anno = shutil.which('annotatePeaks.pl')
if not anno:
        log.info('anno_bam() skipped. annotatePeaks.pl not found in $PATH; '
                 'install HOMER and add it to PATH')
return None
# check input
if bam.endswith('.bed'):
        bed = bam  # already a BED file, no conversion needed
else:
# bed = os.path.join(outdir, fname + '.bed')
bed = os.path.splitext(bam)[0] + '.bed'
if not file_exists(bed):
bam_to_bed(bam, bed)
# bed to anno
anno_stat = os.path.join(outdir, fname + '.anno.stat')
anno_txt = os.path.join(outdir, fname + '.anno.txt')
anno_log = os.path.join(outdir, fname + '.anno.log')
# run
anno_cmd = ' '.join([
'{} {} {}'.format(anno, bed, genome),
'-annStats {}'.format(anno_stat),
'1> {}'.format(anno_txt),
'2> {}'.format(anno_log)])
cmd_sh = os.path.join(outdir, 'cmd_anno.sh')
with open(cmd_sh, 'wt') as w:
w.write(anno_cmd + '\n')
if file_exists(anno_txt):
log.info('file exists, anno_bam() skipped: {}'.format(fname))
else:
os.system(anno_cmd)
# combine anno + seq
def merge_bed_byname(b1, b2, output=None, how='inner'):
"""
Merge two bed files, by name
b1 bed file (bed6)
b2 bed file (bed6)
output if None, return the pd.DataFrame
how 'inner', 'outer', 'left', 'right'
"""
df1 = pybedtools.BedTool(b1).to_dataframe()
df2 = pybedtools.BedTool(b2).to_dataframe()
# merge
df = pd.merge(df1, df2, left_on='name', right_on='name', how=how)
if output is None:
return df
else:
df.to_csv(output, index=False, header=False)
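# --- Hedged usage sketch (editor's addition, not part of the original pipeline) ---
# Minimal illustration of merge_bed_byname(); the file paths are placeholders and
# the inputs are assumed to be BED6 files whose 'name' column is shared between
# the two files.
def _merge_bed_byname_example():
    """Join two BED files on their 'name' column, in memory and on disk."""
    df = merge_bed_byname('sampleA.bed', 'sampleB.bed', output=None, how='outer')
    merge_bed_byname('sampleA.bed', 'sampleB.bed', output='merged.txt', how='inner')
    return df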
def pipe_align(fq, index, outdir, threads=4, genome='dm6', gzipped=True,
unique_multi='both', remove_1u10a=False):
"""Align reads to unique, multiple
unique_multi str unique, multi, both, all
"""
f_name = fq_name(fq)
f_name = f_name.replace('.unmap', '') # remove suffix
fq_align = os.path.join(outdir, f_name + '.fastq.gz')
## priority: both > unique > multi
##---------------------------------------------------------##
# multi only
if unique_multi in ['multi', 'all']:
multi_dir = os.path.join(outdir, 'multi')
check_path(multi_dir)
multi_bam, multi_fq, multi_unmap = align_multi(fq, index, multi_dir,
            threads=threads, gzipped=gzipped)
# annotation
anno_bam(multi_bam, multi_dir, genome)
multi_bam_list = split_bam_1u10a(multi_bam)
# output
fq_unmap = multi_unmap
file_symlink(multi_fq, fq_align)
##---------------------------------------------------------##
## uniq only
if unique_multi in ['unique', 'all']:
uniq_dir = os.path.join(outdir, 'unique')
check_path(uniq_dir)
uniq_bam, uniq_fq, uniq_unmap = align_uniq(fq, index, uniq_dir,
            threads=threads, gzipped=gzipped)
# annotation
anno_bam(uniq_bam, uniq_dir, genome)
uniq_bam_list = split_bam_1u10a(uniq_bam)
# output
fq_unmap = uniq_unmap
if file_exists(fq_align):
file_remove(fq_align, ask=False)
file_symlink(uniq_fq, fq_align)
##---------------------------------------------------------##
## uniq + multiple
if unique_multi in ['both', 'all']:
both_dir = os.path.join(outdir, 'unique_multi')
check_path(both_dir)
both_bam, both_fq, both_unmap = align_both(fq, index, both_dir, k=1,
threads=threads, gzipped=gzipped)
# annotation
anno_bam(both_bam, both_dir, genome)
both_bam_list = split_bam_1u10a(both_bam)
# output
fq_unmap = both_unmap
if file_exists(fq_align):
file_remove(fq_align, ask=False)
file_symlink(both_fq, fq_align)
return (fq_align, fq_unmap)
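# --- Hedged usage sketch (editor's addition) ---
# One plausible way to chain pipe_align() over several indexes, feeding the
# unmapped reads of each round into the next; the index list, directory naming
# and argument choices are assumptions for illustration, not taken from the
# original workflow.
def _pipe_align_chain_example(fq, index_list, outdir, threads=4, genome='dm6'):
    """Sequentially align fq to each index; return the final unmapped fastq."""
    unmap = fq
    for i, index in enumerate(index_list, start=1):
        sub_dir = os.path.join(outdir, '{:02d}_{}'.format(i, os.path.basename(index)))
        _, unmap = pipe_align(unmap, index, sub_dir, threads=threads,
                              genome=genome, gzipped=True, unique_multi='both')
    return unmap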
def pipe_overlap(dirA, dirB, outdir):
"""
Calculate the overlap between TE, piRNA clusters
subdir: unique, multi, unique_multi
"""
# b1 = listfile(dirA, "*.bed", recursive=True)
# b2 = listfile(dirB, "*.bed", recursive=True)
for sub in ['unique', 'unique_multi', 'multi']:
# run
b1 = listfile(os.path.join(dirA, sub), "*.bed")
b2 = listfile(os.path.join(dirB, sub), "*.bed")
if len(b1) == 0 or len(b2) == 0:
log.warning('bed not found, {}'.format(sub))
else:
b1 = b1[0] # first one
b2 = b2[0] # first one
# output file
sub_outdir = os.path.join(outdir, sub)
sub_fn = os.path.basename(b1)
sub_f = os.path.join(sub_outdir, sub_fn)
if file_exists(sub_f):
log.info('file exists, overlap() skipped: {}'.format(sub))
continue
# merge two BED file
df = merge_bed_byname(b1, b2, how='outer')
# extract count from name
# id-count
dm = df['name'].str.split('-', n=1, expand=True)
if dm.shape[1] == 2:
# dm = dm.astype({0: str, 1: np.int64})
                dm_chk = dm[1].apply(lambda x: x is None).sum()
                df['count'] = dm[1] if dm_chk == 0 else 1
else:
df['count'] = 1
# re-arrange
df = df.astype({'count': np.int64}) # numeric
df.sort_values(by=['count'], ascending=False, inplace=True)
# df.reset_index(inplace=True)
# save to file
check_path(sub_outdir)
df.to_csv(sub_f, '\t', header=False, index=False)
################################################################################
# Deprecated
def pipe_init(fq, outdir):
"""
Copy data to dir
fq_stat()
"""
fname = filename(fq)
check_path(outdir)
fq_to = os.path.join(outdir, os.path.basename(fq))
if not file_exists(fq_to):
os.symlink(fq, fq_to)
stat_fq(fq_to)
# 1U 10A
fq_list = split_fq_1u10a(fq_to)
for f in fq_list:
stat_fq(f)
# wrap stat
df = wrap_stat_fq(outdir)
return [fq_to, df]
def pipe_collapse(fq, outdir, gzipped=True):
"""
Collapse, by sequence
"""
fname = filename(fq)
check_path(outdir)
fq_out = collapse_fx(fq, outdir, gzipped=True)
stat_fq(fq_out)
# 1U 10A
fq_list = split_fq_1u10a(fq_out)
for f in fq_list:
stat_fq(f)
# wrap stat
df = wrap_stat_fq(outdir)
return [fq_out, df]
def pipe_smRNA(fq, index, outdir, threads=4, genome='dm6', gzipped=True):
"""
run reads to small RNAs
return unmap fq
"""
fname = filename(fq)
ftype = Fastx(fq).format
check_path(outdir)
# align
bam, map_fq, unmap_fq = align(fq, index, outdir, k=1, threads=threads, gzipped=gzipped)
# annotation
bam_to_bed(bam)
anno_bam(bam, outdir, genome)
# fq stat
stat_fq(map_fq)
# 1U 10A
sub_dir = os.path.join(outdir, '1U_10A')
bam_list = split_bam_1u10a(bam, sub_dir)
for b in bam_list:
f = os.path.splitext(b)[0] + '.' + ftype
if gzipped:
f += '.gz'
bam_to_fq(b, f)
stat_fq(f)
# wrap stat
df = wrap_stat_fq(outdir)
return [unmap_fq, df]
def pipe_filt_length(fq, outdir, min=23, max=29, gzipped=True):
"""
filt fastq by length
"""
fname = filename(fq)
fq1, fq2 = filt_fq_length(fq, outdir, min=23, max=29, gzipped=gzipped)
################
## sub-01: 23_29
## split fq
stat_fq(fq1)
fq1_dir = os.path.dirname(fq1)
fq1_list = split_fq_1u10a(fq1)
for i in fq1_list:
stat_fq(i)
## wrap stat
wrap_stat_fq(fq1_dir)
#####################
## sub-02: not_23_29
## split fq
stat_fq(fq2)
fq2_dir = os.path.dirname(fq2)
fq2_list = split_fq_1u10a(fq2)
for i in fq2_list:
stat_fq(i)
## wrap stat
wrap_stat_fq(fq2_dir)
return [fq1, fq2]
def filename(x):
"""
Extract the name of fastq file
"""
if x is None:
xname = None
else:
if x.endswith('.gz'):
xname = os.path.basename(x).replace('.gz', '')
else:
xname = os.path.basename(x)
xname = os.path.splitext(xname)[0]
# remove ".unmap"
xname = xname.replace('.unmap', '')
return xname
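# --- Hedged illustration (editor's addition) ---
# Expected behaviour of filename() on a few typical inputs, based on reading the
# function above; defined as a helper so it can be run as a quick sanity check.
def _filename_examples():
    assert filename('/data/s1.fastq.gz') == 's1'
    assert filename('/data/s1.unmap.fastq') == 's1'
    assert filename(None) is None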
def fa2fq(fa, fq):
"""
Convert fasta to fastq
"I" for quality
"""
fq_dir = os.path.dirname(fq)
check_path(fq_dir)
if file_exists(fq):
        log.info('file exists, fa2fq() skipped: {}'.format(fq))
else:
        with xopen(fa, 'rt') as r, xopen(fq, 'wt') as w:
            for n, s, q, m in readfq(r):
                # FASTA records carry no quality; write "I" as a placeholder
                fq_rec = '@{}\n{}\n+\n{}\n'.format(n, s, 'I' * len(s))
                w.write(fq_rec)
| 30.200375
| 92
| 0.538073
|
cfc25b418c2837951c2a6830e2ffaf31e4bf6b7d
| 3,813
|
py
|
Python
|
src/module_tests/config_test.py
|
eirikeve/esrdgan
|
058687816663c47b013721e5a98c0d175a876596
|
[
"Apache-2.0"
] | 8
|
2019-04-07T11:49:42.000Z
|
2021-03-25T06:24:16.000Z
|
src/module_tests/config_test.py
|
eirikeve/esrdgan
|
058687816663c47b013721e5a98c0d175a876596
|
[
"Apache-2.0"
] | 2
|
2019-04-08T18:24:02.000Z
|
2020-06-19T06:21:22.000Z
|
src/module_tests/config_test.py
|
eirikeve/esrdgan
|
058687816663c47b013721e5a98c0d175a876596
|
[
"Apache-2.0"
] | null | null | null |
"""
config_test.py
Written by Eirik Vesterkjær, 2019
Apache License
tests for config/config.py
run this using unittest:
python -m unittest module_tests/config_test.py
"""
import unittest
import tempfile
from config.config import *
cfg_ini = \
"""[DEFAULT]
name = test_name
model = test_model
use_tensorboard_logger = False
scale = 4
also_log_to_terminal = True
[ENV]
root_path = test_root
log_subpath = /log
runs_subpath = /runs
generator_load_subpath = /runs/ESRDGAN_train_1_4x/generator_5000.pth
discriminator_load_subpath = /runs/ESRDGAN_train_1_4x/discriminator_5000.pth
[DATASETTRAIN]
n_workers = 16
batch_size = 16
img_size = 192
name = default_dataset_train_name
dataroot = default_path
data_aug_gaussian_noise = True
gaussian_stddev = 0.01
data_aug_shuffle = True
data_aug_flip = True
data_aug_rot = True
[DATASETVAL]
n_workers = 16
batch_size = 16
img_size = 192
name = default_dataset_val_name
dataroot = default_path
data_aug_gaussian_noise = True
gaussian_stddev = 0.01
data_aug_shuffle = True
data_aug_flip = True
data_aug_rot = True
[DATASETTEST]
n_workers = 16
batch_size = 16
img_size = 192
name = default_dataset_test_name
dataroot = default_path
data_aug_gaussian_noise = True
gaussian_stddev = 0.01
data_aug_shuffle = True
data_aug_flip = True
data_aug_rot = True
[GENERATOR]
norm_type = none
act_type = leakyrelu
layer_mode = CNA
num_features = 64
num_rrdb = 23
num_rdb_convs = 5
rdb_res_scaling = 0.2
rrdb_res_scaling = 0.2
in_num_ch = 3
out_num_ch = 3
rdb_growth_chan = 32
[DISCRIMINATOR]
norm_type = batch
act_type = leakyrelu
layer_mode = CNA
num_features = 64
in_num_ch = 3
[FEATUREEXTRACTOR]
low_level_feat_layer = 10
high_level_feat_layer = 20
[TRAINING]
resume_training_from_save = False
resume_epoch = 0
resume_iter = 0
learning_rate_g = 2e-4
learning_rate_d = 3e-4
multistep_lr = True
multistep_lr_steps = [50000, 100000, 200000, 300000]
lr_gamma = 0.5
# Loss weighting
gan_type = relativistic
gan_weight = 5e-3
pixel_criterion = l1
pixel_weight = 1e-2
feature_criterion = l2
feature_weight = 1.0
niter = 500000
val_period = 2000
save_model_period = 2000
log_period = 100
"""
class TestConfig(unittest.TestCase):
def test_config(self):
global cfg_ini
temp = tempfile.NamedTemporaryFile('w+')
temp.write(cfg_ini)
temp.seek(0)
cfg = Config(temp.name)
temp.close()
# check that base config is set
self.assertEqual(cfg.name, "test_name")
self.assertEqual(cfg.model, "test_model")
# check that its subconfigs are set
self.assertEqual(cfg.env.root_path, "test_root")
self.assertEqual(cfg.feature_extractor.low_level_feat_layer, 10)
self.assertAlmostEqual(cfg.training.learning_rate_g, 2e-4)
new_cfg_name = "new_test_name"
new_train_lr = 5e10
new_feat_low_layer = 3
cfg.name = new_cfg_name
cfg.training.learning_rate_g = new_train_lr
cfg.feature_extractor.low_level_feat_layer = new_feat_low_layer
new_cfg_ini = cfg.asINI()
temp = tempfile.NamedTemporaryFile('w+')
temp.write(new_cfg_ini)
temp.seek(0)
cfg2 = Config(temp.name)
temp.close()
        # check that the reloaded base config reflects the modified values
        self.assertEqual(cfg2.name, new_cfg_name)
        # check that its subconfigs are set
        self.assertEqual(cfg2.feature_extractor.low_level_feat_layer, new_feat_low_layer)
        self.assertAlmostEqual(cfg2.training.learning_rate_g, new_train_lr)
if __name__ == "__main__":
unittest.main()
| 22.832335
| 88
| 0.6984
|
2e684918d8e6e1c6277c27aa89521a7c6cf229bc
| 13,766
|
py
|
Python
|
train.py
|
TianlongChenTAMU/ABD-Net
|
502305e37ab36bb23084c64f200437d19a9e9e28
|
[
"MIT"
] | 244
|
2019-08-03T16:56:42.000Z
|
2020-08-08T00:57:21.000Z
|
train.py
|
TianlongChenTAMU/ABD-Net
|
502305e37ab36bb23084c64f200437d19a9e9e28
|
[
"MIT"
] | 31
|
2019-08-09T09:13:22.000Z
|
2020-08-11T07:27:47.000Z
|
train.py
|
TianlongChenTAMU/ABD-Net
|
502305e37ab36bb23084c64f200437d19a9e9e28
|
[
"MIT"
] | 44
|
2019-08-09T00:55:48.000Z
|
2020-07-27T03:52:13.000Z
|
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import datetime
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.optim import lr_scheduler
from args import argument_parser, image_dataset_kwargs, optimizer_kwargs
from torchreid.data_manager import ImageDataManager
from torchreid import models
from torchreid.utils.iotools import save_checkpoint, check_isfile
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.loggers import Logger, RankLogger
from torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers
from torchreid.utils.reidtools import visualize_ranked_results
from torchreid.eval_metrics import evaluate
from torchreid.optimizers import init_optimizer
from torchreid.regularizers import get_regularizer
import logging
logging.basicConfig(level=os.environ.get('LOGLEVEL', 'CRITICAL'))
# global variables
parser = argument_parser()
args = parser.parse_args()
os.environ['TORCH_HOME'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '.torch'))
def get_criterion(num_classes: int, use_gpu: bool, args):
if args.criterion == 'htri':
from torchreid.losses.hard_mine_triplet_loss import TripletLoss
criterion = TripletLoss(num_classes, vars(args), use_gpu)
elif args.criterion == 'xent':
from torchreid.losses.cross_entropy_loss import CrossEntropyLoss
criterion = CrossEntropyLoss(num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth)
else:
raise RuntimeError('Unknown criterion {}'.format(args.criterion))
return criterion
def main():
global args
torch.manual_seed(args.seed)
if not args.use_avai_gpus:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
use_gpu = torch.cuda.is_available()
if args.use_cpu:
use_gpu = False
log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
sys.stderr = sys.stdout = Logger(osp.join(args.save_dir, log_name))
print("==========\nArgs:{}\n==========".format(args))
if use_gpu:
print("Currently using GPU {}".format(args.gpu_devices))
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
else:
print("Currently using CPU, however, GPU is highly recommended")
print("Initializing image data manager")
dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
trainloader, testloader_dict = dm.return_dataloaders()
print("Initializing model: {}".format(args.arch))
model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, loss={'xent'}, use_gpu=use_gpu, args=vars(args))
print(model)
print("Model size: {:.3f} M".format(count_num_param(model)))
criterion = get_criterion(dm.num_train_pids, use_gpu, args)
regularizer = get_regularizer(vars(args))
optimizer = init_optimizer(model.parameters(), **optimizer_kwargs(args))
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.stepsize, gamma=args.gamma)
if args.load_weights and check_isfile(args.load_weights):
# load pretrained weights but ignore layers that don't match in size
try:
checkpoint = torch.load(args.load_weights)
except Exception as e:
print(e)
checkpoint = torch.load(args.load_weights, map_location={'cuda:0': 'cpu'})
pretrain_dict = checkpoint['state_dict']
model_dict = model.state_dict()
pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
print("Loaded pretrained weights from '{}'".format(args.load_weights))
if args.resume and check_isfile(args.resume):
checkpoint = torch.load(args.resume)
state = model.state_dict()
state.update(checkpoint['state_dict'])
model.load_state_dict(state)
# args.start_epoch = checkpoint['epoch'] + 1
print("Loaded checkpoint from '{}'".format(args.resume))
print("- start_epoch: {}\n- rank1: {}".format(args.start_epoch, checkpoint['rank1']))
if use_gpu:
model = nn.DataParallel(model).cuda()
if args.evaluate:
print("Evaluate only")
for name in args.target_names:
print("Evaluating {} ...".format(name))
queryloader = testloader_dict[name]['query'], testloader_dict[name]['query_flip']
galleryloader = testloader_dict[name]['gallery'], testloader_dict[name]['gallery_flip']
distmat = test(model, queryloader, galleryloader, use_gpu, return_distmat=True)
if args.visualize_ranks:
visualize_ranked_results(
distmat, dm.return_testdataset_by_name(name),
save_dir=osp.join(args.save_dir, 'ranked_results', name),
topk=20
)
return
start_time = time.time()
ranklogger = RankLogger(args.source_names, args.target_names)
train_time = 0
print("==> Start training")
if args.fixbase_epoch > 0:
oldenv = os.environ.get('sa', '')
os.environ['sa'] = ''
print("Train {} for {} epochs while keeping other layers frozen".format(args.open_layers, args.fixbase_epoch))
initial_optim_state = optimizer.state_dict()
for epoch in range(args.fixbase_epoch):
start_train_time = time.time()
train(epoch, model, criterion, regularizer, optimizer, trainloader, use_gpu, fixbase=True)
train_time += round(time.time() - start_train_time)
print("Done. All layers are open to train for {} epochs".format(args.max_epoch))
optimizer.load_state_dict(initial_optim_state)
os.environ['sa'] = oldenv
max_r1 = 0
for epoch in range(args.start_epoch, args.max_epoch):
start_train_time = time.time()
print(epoch)
print(criterion)
train(epoch, model, criterion, regularizer, optimizer, trainloader, use_gpu, fixbase=False)
train_time += round(time.time() - start_train_time)
if use_gpu:
state_dict = model.module.state_dict()
else:
state_dict = model.state_dict()
save_checkpoint({
'state_dict': state_dict,
'rank1': 0,
'epoch': epoch,
}, False, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))
scheduler.step()
if (epoch + 1) > args.start_eval and args.eval_freq > 0 and (epoch + 1) % args.eval_freq == 0 or (epoch + 1) == args.max_epoch:
print("==> Test")
for name in args.target_names:
print("Evaluating {} ...".format(name))
queryloader = testloader_dict[name]['query'], testloader_dict[name]['query_flip']
galleryloader = testloader_dict[name]['gallery'], testloader_dict[name]['gallery_flip']
rank1 = test(model, queryloader, galleryloader, use_gpu)
ranklogger.write(name, epoch + 1, rank1)
if use_gpu:
state_dict = model.module.state_dict()
else:
state_dict = model.state_dict()
if max_r1 < rank1:
print('Save!', max_r1, rank1)
save_checkpoint({
'state_dict': state_dict,
'rank1': rank1,
'epoch': epoch,
}, False, osp.join(args.save_dir, 'checkpoint_best.pth.tar'))
max_r1 = rank1
elapsed = round(time.time() - start_time)
elapsed = str(datetime.timedelta(seconds=elapsed))
train_time = str(datetime.timedelta(seconds=train_time))
print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
ranklogger.show_summary()
def train(epoch, model, criterion, regularizer, optimizer, trainloader, use_gpu, fixbase=False):
if not fixbase and args.use_of and epoch >= args.of_start_epoch:
print('Using OF')
from torchreid.losses.of_penalty import OFPenalty
of_penalty = OFPenalty(vars(args))
losses = AverageMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
model.train()
if fixbase or args.fixbase:
open_specified_layers(model, args.open_layers)
else:
open_all_layers(model)
end = time.time()
for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):
try:
limited = float(os.environ.get('limited', None))
except (ValueError, TypeError):
limited = 1
if not fixbase and (batch_idx + 1) > limited * len(trainloader):
break
data_time.update(time.time() - end)
if use_gpu:
imgs, pids = imgs.cuda(), pids.cuda()
outputs = model(imgs)
loss = criterion(outputs, pids)
if not fixbase:
reg = regularizer(model)
loss += reg
if not fixbase and args.use_of and epoch >= args.of_start_epoch:
penalty = of_penalty(outputs)
loss += penalty
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
losses.update(loss.item(), pids.size(0))
if (batch_idx + 1) % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
epoch + 1, batch_idx + 1, len(trainloader), batch_time=batch_time,
data_time=data_time, loss=losses))
end = time.time()
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20], return_distmat=False):
flip_eval = args.flip_eval
if flip_eval:
print('# Using Flip Eval')
batch_time = AverageMeter()
model.eval()
with torch.no_grad():
qf, q_pids, q_camids, q_paths = [], [], [], []
if flip_eval:
enumerator = enumerate(zip(queryloader[0], queryloader[1]))
else:
enumerator = enumerate(queryloader[0])
for batch_idx, package in enumerator:
end = time.time()
if flip_eval:
(imgs0, pids, camids, paths), (imgs1, _, _, _) = package
if use_gpu:
imgs0, imgs1 = imgs0.cuda(), imgs1.cuda()
features = (model(imgs0)[0] + model(imgs1)[0]) / 2.0
# print(features.size())
else:
(imgs, pids, camids, paths) = package
if use_gpu:
imgs = imgs.cuda()
features = model(imgs)[0]
batch_time.update(time.time() - end)
features = features.data.cpu()
qf.append(features)
q_pids.extend(pids)
q_camids.extend(camids)
q_paths.extend(paths)
qf = torch.cat(qf, 0)
q_pids = np.asarray(q_pids)
q_camids = np.asarray(q_camids)
print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))
gf, g_pids, g_camids, g_paths = [], [], [], []
if flip_eval:
enumerator = enumerate(zip(galleryloader[0], galleryloader[1]))
else:
enumerator = enumerate(galleryloader[0])
for batch_idx, package in enumerator:
end = time.time()
if flip_eval:
(imgs0, pids, camids, paths), (imgs1, _, _, _) = package
if use_gpu:
imgs0, imgs1 = imgs0.cuda(), imgs1.cuda()
features = (model(imgs0)[0] + model(imgs1)[0]) / 2.0
# print(features.size())
else:
(imgs, pids, camids, _) = package
if use_gpu:
imgs = imgs.cuda()
features = model(imgs)[0]
batch_time.update(time.time() - end)
features = features.data.cpu()
gf.append(features)
g_pids.extend(pids)
g_camids.extend(camids)
g_paths.extend(paths)
gf = torch.cat(gf, 0)
g_pids = np.asarray(g_pids)
g_camids = np.asarray(g_camids)
print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))
if os.environ.get('save_feat'):
import scipy.io as io
io.savemat(os.environ.get('save_feat'), {'q': qf.data.numpy(), 'g': gf.data.numpy(), 'qt': q_pids, 'gt': g_pids})
# return
print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch_size))
m, n = qf.size(0), gf.size(0)
distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
distmat.addmm_(1, -2, qf, gf.t())
distmat = distmat.numpy()
if os.environ.get('distmat'):
import scipy.io as io
io.savemat(os.environ.get('distmat'), {'distmat': distmat, 'qp': q_paths, 'gp': g_paths})
print("Computing CMC and mAP")
cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)
print("Results ----------")
print("mAP: {:.2%}".format(mAP))
print("CMC curve")
for r in ranks:
print("Rank-{:<3}: {:.2%}".format(r, cmc[r - 1]))
print("------------------")
if return_distmat:
return distmat
return cmc[0]
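# --- Hedged illustration (editor's addition, not used by the training script) ---
# The distance matrix in test() relies on the expansion
#   ||q - g||^2 = ||q||^2 + ||g||^2 - 2 * q . g
# computed in batch. A tiny self-contained check of that identity on toy tensors:
def _squared_euclidean_example():
    q = torch.tensor([[0.0, 1.0], [2.0, 2.0]])               # 2 query features
    g = torch.tensor([[1.0, 1.0], [0.0, 0.0], [2.0, 0.0]])   # 3 gallery features
    m, n = q.size(0), g.size(0)
    dist = torch.pow(q, 2).sum(dim=1, keepdim=True).expand(m, n) + \
        torch.pow(g, 2).sum(dim=1, keepdim=True).expand(n, m).t() - \
        2.0 * q.matmul(g.t())
    # brute-force reference for comparison
    ref = torch.stack([torch.stack([(q[i] - g[j]).pow(2).sum() for j in range(n)])
                       for i in range(m)])
    assert torch.allclose(dist, ref)
    return dist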
if __name__ == '__main__':
main()
| 35.117347
| 135
| 0.611579
|
92d7342b2b8d4a52aaeba10f1297509b41be45a0
| 1,912
|
py
|
Python
|
wb/main/models/get_system_resources_job_model.py
|
apaniukov/workbench
|
2f2653ecfd0143d2d53e33ad84379f13443fdfaa
|
[
"Apache-2.0"
] | 23
|
2022-03-17T12:24:09.000Z
|
2022-03-31T09:13:30.000Z
|
wb/main/models/get_system_resources_job_model.py
|
apaniukov/workbench
|
2f2653ecfd0143d2d53e33ad84379f13443fdfaa
|
[
"Apache-2.0"
] | 18
|
2022-03-21T08:17:44.000Z
|
2022-03-30T12:42:30.000Z
|
wb/main/models/get_system_resources_job_model.py
|
apaniukov/workbench
|
2f2653ecfd0143d2d53e33ad84379f13443fdfaa
|
[
"Apache-2.0"
] | 16
|
2022-03-17T12:24:14.000Z
|
2022-03-31T12:15:12.000Z
|
"""
OpenVINO DL Workbench
Class for ORM model describing job for getting system resources from target
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship, backref
from wb.main.enumerates import JobTypesEnum
from wb.main.models.jobs_model import JobsModel
from wb.main.models.remote_target_model import RemoteTargetModel
from wb.main.models.target_model import TargetModel
class GetSystemResourcesJobModel(JobsModel):
__tablename__ = 'get_system_resources_jobs'
__mapper_args__ = {
'polymorphic_identity': JobTypesEnum.get_system_resources_type.value
}
job_id = Column(Integer, ForeignKey(JobsModel.job_id), primary_key=True)
target_id = Column(Integer, ForeignKey(TargetModel.id))
target: RemoteTargetModel = relationship('RemoteTargetModel', foreign_keys=[target_id],
backref=backref('get_system_resources_job',
lazy='subquery',
cascade='delete,all'))
def __init__(self, data: dict):
super().__init__(data)
self.target_id = data['targetId']
def json(self) -> dict:
return {
**super().json(),
'targetId': self.target_id,
}
| 37.490196
| 91
| 0.685146
|
18699c5f298928d7138823cd4e14c79c32c4212f
| 797
|
py
|
Python
|
Python/sorts/bubble_sort.py
|
fanxiangs/notebook
|
c11fdfa4d1ea4d79f97422851999313fc81b0970
|
[
"MIT"
] | null | null | null |
Python/sorts/bubble_sort.py
|
fanxiangs/notebook
|
c11fdfa4d1ea4d79f97422851999313fc81b0970
|
[
"MIT"
] | null | null | null |
Python/sorts/bubble_sort.py
|
fanxiangs/notebook
|
c11fdfa4d1ea4d79f97422851999313fc81b0970
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: fan
# time: 11/14/2021
from typing import List
def bubble_sort(nums: List[int]):
    """
    Bubble sort:
    Compare adjacent elements; if the first is larger than the second
    (for ascending order), swap them.
    Time complexity: O(n^2)
    @param nums: unsorted list
    @return: list sorted in ascending order
    >>> import random
    >>> nums = random.sample(range(-50, 50), 50)
    >>> bubble_sort(nums) == sorted(nums)
    [...]
    True
    >>> bubble_sort([0, 5, 2, 3, 2]) == sorted([0, 5, 2, 3, 2])
    [...]
    True
    """
    length = len(nums)
    for i in range(length - 1):
        # compare adjacent pairs; each pass bubbles the largest remaining
        # element to the end of the unsorted part
        for j in range(length - 1 - i):
            if nums[j] > nums[j + 1]:
                nums[j], nums[j + 1] = nums[j + 1], nums[j]
        print(nums)
    return nums
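# --- Editor's sketch (not in the original file) ---
# A common refinement of the pass structure above: stop as soon as a full pass
# performs no swap, which makes the best case (already sorted input) O(n).
def bubble_sort_early_exit(nums: List[int]):
    """Bubble sort with an early exit when a pass makes no swaps."""
    length = len(nums)
    for i in range(length - 1):
        swapped = False
        for j in range(length - 1 - i):
            if nums[j] > nums[j + 1]:
                nums[j], nums[j + 1] = nums[j + 1], nums[j]
                swapped = True
        if not swapped:
            break
    return nums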
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True, optionflags=doctest.ELLIPSIS)
| 21.540541
| 63
| 0.540778
|
f8ac3d3b58b1adbcd63f15b2497cb7658e3cec8a
| 15,391
|
py
|
Python
|
xarray/tests/test_coding_times.py
|
thadncs/https-github.com-pydata-xarray
|
e31cf43e8d183c63474b2898a0776fda72abc82c
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2019-09-10T17:41:42.000Z
|
2019-09-10T17:41:42.000Z
|
xarray/tests/test_coding_times.py
|
thadncs/https-github.com-pydata-xarray
|
e31cf43e8d183c63474b2898a0776fda72abc82c
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
xarray/tests/test_coding_times.py
|
thadncs/https-github.com-pydata-xarray
|
e31cf43e8d183c63474b2898a0776fda72abc82c
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import numpy as np
import pandas as pd
from xarray import Variable, coding
from . import (
TestCase, requires_netCDF4, assert_array_equal)
import pytest
@np.vectorize
def _ensure_naive_tz(dt):
if hasattr(dt, 'tzinfo'):
return dt.replace(tzinfo=None)
else:
return dt
class TestDatetime(TestCase):
@requires_netCDF4
def test_cf_datetime(self):
import netCDF4 as nc4
for num_dates, units in [
(np.arange(10), 'days since 2000-01-01'),
(np.arange(10).reshape(2, 5), 'days since 2000-01-01'),
(12300 + np.arange(5), 'hours since 1680-01-01 00:00:00'),
# here we add a couple minor formatting errors to test
# the robustness of the parsing algorithm.
(12300 + np.arange(5), 'hour since 1680-01-01 00:00:00'),
(12300 + np.arange(5), u'Hour since 1680-01-01 00:00:00'),
(12300 + np.arange(5), ' Hour since 1680-01-01 00:00:00 '),
(10, 'days since 2000-01-01'),
([10], 'daYs since 2000-01-01'),
([[10]], 'days since 2000-01-01'),
([10, 10], 'days since 2000-01-01'),
(np.array(10), 'days since 2000-01-01'),
(0, 'days since 1000-01-01'),
([0], 'days since 1000-01-01'),
([[0]], 'days since 1000-01-01'),
(np.arange(2), 'days since 1000-01-01'),
(np.arange(0, 100000, 20000), 'days since 1900-01-01'),
(17093352.0, 'hours since 1-1-1 00:00:0.0'),
([0.5, 1.5], 'hours since 1900-01-01T00:00:00'),
(0, 'milliseconds since 2000-01-01T00:00:00'),
(0, 'microseconds since 2000-01-01T00:00:00'),
]:
for calendar in ['standard', 'gregorian', 'proleptic_gregorian']:
expected = _ensure_naive_tz(
nc4.num2date(num_dates, units, calendar))
print(num_dates, units, calendar)
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Unable to decode time axis')
actual = coding.times.decode_cf_datetime(num_dates, units,
calendar)
if (isinstance(actual, np.ndarray) and
np.issubdtype(actual.dtype, np.datetime64)):
# self.assertEqual(actual.dtype.kind, 'M')
# For some reason, numpy 1.8 does not compare ns precision
# datetime64 arrays as equal to arrays of datetime objects,
# but it works for us precision. Thus, convert to us
# precision for the actual array equal comparison...
actual_cmp = actual.astype('M8[us]')
else:
actual_cmp = actual
assert_array_equal(expected, actual_cmp)
encoded, _, _ = coding.times.encode_cf_datetime(actual, units,
calendar)
if '1-1-1' not in units:
# pandas parses this date very strangely, so the original
# units/encoding cannot be preserved in this case:
# (Pdb) pd.to_datetime('1-1-1 00:00:0.0')
# Timestamp('2001-01-01 00:00:00')
assert_array_equal(num_dates, np.around(encoded, 1))
if (hasattr(num_dates, 'ndim') and num_dates.ndim == 1 and
'1000' not in units):
# verify that wrapping with a pandas.Index works
# note that it *does not* currently work to even put
# non-datetime64 compatible dates into a pandas.Index
encoded, _, _ = coding.times.encode_cf_datetime(
pd.Index(actual), units, calendar)
assert_array_equal(num_dates, np.around(encoded, 1))
@requires_netCDF4
def test_decode_cf_datetime_overflow(self):
# checks for
# https://github.com/pydata/pandas/issues/14068
# https://github.com/pydata/xarray/issues/975
from datetime import datetime
units = 'days since 2000-01-01 00:00:00'
# date after 2262 and before 1678
days = (-117608, 95795)
expected = (datetime(1677, 12, 31), datetime(2262, 4, 12))
for i, day in enumerate(days):
result = coding.times.decode_cf_datetime(day, units)
assert result == expected[i]
def test_decode_cf_datetime_non_standard_units(self):
expected = pd.date_range(periods=100, start='1970-01-01', freq='h')
# netCDFs from madis.noaa.gov use this format for their time units
# they cannot be parsed by netcdftime, but pd.Timestamp works
units = 'hours since 1-1-1970'
actual = coding.times.decode_cf_datetime(np.arange(100), units)
assert_array_equal(actual, expected)
@requires_netCDF4
def test_decode_cf_datetime_non_iso_strings(self):
# datetime strings that are _almost_ ISO compliant but not quite,
# but which netCDF4.num2date can still parse correctly
expected = pd.date_range(periods=100, start='2000-01-01', freq='h')
cases = [(np.arange(100), 'hours since 2000-01-01 0'),
(np.arange(100), 'hours since 2000-1-1 0'),
(np.arange(100), 'hours since 2000-01-01 0:00')]
for num_dates, units in cases:
actual = coding.times.decode_cf_datetime(num_dates, units)
assert_array_equal(actual, expected)
@requires_netCDF4
def test_decode_non_standard_calendar(self):
import netCDF4 as nc4
for calendar in ['noleap', '365_day', '360_day', 'julian', 'all_leap',
'366_day']:
units = 'days since 0001-01-01'
times = pd.date_range('2001-04-01-00', end='2001-04-30-23',
freq='H')
noleap_time = nc4.date2num(times.to_pydatetime(), units,
calendar=calendar)
expected = times.values
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Unable to decode time axis')
actual = coding.times.decode_cf_datetime(noleap_time, units,
calendar=calendar)
assert actual.dtype == np.dtype('M8[ns]')
abs_diff = abs(actual - expected)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff <= np.timedelta64(1, 's')).all()
@requires_netCDF4
def test_decode_non_standard_calendar_single_element(self):
units = 'days since 0001-01-01'
for calendar in ['noleap', '365_day', '360_day', 'julian', 'all_leap',
'366_day']:
for num_time in [735368, [735368], [[735368]]]:
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Unable to decode time axis')
actual = coding.times.decode_cf_datetime(num_time, units,
calendar=calendar)
assert actual.dtype == np.dtype('M8[ns]')
@requires_netCDF4
def test_decode_non_standard_calendar_single_element_fallback(self):
import netCDF4 as nc4
units = 'days since 0001-01-01'
dt = nc4.netcdftime.datetime(2001, 2, 29)
for calendar in ['360_day', 'all_leap', '366_day']:
num_time = nc4.date2num(dt, units, calendar)
with pytest.warns(Warning, match='Unable to decode time axis'):
actual = coding.times.decode_cf_datetime(num_time, units,
calendar=calendar)
expected = np.asarray(nc4.num2date(num_time, units, calendar))
print(num_time, calendar, actual, expected)
assert actual.dtype == np.dtype('O')
assert expected == actual
@requires_netCDF4
def test_decode_non_standard_calendar_multidim_time(self):
import netCDF4 as nc4
calendar = 'noleap'
units = 'days since 0001-01-01'
times1 = pd.date_range('2001-04-01', end='2001-04-05', freq='D')
times2 = pd.date_range('2001-05-01', end='2001-05-05', freq='D')
noleap_time1 = nc4.date2num(times1.to_pydatetime(), units,
calendar=calendar)
noleap_time2 = nc4.date2num(times2.to_pydatetime(), units,
calendar=calendar)
mdim_time = np.empty((len(noleap_time1), 2), )
mdim_time[:, 0] = noleap_time1
mdim_time[:, 1] = noleap_time2
expected1 = times1.values
expected2 = times2.values
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Unable to decode time axis')
actual = coding.times.decode_cf_datetime(mdim_time, units,
calendar=calendar)
assert actual.dtype == np.dtype('M8[ns]')
assert_array_equal(actual[:, 0], expected1)
assert_array_equal(actual[:, 1], expected2)
@requires_netCDF4
def test_decode_non_standard_calendar_fallback(self):
import netCDF4 as nc4
# ensure leap year doesn't matter
for year in [2010, 2011, 2012, 2013, 2014]:
for calendar in ['360_day', '366_day', 'all_leap']:
calendar = '360_day'
units = 'days since {0}-01-01'.format(year)
num_times = np.arange(100)
expected = nc4.num2date(num_times, units, calendar)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
actual = coding.times.decode_cf_datetime(num_times, units,
calendar=calendar)
assert len(w) == 1
assert 'Unable to decode time axis' in \
str(w[0].message)
assert actual.dtype == np.dtype('O')
assert_array_equal(actual, expected)
@requires_netCDF4
def test_cf_datetime_nan(self):
for num_dates, units, expected_list in [
([np.nan], 'days since 2000-01-01', ['NaT']),
([np.nan, 0], 'days since 2000-01-01',
['NaT', '2000-01-01T00:00:00Z']),
([np.nan, 0, 1], 'days since 2000-01-01',
['NaT', '2000-01-01T00:00:00Z', '2000-01-02T00:00:00Z']),
]:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'All-NaN')
actual = coding.times.decode_cf_datetime(num_dates, units)
expected = np.array(expected_list, dtype='datetime64[ns]')
assert_array_equal(expected, actual)
@requires_netCDF4
def test_decoded_cf_datetime_array_2d(self):
# regression test for GH1229
variable = Variable(('x', 'y'), np.array([[0, 1], [2, 3]]),
{'units': 'days since 2000-01-01'})
result = coding.times.CFDatetimeCoder().decode(variable)
assert result.dtype == 'datetime64[ns]'
expected = pd.date_range('2000-01-01', periods=4).values.reshape(2, 2)
assert_array_equal(np.asarray(result), expected)
def test_infer_datetime_units(self):
for dates, expected in [(pd.date_range('1900-01-01', periods=5),
'days since 1900-01-01 00:00:00'),
(pd.date_range('1900-01-01 12:00:00', freq='H',
periods=2),
'hours since 1900-01-01 12:00:00'),
(['1900-01-01', '1900-01-02',
'1900-01-02 00:00:01'],
'seconds since 1900-01-01 00:00:00'),
(pd.to_datetime(
['1900-01-01', '1900-01-02', 'NaT']),
'days since 1900-01-01 00:00:00'),
(pd.to_datetime(['1900-01-01',
'1900-01-02T00:00:00.005']),
'seconds since 1900-01-01 00:00:00'),
(pd.to_datetime(['NaT', '1900-01-01']),
'days since 1900-01-01 00:00:00'),
(pd.to_datetime(['NaT']),
'days since 1970-01-01 00:00:00'),
]:
assert expected == coding.times.infer_datetime_units(dates)
def test_cf_timedelta(self):
examples = [
('1D', 'days', np.int64(1)),
(['1D', '2D', '3D'], 'days', np.array([1, 2, 3], 'int64')),
('1h', 'hours', np.int64(1)),
('1ms', 'milliseconds', np.int64(1)),
('1us', 'microseconds', np.int64(1)),
(['NaT', '0s', '1s'], None, [np.nan, 0, 1]),
(['30m', '60m'], 'hours', [0.5, 1.0]),
(np.timedelta64('NaT', 'ns'), 'days', np.nan),
(['NaT', 'NaT'], 'days', [np.nan, np.nan]),
]
for timedeltas, units, numbers in examples:
timedeltas = pd.to_timedelta(timedeltas, box=False)
numbers = np.array(numbers)
expected = numbers
actual, _ = coding.times.encode_cf_timedelta(timedeltas, units)
assert_array_equal(expected, actual)
assert expected.dtype == actual.dtype
if units is not None:
expected = timedeltas
actual = coding.times.decode_cf_timedelta(numbers, units)
assert_array_equal(expected, actual)
assert expected.dtype == actual.dtype
expected = np.timedelta64('NaT', 'ns')
actual = coding.times.decode_cf_timedelta(np.array(np.nan), 'days')
assert_array_equal(expected, actual)
def test_cf_timedelta_2d(self):
timedeltas = ['1D', '2D', '3D']
units = 'days'
numbers = np.atleast_2d([1, 2, 3])
timedeltas = np.atleast_2d(pd.to_timedelta(timedeltas, box=False))
expected = timedeltas
actual = coding.times.decode_cf_timedelta(numbers, units)
assert_array_equal(expected, actual)
assert expected.dtype == actual.dtype
def test_infer_timedelta_units(self):
for deltas, expected in [
(pd.to_timedelta(['1 day', '2 days']), 'days'),
(pd.to_timedelta(['1h', '1 day 1 hour']), 'hours'),
(pd.to_timedelta(['1m', '2m', np.nan]), 'minutes'),
(pd.to_timedelta(['1m3s', '1m4s']), 'seconds')]:
assert expected == coding.times.infer_timedelta_units(deltas)
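# --- Hedged usage sketch (editor's addition, not an xarray test) ---
# A minimal CF-time round trip with the functions exercised above; the sample
# values are illustrative only.
def _cf_time_round_trip_example():
    units = 'days since 2000-01-01'
    decoded = coding.times.decode_cf_datetime(np.arange(3), units)
    encoded, enc_units, calendar = coding.times.encode_cf_datetime(decoded, units)
    return decoded, encoded, enc_units, calendar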
| 47.503086
| 79
| 0.536612
|
2c99e4306d15082ad0fc9413da5b4d07f060db4c
| 808
|
py
|
Python
|
src/src/urls.py
|
vijaykharkar/twitter-user-data-django
|
dc849fdc44f2319f11982fccf0a4ea647b23bf25
|
[
"Apache-2.0"
] | null | null | null |
src/src/urls.py
|
vijaykharkar/twitter-user-data-django
|
dc849fdc44f2319f11982fccf0a4ea647b23bf25
|
[
"Apache-2.0"
] | null | null | null |
src/src/urls.py
|
vijaykharkar/twitter-user-data-django
|
dc849fdc44f2319f11982fccf0a4ea647b23bf25
|
[
"Apache-2.0"
] | null | null | null |
"""src URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from userdata.views import funct
urlpatterns = [
path('admin/', admin.site.urls),
    path('', funct.as_view()),
]
| 33.666667
| 77
| 0.706683
|
e377843f36962ae6e71821c5b3d159cc12824de8
| 6,837
|
py
|
Python
|
audio_gallery/settings.py
|
adw1n/audio-gallery
|
47ffde7266d62cac8e8cb5281225ec452ca1cb5e
|
[
"Unlicense"
] | null | null | null |
audio_gallery/settings.py
|
adw1n/audio-gallery
|
47ffde7266d62cac8e8cb5281225ec452ca1cb5e
|
[
"Unlicense"
] | 6
|
2020-06-05T18:41:46.000Z
|
2022-03-11T23:25:38.000Z
|
audio_gallery/settings.py
|
adw1n/audio-gallery
|
47ffde7266d62cac8e8cb5281225ec452ca1cb5e
|
[
"Unlicense"
] | null | null | null |
import os
import distutils.util
from django.utils.translation import ugettext_lazy as _
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def generate_secret_key(file: str) -> str:
    from django.utils.crypto import get_random_string
    # key is generated in the same way as in django startproject command
    chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
    random_secret_key = get_random_string(50, chars)
    with open(file, "w+") as secret_key_file:
        secret_key_file.write('SECRET_KEY = "%s"\n' % random_secret_key)
    return random_secret_key
# This way of handling secret key creation has been suggested in:
# http://stackoverflow.com/questions/4664724/distributing-django-projects-with-unique-secret-keys
try:
from .secret_key import SECRET_KEY
except ImportError:
SETTINGS_DIR = os.path.dirname(os.path.abspath(__file__))
secret_key_file = os.path.join(SETTINGS_DIR, 'secret_key.py')
SECRET_KEY=generate_secret_key(secret_key_file)
try:
__debug_env_var = os.environ['DJANGO_DEBUG']
except KeyError:
DEBUG = True
else:
DEBUG = bool(distutils.util.strtobool(__debug_env_var))
try:
ALLOWED_HOSTS = os.environ['ALLOWED_HOSTS'].split(':')
except KeyError:
ALLOWED_HOSTS = ['*']
try:
ADMINS = [('admin', os.environ["ADMINS"])]
except KeyError:
pass
INSTALLED_APPS = (
'modeltranslation',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'audio_profiling'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'audio_gallery.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'audio_gallery.wsgi.application'
#using memcache only for celery to be able to use locking
#cache middleware is not added
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': os.environ.get('MEMCACHED_HOST', '127.0.0.1')+':11211',
}
}
# RabbitMQ
CELERY_BROKER_URL = 'amqp://{0}:{1}@{2}:5672//'.format(
os.environ.get('RABBIT_USER', 'guest'),
os.environ.get('RABBIT_PASSWORD', 'guest'),
os.environ.get('RABBIT_HOST', '127.0.0.1'))
DATABASE_DIR=os.environ.get('DJANGO_DATABASE_DIR', os.path.join(BASE_DIR, '../database/'))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'audiogallery',
'USER': os.environ.get('DB_USER', 'postgres'),
'PASSWORD': os.environ.get('DB_PASSWORD', 'password'),
'HOST': os.environ.get('DB_HOST', '127.0.0.1'),
'PORT': '5432',
}
}
LOCALE_PATHS = [
os.path.join(BASE_DIR, 'locale')
]
LOGS_DIR = os.environ.get('DJANGO_LOGS_DIR', os.path.join(BASE_DIR, '..', 'logs'))
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '[%(levelname)s] %(message)s'
},
'verbose': {
'format': '[%(levelname)s] [%(asctime)s] [logger: %(name)s file: %(pathname)s function: %(funcName)s line: %(lineno)d]'
' [proc: %(process)d thread: %(thread)d]\n%(message)s'
},
},
'filters':{
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'django_log_file':{
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'maxBytes': 50*(1024**2), #50MB
'backupCount': 5,
'encoding': 'utf-8',
'filename': '%s' % (os.path.join(LOGS_DIR, "django.log")),
'formatter': 'verbose'
},
'celery_log_file':{
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'maxBytes': 50*(1024**2), #50MB
'backupCount': 5,
'encoding': 'utf-8',
'filename': '%s' % (os.path.join(LOGS_DIR, "django_celery.log")),
'formatter': 'verbose'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'filters': ['require_debug_false']
}
},
'loggers': {
'django': {
'handlers': ['console','django_log_file','mail_admins'],
'propagate': True
},
'django.request': {
'handlers': ['console','django_log_file','mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'celery': {
'handlers': ['console','celery_log_file','mail_admins'],
'level': 'DEBUG',
},
'audio_profiling': {
'handlers': ['console', 'django_log_file','mail_admins'],
'level': 'DEBUG'
}
}
}
LANGUAGES = [
('en', _('English')),
('pl', _('Polish')),
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.environ.get('STATIC_ROOT', os.path.join(BASE_DIR, '../static'))
MEDIA_URL='/media/'
MEDIA_ROOT = os.environ.get('MEDIA_ROOT', os.path.join(BASE_DIR, '../media'))
FILE_UPLOAD_PERMISSIONS = 0o644 # nginx was not able to read .wav files - those were being created with permissions 600
# little heads up - if you are testing the app locally using the manage.py runserver command, you might experience some
# problems with the audio that plays in your browser not being seekable - related stackoverflow thread:
# http://stackoverflow.com/questions/4538810/html5-video-element-non-seekable-when-using-django-development-server
if DEBUG:
import mimetypes
mimetypes.add_type("audio/wav", ".wav", True)
| 31.506912
| 131
| 0.621179
|
a3a75b8e7223a4c47aeb78777cf63942ec4791ae
| 2,650
|
py
|
Python
|
intro_to_dna_center.py
|
zapodeanu/DEVWKS_2840
|
630c57f8a4486e4b96234623fd0fcd5e0426bb3a
|
[
"BSD-Source-Code"
] | 2
|
2020-12-16T03:27:04.000Z
|
2021-01-14T18:14:37.000Z
|
intro_to_dna_center.py
|
zapodeanu/DEVWKS_2840
|
630c57f8a4486e4b96234623fd0fcd5e0426bb3a
|
[
"BSD-Source-Code"
] | 1
|
2021-06-02T00:52:26.000Z
|
2021-06-02T00:52:26.000Z
|
intro_to_dna_center.py
|
zapodeanu/DEVWKS_2840
|
630c57f8a4486e4b96234623fd0fcd5e0426bb3a
|
[
"BSD-Source-Code"
] | 2
|
2020-07-31T17:30:52.000Z
|
2021-01-14T18:14:38.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
__author__ = "Gabriel Zapodeanu TME, ENB"
__email__ = "gzapodea@cisco.com"
__version__ = "0.1.0"
__copyright__ = "Copyright (c) 2019 Cisco and/or its affiliates."
__license__ = "Cisco Sample Code License, Version 1.1"
import json
import utils
import requests
import urllib3
from requests.auth import HTTPBasicAuth # for Basic Auth
from urllib3.exceptions import InsecureRequestWarning # for insecure https warnings
from config import DNAC_URL, DNAC_PASS, DNAC_USER
urllib3.disable_warnings(InsecureRequestWarning) # disable insecure https warnings
DNAC_AUTH = HTTPBasicAuth(DNAC_USER, DNAC_PASS)
def get_dnac_jwt_token(dnac_auth):
"""
Create the authorization token required to access DNA C
Call to DNA C - /api/system/v1/auth/login
Call to DNA C - /api/system/v1/auth/token (for 1.3)
:param dnac_auth - DNA C Basic Auth string
:return: DNA C JWT token
"""
url = DNAC_URL + '/api/system/v1/auth/token'
header = {'content-type': 'application/json'}
response = requests.post(url, auth=dnac_auth, headers=header, verify=False)
dnac_jwt_token = response.json()['Token']
return dnac_jwt_token
def get_all_device_info(dnac_jwt_token):
"""
The function will return all network devices info
:param dnac_jwt_token: DNA C token
:return: DNA C device inventory info
"""
url = DNAC_URL + '/api/v1/network-device'
header = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
all_device_response = requests.get(url, headers=header, verify=False)
all_device_info = all_device_response.json()
return all_device_info['response']
# get the DNA Center auth token
dnac_jwt_auth = get_dnac_jwt_token(DNAC_AUTH)
print('\nThe DNA Center Auth token is: ', dnac_jwt_auth)
# retrieve all managed devices info
all_devices_info = get_all_device_info(dnac_jwt_auth)
print('\n\nThe information for all Cisco DNA Center managed devices is: ')
utils.pprint(all_devices_info)
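# --- Hedged usage sketch (editor's addition) ---
# One way to summarize the inventory printed above. The 'hostname' and
# 'managementIpAddress' keys are assumptions about the /api/v1/network-device
# response and are not confirmed by this script.
def print_device_summary(device_list):
    for device in device_list:
        print(device.get('hostname'), '-', device.get('managementIpAddress'))
# print_device_summary(all_devices_info)  # uncomment to try the sketch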
| 31.176471
| 84
| 0.744528
|
f2790f7dd8c6e89a0691bca1a8a9307340a34f41
| 1,744
|
py
|
Python
|
api/app.py
|
Paxman23l/weatherapi
|
3c88b528c5a96ef64f79d0c0519dcab1bd6bb60c
|
[
"MIT"
] | null | null | null |
api/app.py
|
Paxman23l/weatherapi
|
3c88b528c5a96ef64f79d0c0519dcab1bd6bb60c
|
[
"MIT"
] | 1
|
2022-03-02T09:57:08.000Z
|
2022-03-02T09:57:08.000Z
|
api/app.py
|
Paxman23l/weatherstation
|
3c88b528c5a96ef64f79d0c0519dcab1bd6bb60c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from importlib import import_module
import os
from flask import Flask, render_template, Response
from dotenv import load_dotenv
from weather import Weather
import json
import threading
load_dotenv()
# import camera driver
if os.environ.get('CAMERA'):
Camera = import_module('camera_' + os.environ['CAMERA']).Camera
else:
from camera import Camera
# Raspberry Pi camera module (requires picamera package)
# from camera_pi import Camera
app = Flask(__name__)
@app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
def gen(camera):
"""Video streaming generator function."""
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/weather/inside')
def inside_temperature():
resultJson = {
'temperature': Weather.get_inside_temp(),
'humidity': Weather.get_inside_humidity(),
'pressure': Weather.get_inside_pressure(),
}
return json.dumps(resultJson)
@app.route('/weather/outside')
def outside_temperature():
resultJson = Weather.get_outside_weather()
return json.dumps(resultJson)
@app.route('/video_feed')
def video_feed():
"""Video streaming route. Put this in the src attribute of an img tag."""
return Response(gen(Camera()),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.after_request
def apply_caching(response):
response.headers["Access-Control-Allow-Origin"] = "*"
return response
if __name__ == '__main__':
t1 = threading.Thread(target=Weather.start_weather_polling)
t1.start()
app.run(host='0.0.0.0', threaded=True)
| 26.830769
| 77
| 0.686353
|
327fc27fe88d668ad853d48c0798879d25c93924
| 105,152
|
py
|
Python
|
pynetdicom/tests/test_service_qr.py
|
howardpchen/pynetdicom
|
a67ad0422dbbc8b7c196da2b2c5fd38e7caf8d47
|
[
"MIT"
] | null | null | null |
pynetdicom/tests/test_service_qr.py
|
howardpchen/pynetdicom
|
a67ad0422dbbc8b7c196da2b2c5fd38e7caf8d47
|
[
"MIT"
] | null | null | null |
pynetdicom/tests/test_service_qr.py
|
howardpchen/pynetdicom
|
a67ad0422dbbc8b7c196da2b2c5fd38e7caf8d47
|
[
"MIT"
] | null | null | null |
"""Tests for the following Service Classes:
* QueryRetrieveServiceClass
* HangingProtocolQueryRetrieveServiceClass
* DefinedProcedureProtocolQueryRetrieveServiceClass
* ColorPaletteQueryRetrieveServiceClass
* ImplantTemplateQueryRetrieveServiceClass
"""
from io import BytesIO
import logging
import os
import threading
import time
import pytest
from pydicom import dcmread
from pydicom.dataset import Dataset
from pydicom.uid import ImplicitVRLittleEndian, ExplicitVRLittleEndian
from pynetdicom import AE, build_context, StoragePresentationContexts
from pynetdicom.dimse_primitives import C_FIND, C_GET
from pynetdicom.presentation import PresentationContext
from pynetdicom.pdu_primitives import SCP_SCU_RoleSelectionNegotiation
from pynetdicom.service_class import (
QueryRetrieveServiceClass,
BasicWorklistManagementServiceClass,
)
from pynetdicom.sop_class import (
uid_to_sop_class,
ModalityWorklistInformationFind,
VerificationSOPClass,
CTImageStorage,
PatientRootQueryRetrieveInformationModelFind,
PatientRootQueryRetrieveInformationModelGet,
PatientRootQueryRetrieveInformationModelMove,
CompositeInstanceRetrieveWithoutBulkDataGet,
)
from .dummy_c_scp import (
DummyVerificationSCP,
DummyStorageSCP,
DummyFindSCP,
DummyBaseSCP,
DummyGetSCP,
DummyMoveSCP
)
LOGGER = logging.getLogger('pynetdicom')
#LOGGER.setLevel(logging.DEBUG)
LOGGER.setLevel(logging.CRITICAL)
TEST_DS_DIR = os.path.join(os.path.dirname(__file__), 'dicom_files')
DATASET = dcmread(os.path.join(TEST_DS_DIR, 'CTImageStorage.dcm'))
def test_unknown_sop_class():
"""Test that starting the QR SCP with an unknown SOP Class raises"""
service = QueryRetrieveServiceClass(None)
context = PresentationContext()
context.abstract_syntax = '1.2.3.4'
context.add_transfer_syntax('1.2')
with pytest.raises(ValueError):
service.SCP(None, context, None)
class TestQRFindServiceClass(object):
"""Test the QueryRetrieveFindServiceClass"""
def setup(self):
"""Run prior to each test"""
self.query = Dataset()
self.query.QueryRetrieveLevel = "PATIENT"
self.query.PatientName = '*'
self.scp = None
def teardown(self):
"""Clear any active threads"""
if self.scp:
self.scp.abort()
time.sleep(0.1)
for thread in threading.enumerate():
if isinstance(thread, DummyBaseSCP):
thread.abort()
thread.stop()
def test_bad_req_identifier(self):
"""Test SCP handles a bad request identifier"""
self.scp = DummyFindSCP()
self.scp.statuses = [0xFF00]
self.scp.identifiers = [self.query]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind,
ExplicitVRLittleEndian)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
req = C_FIND()
req.MessageID = 1
req.AffectedSOPClassUID = PatientRootQueryRetrieveInformationModelFind
req.Priority = 2
req.Identifier = BytesIO(b'\x08\x00\x01\x00\x04\x00\x00\x00\x00\x08\x00\x49')
assoc.dimse.send_msg(req, 1)
rsp, _ = assoc.dimse.receive_msg(True)
assert rsp.Status == 0xC310
assoc.release()
self.scp.stop()
def test_callback_status_dataset(self):
"""Test on_c_find yielding a Dataset status"""
self.scp = DummyFindSCP()
self.scp.statuses = [Dataset(), 0x0000]
self.scp.statuses[0].Status = 0xFF00
        self.scp.identifiers = [self.query, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_find(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
status, identifier = next(result)
assert status.Status == 0x0000
assoc.release()
self.scp.stop()
def test_callback_status_dataset_multi(self):
"""Test on_c_find yielding a Dataset status with other elements"""
self.scp = DummyFindSCP()
self.scp.statuses = [Dataset()]
self.scp.statuses[0].Status = 0xFF00
self.scp.statuses[0].ErrorComment = 'Test'
self.scp.statuses[0].OffendingElement = 0x00010001
self.scp.identifiers = [self.query]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_find(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert status.ErrorComment == 'Test'
assert status.OffendingElement == 0x00010001
status, identifier = next(result)
assert status.Status == 0x0000
assoc.release()
self.scp.stop()
def test_callback_status_int(self):
"""Test on_c_find yielding an int status"""
self.scp = DummyFindSCP()
self.scp.statuses = [0xFF00]
self.scp.identifiers = [self.query]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_find(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
status, identifier = next(result)
assert status.Status == 0x0000
assoc.release()
self.scp.stop()
def test_callback_status_unknown(self):
"""Test SCP handles on_c_find yielding a unknown status"""
self.scp = DummyFindSCP()
self.scp.statuses = [0xFFF0]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_find(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFFF0
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_callback_status_invalid(self):
"""Test SCP handles on_c_find yielding a invalid status"""
self.scp = DummyFindSCP()
self.scp.statuses = ['Failure']
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_find(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xC002
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_callback_status_none(self):
"""Test SCP handles on_c_find not yielding a status"""
self.scp = DummyFindSCP()
self.scp.statuses = [None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_find(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xC002
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_callback_exception(self):
"""Test SCP handles on_c_find yielding an exception"""
self.scp = DummyFindSCP()
def on_c_find(ds, context, info): raise ValueError
self.scp.ae.on_c_find = on_c_find
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_find(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xC311
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_callback_bad_identifier(self):
"""Test SCP handles a bad callback identifier"""
self.scp = DummyFindSCP()
self.scp.statuses = [0xFF00, 0xFE00]
self.scp.identifiers = [None, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_find(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xC312
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_pending_cancel(self):
"""Test on_c_find yielding pending then cancel status"""
self.scp = DummyFindSCP()
self.scp.statuses = [0xFF00, 0xFE00]
self.scp.identifiers = [self.query, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_find(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier == self.query
status, identifier = next(result)
assert status.Status == 0xFE00
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_pending_success(self):
"""Test on_c_find yielding pending then success status"""
self.scp = DummyFindSCP()
self.scp.statuses = [0xFF01, 0x0000, 0xA700]
self.scp.identifiers = [self.query, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_find(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF01
assert identifier == self.query
status, identifier = next(result)
assert status.Status == 0x0000
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_pending_failure(self):
"""Test on_c_find yielding pending then failure status"""
self.scp = DummyFindSCP()
self.scp.statuses = [0xFF00, 0xA700, 0x0000]
self.scp.identifiers = [self.query, None, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_find(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier == self.query
status, identifier = next(result)
assert status.Status == 0xA700
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_multi_pending_cancel(self):
"""Test on_c_find yielding multiple pending then cancel status"""
self.scp = DummyFindSCP()
self.scp.statuses = [0xFF00, 0xFF01, 0xFF00, 0xFE00, 0x0000]
self.scp.identifiers = [self.query, self.query, self.query, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_find(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier == self.query
status, identifier = next(result)
assert status.Status == 0xFF01
assert identifier == self.query
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier == self.query
status, identifier = next(result)
assert status.Status == 0xFE00
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_multi_pending_success(self):
"""Test on_c_find yielding multiple pending then success status"""
self.scp = DummyFindSCP()
self.scp.statuses = [0xFF00, 0xFF01, 0xFF00, 0x0000, 0xA700]
self.scp.identifiers = [self.query, self.query, self.query, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_find(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier == self.query
status, identifier = next(result)
assert status.Status == 0xFF01
assert identifier == self.query
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier == self.query
status, identifier = next(result)
assert status.Status == 0x0000
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_multi_pending_failure(self):
"""Test on_c_find yielding multiple pending then failure status"""
self.scp = DummyFindSCP()
self.scp.statuses = [0xFF00, 0xFF01, 0xFF00, 0xA700, 0x0000]
self.scp.identifiers = [self.query, self.query, self.query, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_find(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier == self.query
status, identifier = next(result)
assert status.Status == 0xFF01
assert identifier == self.query
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier == self.query
status, identifier = next(result)
assert status.Status == 0xA700
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_scp_callback_context(self):
"""Test on_c_store caontext parameter"""
self.scp = DummyFindSCP()
self.scp.statuses = [Dataset(), 0x0000]
self.scp.statuses[0].Status = 0xFF00
        self.scp.identifiers = [self.query, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind,
'1.2.840.10008.1.2.1')
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_find(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
status, identifier = next(result)
assert status.Status == 0x0000
assoc.release()
assert assoc.is_released
assert self.scp.context.context_id == 1
assert self.scp.context.abstract_syntax == PatientRootQueryRetrieveInformationModelFind
assert self.scp.context.transfer_syntax == '1.2.840.10008.1.2.1'
self.scp.stop()
def test_scp_callback_info(self):
"""Test on_c_store caontext parameter"""
self.scp = DummyFindSCP()
self.scp.statuses = [Dataset(), 0x0000]
self.scp.statuses[0].Status = 0xFF00
        self.scp.identifiers = [self.query, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelFind)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_find(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
status, identifier = next(result)
assert status.Status == 0x0000
assoc.release()
assert assoc.is_released
assert 'address' in self.scp.info['requestor']
assert self.scp.info['requestor']['ae_title'] == b'PYNETDICOM '
#assert self.scp.info['requestor']['called_aet'] == b'ANY-SCP '
assert isinstance(self.scp.info['requestor']['port'], int)
assert self.scp.info['acceptor']['port'] == 11112
assert 'address' in self.scp.info['acceptor']
assert self.scp.info['acceptor']['ae_title'] == b'PYNETDICOM '
assert self.scp.info['parameters']['message_id'] == 1
assert self.scp.info['parameters']['priority'] == 2
self.scp.stop()
class TestQRGetServiceClass(object):
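    """Test the QueryRetrieveGetServiceClass"""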
def setup(self):
"""Run prior to each test"""
self.query = Dataset()
self.query.PatientName = '*'
self.query.QueryRetrieveLevel = "PATIENT"
self.ds = Dataset()
self.ds.file_meta = Dataset()
self.ds.file_meta.TransferSyntaxUID = ImplicitVRLittleEndian
self.ds.SOPClassUID = CTImageStorage
self.ds.SOPInstanceUID = '1.1.1'
self.ds.PatientName = 'Test'
self.fail = Dataset()
self.fail.FailedSOPInstanceUIDList = ['1.2.3']
self.scp = None
def teardown(self):
"""Clear any active threads"""
if self.scp:
self.scp.abort()
time.sleep(0.1)
for thread in threading.enumerate():
if isinstance(thread, DummyBaseSCP):
thread.abort()
thread.stop()
def test_bad_req_identifier(self):
"""Test SCP handles a bad request identifier"""
self.scp = DummyGetSCP()
self.scp.statuses = [0xFF00]
self.scp.datasets = [self.ds]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet,
ExplicitVRLittleEndian)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
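        # Build the C-GET primitive by hand so we can supply an Identifier
        # that can't be decoded using the negotiated Explicit VR Little
        # Endian transfer syntax.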
req = C_GET()
req.MessageID = 1
req.AffectedSOPClassUID = PatientRootQueryRetrieveInformationModelGet
req.Priority = 2
req.Identifier = BytesIO(b'\x08\x00\x01\x00\x04\x00\x00\x00\x00\x08\x00\x49')
assoc.dimse.send_msg(req, 1)
status, _ = assoc.dimse.receive_msg(True)
assert status.Status == 0xC410
assoc.release()
self.scp.stop()
def test_get_callback_bad_subops(self):
"""Test on_c_get yielding a bad no subops"""
self.scp = DummyGetSCP()
self.scp.no_suboperations = 'test'
self.scp.statuses = [Dataset(), 0x0000]
self.scp.statuses[0].Status = 0xFF00
self.scp.datasets = [self.ds, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
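        # For C-GET the requestor must also act as a Storage SCP so it can
        # receive the C-STORE sub-operations, hence the role selection below.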
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
ae.acse_timeout = 5
ae.dimse_timeout = 5
def on_c_store(ds, context, assoc_info):
return 0x0000
ae.on_c_store = on_c_store
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xC413
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_get_callback_status_dataset(self):
"""Test on_c_get yielding a Dataset status"""
self.scp = DummyGetSCP()
self.scp.no_suboperations = 1
self.scp.statuses = [Dataset(), 0x0000]
self.scp.statuses[0].Status = 0xFF00
self.scp.datasets = [self.ds, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
def on_c_store(ds, context, assoc_info):
return 0x0000
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_get_callback_status_dataset_multi(self):
"""Test on_c_get yielding a Dataset status with other elements"""
self.scp = DummyGetSCP()
self.scp.statuses = [Dataset()]
self.scp.statuses[0].Status = 0xFF00
self.scp.statuses[0].ErrorComment = 'Test'
self.scp.statuses[0].OffendingElement = 0x00010001
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
def on_c_store(ds, context, assoc_info):
return 0x0000
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert status.ErrorComment == 'Test'
assert status.OffendingElement == 0x00010001
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_get_callback_status_int(self):
"""Test on_c_get yielding an int status"""
self.scp = DummyGetSCP()
self.scp.statuses = [0xFF00]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
def on_c_store(ds, context, assoc_info):
return 0x0000
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_get_callback_status_unknown(self):
"""Test SCP handles on_c_get yielding a unknown status"""
self.scp = DummyGetSCP()
self.scp.statuses = [0xFFF0]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
def on_c_store(ds, context, assoc_info):
return 0x0000
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFFF0
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_get_callback_status_invalid(self):
"""Test SCP handles on_c_get yielding a invalid status"""
self.scp = DummyGetSCP()
self.scp.statuses = ['Failure']
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
def on_c_store(ds, context, assoc_info):
return 0x0000
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xC002
assert identifier.FailedSOPInstanceUIDList == ''
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_get_callback_status_none(self):
"""Test SCP handles on_c_get not yielding a status"""
self.scp = DummyGetSCP()
self.scp.statuses = [None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
def on_c_store(ds, context, assoc_info):
return 0x0000
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xC002
assert identifier.FailedSOPInstanceUIDList == ''
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_get_callback_exception(self):
"""Test SCP handles on_c_get yielding an exception"""
self.scp = DummyGetSCP()
def on_c_get(ds, context, info): raise ValueError
self.scp.ae.on_c_get = on_c_get
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
def on_c_store(ds, context, assoc_info):
return 0x0000
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xC411
assert identifier == Dataset()
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_get_callback_bad_dataset(self):
"""Test SCP handles on_c_get not yielding a valid dataset"""
self.scp = DummyGetSCP()
self.scp.statuses = [0xFF00, 0x0000]
self.scp.datasets = [self.fail, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
def on_c_store(ds, context, assoc_info):
return 0x0000
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert identifier.FailedSOPInstanceUIDList == ''
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_get_callback_invalid_dataset(self):
"""Test status returned correctly if not yielding a Dataset."""
self.scp = DummyGetSCP()
def on_c_store(ds, context, assoc_info):
return 0x0000
self.scp.no_suboperations = 3
self.scp.statuses = [Dataset(), Dataset(), Dataset(), 0x0000]
self.scp.statuses[0].Status = 0xFF00
self.scp.statuses[1].Status = 0xFF00
self.scp.statuses[2].Status = 0xFF00
self.scp.datasets = [self.ds, 'acbdef', self.ds]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 1
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 2
assert identifier.FailedSOPInstanceUIDList == ''
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_store_callback_exception(self):
"""Test SCP handles send_c_store raising an exception"""
self.scp = DummyGetSCP()
self.scp.statuses = [0xFF00, 0x0000]
self.scp.datasets = [self.query, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
def on_c_store(ds, context, assoc_info):
return 0x0000
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert identifier.FailedSOPInstanceUIDList == ''
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_scp_basic(self):
"""Test on_c_get"""
self.scp = DummyGetSCP()
def on_c_store(ds, context, assoc_info):
return 0x0000
self.scp.statuses = [0xFF00, 0xFF00]
self.scp.datasets = [self.ds, self.ds]
self.scp.no_suboperations = 2
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_scp_store_failure(self):
"""Test when on_c_store returns failure status"""
self.scp = DummyGetSCP()
def on_c_store(ds, context, assoc_info):
return 0xC001
# SCP should override final success status
self.scp.statuses = [0xFF00, 0xFF00, 0x0000]
self.scp.datasets = [self.ds, self.ds, None]
self.scp.no_suboperations = 2
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
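        # Final response: both store sub-operations failed, so the SCP
        # returns Warning (0xB000) and lists the failed SOP Instance UIDs.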
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 2
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 0
assert identifier.FailedSOPInstanceUIDList == ['1.1.1', '1.1.1']
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_scp_store_warning(self):
"""Test when on_c_store returns warning status"""
self.scp = DummyGetSCP()
def on_c_store(ds, context, assoc_info):
return 0xB000
# SCP should override final success status
self.scp.statuses = [0xFF00, 0xFF00, 0x0000]
self.scp.datasets = [self.ds, self.ds, None]
self.scp.no_suboperations = 3
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 2
assert status.NumberOfCompletedSuboperations == 0
assert identifier.FailedSOPInstanceUIDList == ['1.1.1', '1.1.1']
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_pending_success(self):
"""Test when on_c_get returns success status"""
self.scp = DummyGetSCP()
def on_c_store(ds, context, assoc_info):
return 0x0000
# SCP should override final warning status
self.scp.statuses = [0xFF00, 0xB000]
self.scp.datasets = [self.ds, None]
self.scp.no_suboperations = 1
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 1
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_pending_warning(self):
"""Test when on_c_get returns warning status"""
self.scp = DummyGetSCP()
def on_c_store(ds, context, assoc_info):
return 0xB000
# SCP should override final success status
self.scp.statuses = [0xFF00, 0x0000]
self.scp.datasets = [self.ds, None]
self.scp.no_suboperations = 1
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 1
assert status.NumberOfCompletedSuboperations == 0
assert identifier.FailedSOPInstanceUIDList == '1.1.1'
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_pending_failure(self):
"""Test on_c_get returns warning status after store failure"""
self.scp = DummyGetSCP()
def on_c_store(ds, context, assoc_info):
return 0xC000
# SCP should override final warning status
self.scp.statuses = [0xFF00]
self.scp.datasets = [self.ds]
self.scp.no_suboperations = 1
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 1
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 0
assert identifier.FailedSOPInstanceUIDList == '1.1.1'
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_multi_pending_success(self):
"""Test on_c_get returns success status after multi store success"""
self.scp = DummyGetSCP()
def on_c_store(ds, context, assoc_info):
return 0x0000
# SCP should override final warning status
self.scp.statuses = [0xFF00, 0xFF00, 0xFF00, 0xB000]
self.scp.datasets = [self.ds, self.ds, self.ds, None]
self.scp.no_suboperations = 3
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 3
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_multi_pending_warning(self):
"""Test on_c_get returns warning status after multi store warning"""
self.scp = DummyGetSCP()
def on_c_store(ds, context, assoc_info):
return 0xB000
# SCP should override final warning status
self.scp.statuses = [0xFF00, 0xFF00, 0xFF00, 0xB000]
self.scp.datasets = [self.ds, self.ds, self.ds, None]
self.scp.no_suboperations = 3
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 3
assert status.NumberOfCompletedSuboperations == 0
assert identifier.FailedSOPInstanceUIDList == ['1.1.1',
'1.1.1',
'1.1.1']
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_multi_pending_failure(self):
"""Test on_c_get returns warning status after multi store failure"""
self.scp = DummyGetSCP()
def on_c_store(ds, context, assoc_info):
return 0xC000
# SCP should override final warning status
self.scp.statuses = [0xFF00, 0xFF00, 0xFF00, 0xB000]
self.scp.datasets = [self.ds, self.ds, self.ds, None]
self.scp.no_suboperations = 3
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 3
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 0
assert identifier.FailedSOPInstanceUIDList == ['1.1.1',
'1.1.1',
'1.1.1']
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_get_failure(self):
"""Test when on_c_get returns failure status"""
self.scp = DummyGetSCP()
def on_c_store(ds, context, assoc_info):
return 0x0000
# SCP should override final success status
self.scp.statuses = [0xFF00, 0xC000]
self.scp.datasets = [self.ds, self.fail]
self.scp.no_suboperations = 2
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xC000
assert status.NumberOfFailedSuboperations == 1
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 1
assert identifier.FailedSOPInstanceUIDList == '1.2.3'
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_get_success(self):
"""Test when on_c_get returns success status"""
self.scp = DummyGetSCP()
def on_c_store(ds, context, assoc_info):
return 0x0000
# SCP should override final success status
self.scp.statuses = [0xFF00]
self.scp.datasets = [self.ds]
self.scp.no_suboperations = 1
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 1
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_get_cancel(self):
"""Test on_c_get returns cancel status"""
self.scp = DummyGetSCP()
def on_c_store(ds, context, assoc_info):
return 0x0000
# SCP should override final success status
self.scp.statuses = [0xFF00, 0xFE00, 0x0000]
self.scp.datasets = [self.ds, self.fail, None]
self.scp.no_suboperations = 2
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFE00
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 1
assert identifier.FailedSOPInstanceUIDList == '1.2.3'
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_get_warning(self):
"""Test on_c_get returns warning status"""
self.scp = DummyGetSCP()
def on_c_store(ds, context, assoc_info):
return 0xB000
# SCP should override final success status
self.scp.statuses = [0xFF00]
self.scp.datasets = [self.ds]
self.scp.no_suboperations = 1
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 1
assert status.NumberOfCompletedSuboperations == 0
assert identifier.FailedSOPInstanceUIDList == '1.1.1'
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_scp_callback_context(self):
"""Test on_c_store caontext parameter"""
self.scp = DummyGetSCP()
self.scp.no_suboperations = 1
self.scp.statuses = [Dataset(), 0x0000]
self.scp.statuses[0].Status = 0xFF00
        self.scp.datasets = [self.ds, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet,
'1.2.840.10008.1.2.1')
ae.add_requested_context(CTImageStorage, '1.2.840.10008.1.2.1')
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
def on_c_store(ds, context, assoc_info):
assert context.context_id == 3
assert context.abstract_syntax == CTImageStorage
assert context.transfer_syntax == '1.2.840.10008.1.2.1'
return 0x0000
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
assert assoc.is_released
assert self.scp.context.context_id == 1
assert self.scp.context.abstract_syntax == PatientRootQueryRetrieveInformationModelGet
assert self.scp.context.transfer_syntax == '1.2.840.10008.1.2.1'
self.scp.stop()
def test_scp_callback_info(self):
"""Test on_c_store caontext parameter"""
self.scp = DummyGetSCP()
self.scp.no_suboperations = 1
self.scp.statuses = [Dataset(), 0x0000]
self.scp.statuses[0].Status = 0xFF00
        self.scp.datasets = [self.ds, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet,
'1.2.840.10008.1.2.1')
ae.add_requested_context(CTImageStorage, '1.2.840.10008.1.2.1')
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scu_role = False
role.scp_role = True
def on_c_store(ds, context, assoc_info):
assert context.abstract_syntax == CTImageStorage
assert context.transfer_syntax == '1.2.840.10008.1.2.1'
return 0x0000
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
assert assoc.is_released
assert 'address' in self.scp.info['requestor']
assert self.scp.info['requestor']['ae_title'] == b'PYNETDICOM '
#assert self.scp.info['requestor']['called_aet'] == b'ANY-SCP '
assert isinstance(self.scp.info['requestor']['port'], int)
assert self.scp.info['acceptor']['port'] == 11112
assert 'address' in self.scp.info['acceptor']
assert self.scp.info['acceptor']['ae_title'] == b'PYNETDICOM '
assert self.scp.info['parameters']['message_id'] == 1
assert self.scp.info['parameters']['priority'] == 2
self.scp.stop()
def test_contexts(self):
"""Test multiple presentation contexts work OK."""
self.scp = DummyGetSCP()
def on_c_store(ds, context, assoc_info):
return 0x0000
# SCP should override final success status
self.scp.statuses = [0xFF00]
self.scp.datasets = [self.ds]
self.scp.no_suboperations = 1
self.scp.start()
ae = AE()
ae.requested_contexts = StoragePresentationContexts[:120]
ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
role_selection = []
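        # Offer the SCP role for every Storage context so this AE can
        # receive the C-STORE sub-operations during the C-GET.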
for context in StoragePresentationContexts:
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = context.abstract_syntax
role.scu_role = False
role.scp_role = True
role_selection.append(role)
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=role_selection)
assert assoc.is_established
# Check requestor's negotiated contexts
storage_uids = [cx.abstract_syntax for cx in StoragePresentationContexts]
for cx in assoc.accepted_contexts:
if cx.abstract_syntax in storage_uids:
# Requestor is acting as SCP for storage contexts
assert cx.as_scp is True
assert cx.as_scu is False
else:
# Requestor is acting as SCU for query contexts
assert cx.as_scp is False
assert cx.as_scu is True
# Check acceptor's negotiated contexts
acc_assoc = self.scp.ae.active_associations[0]
for cx in acc_assoc.accepted_contexts:
if cx.abstract_syntax in storage_uids:
# Acceptor is acting as SCU for storage contexts
assert cx.as_scp is False
assert cx.as_scu is True
else:
# Acceptor is acting as SCP for query contexts
assert cx.as_scp is True
assert cx.as_scu is False
assert len(acc_assoc.rejected_contexts) == 0
assoc.release()
self.scp.stop()
class TestQRMoveServiceClass(object):
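    """Test the QueryRetrieveMoveServiceClass"""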
def setup(self):
"""Run prior to each test"""
self.query = Dataset()
self.query.PatientName = '*'
self.query.QueryRetrieveLevel = "PATIENT"
self.ds = Dataset()
self.ds.file_meta = Dataset()
self.ds.file_meta.TransferSyntaxUID = ImplicitVRLittleEndian
self.ds.SOPClassUID = CTImageStorage
self.ds.SOPInstanceUID = '1.1.1'
self.ds.PatientName = 'Test'
self.fail = Dataset()
self.fail.FailedSOPInstanceUIDList = ['1.2.3']
self.scp = None
def teardown(self):
"""Clear any active threads"""
if self.scp:
self.scp.abort()
time.sleep(0.1)
for thread in threading.enumerate():
if isinstance(thread, DummyBaseSCP):
thread.abort()
thread.stop()
def test_bad_req_identifier(self):
"""Test SCP handles a bad request identifier"""
self.scp = DummyMoveSCP()
self.scp.statuses = [0xFF00]
self.scp.datasets = [self.ds]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove,
ExplicitVRLittleEndian)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
req = C_GET()
req.MessageID = 1
req.AffectedSOPClassUID = PatientRootQueryRetrieveInformationModelMove
req.Priority = 2
        # Identifier encoded as Implicit VR Little Endian, which doesn't
        # match the negotiated Explicit VR Little Endian transfer syntax
req.Identifier = BytesIO(b'\x08\x00\x01\x00\x04\x00\x00\x00\x00\x08\x00\x49')
assoc.dimse.send_msg(req, 1)
status, _ = assoc.dimse.receive_msg(True)
assert status.Status == 0xC510
assoc.release()
self.scp.stop()
def test_move_callback_bad_yield_destination(self):
"""Test correct status returned if callback doesn't yield dest."""
# Testing what happens if the on_c_move callback doesn't yield
self.scp = DummyMoveSCP()
self.scp.statuses = [0xFF00]
self.scp.datasets = [self.ds]
self.scp.test_no_yield = True
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xC514
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_callback_bad_yield_subops(self):
"""Test correct status returned if callback doesn't yield subops."""
self.scp = DummyMoveSCP()
self.scp.statuses = [0xFF00]
self.scp.datasets = [self.ds]
self.scp.test_no_subops = True
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xC514
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_bad_destination(self):
"""Test correct status returned if destination bad."""
self.scp = DummyMoveSCP()
self.scp.statuses = [0xFF00]
self.scp.datasets = [self.ds]
self.scp.destination_ae = (None, 11112)
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xA801
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_callback_bad_subops(self):
"""Test on_c_move yielding a bad no subops"""
self.scp = DummyMoveSCP()
self.scp.no_suboperations = 'test'
self.scp.statuses = [Dataset(), 0x0000]
self.scp.statuses[0].Status = 0xFF00
self.scp.datasets = [self.ds, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xC513
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_callback_bad_aet(self):
"""Test on_c_move yielding a bad move aet"""
self.scp = DummyMoveSCP()
self.scp.no_suboperations = 1
self.scp.statuses = [Dataset(), 0x0000]
self.scp.statuses[0].Status = 0xFF00
self.scp.datasets = [self.ds, None]
self.scp.destination_ae = None
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xC515
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_callback_status_dataset(self):
"""Test on_c_move yielding a Dataset status"""
self.scp = DummyMoveSCP()
self.scp.no_suboperations = 1
self.scp.statuses = [Dataset(), 0x0000]
self.scp.statuses[0].Status = 0xFF00
self.scp.datasets = [self.ds, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_callback_status_dataset_multi(self):
"""Test on_c_move yielding a Dataset status with other elements"""
self.scp = DummyMoveSCP()
self.scp.statuses = [Dataset()]
self.scp.statuses[0].Status = 0xFF00
self.scp.statuses[0].ErrorComment = 'Test'
self.scp.statuses[0].OffendingElement = 0x00010001
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert status.ErrorComment == 'Test'
assert status.OffendingElement == 0x00010001
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_callback_status_int(self):
"""Test on_c_move yielding an int status"""
self.scp = DummyMoveSCP()
self.scp.statuses = [0xFF00]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_callback_status_unknown(self):
"""Test SCP handles on_c_move yielding a unknown status"""
self.scp = DummyMoveSCP()
self.scp.statuses = [0xFFF0]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFFF0
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_callback_status_invalid(self):
"""Test SCP handles on_c_move yielding a invalid status"""
self.scp = DummyMoveSCP()
self.scp.statuses = ['Failure']
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xC002
assert identifier.FailedSOPInstanceUIDList == ''
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_callback_status_none(self):
"""Test SCP handles on_c_move not yielding a status"""
self.scp = DummyMoveSCP()
self.scp.statuses = [None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xC002
assert identifier.FailedSOPInstanceUIDList == ''
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_callback_exception(self):
"""Test SCP handles on_c_move yielding an exception"""
self.scp = DummyMoveSCP()
def on_c_move(ds, dest, context, info): raise ValueError
self.scp.ae.on_c_move = on_c_move
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xC511
assert identifier == Dataset()
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_callback_bad_dataset(self):
"""Test SCP handles on_c_move not yielding a valid dataset"""
self.scp = DummyMoveSCP()
self.scp.statuses = [0xFF00, 0x0000]
self.scp.datasets = [self.fail, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert identifier.FailedSOPInstanceUIDList == ''
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_callback_invalid_dataset(self):
"""Test status returned correctly if not yielding a Dataset."""
self.scp = DummyMoveSCP()
self.scp.no_suboperations = 2
self.scp.statuses = [Dataset(), Dataset(), Dataset(), 0x0000]
self.scp.statuses[0].Status = 0xFF00
self.scp.statuses[1].Status = 0xFF00
self.scp.statuses[2].Status = 0xFF00
self.scp.datasets = [self.ds, 'acbdef', self.ds]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 1
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 2
assert identifier.FailedSOPInstanceUIDList == ''
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_scp_basic(self):
"""Test on_c_move"""
self.scp = DummyMoveSCP()
self.scp.statuses = [0xFF00, 0xFF00]
self.scp.datasets = [self.ds, self.ds]
self.scp.no_suboperations = 2
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_scp_store_failure(self):
"""Test when on_c_store returns failure status"""
self.scp = DummyMoveSCP()
# SCP should override final success status
self.scp.statuses = [0xFF00, 0xFF00, 0x0000]
self.scp.datasets = [self.ds, self.ds, None]
self.scp.no_suboperations = 2
self.scp.store_status = 0xC000
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 2
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 0
assert identifier.FailedSOPInstanceUIDList == ['1.1.1', '1.1.1']
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_callback_warning(self):
"""Test on_c_move returns warning status"""
self.scp = DummyMoveSCP()
# SCP should override final success status
self.scp.statuses = [0xB000, 0xFF00]
self.scp.datasets = [self.ds, self.ds]
self.scp.no_suboperations = 2
self.scp.store_status = 0x0000
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 2
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 0
assert identifier.FailedSOPInstanceUIDList == ''
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_scp_store_warning(self):
"""Test when on_c_store returns warning status"""
self.scp = DummyMoveSCP()
# SCP should override final success status
self.scp.statuses = [0xFF00, 0xFF00, 0x0000]
self.scp.datasets = [self.ds, self.ds, None]
self.scp.no_suboperations = 3
self.scp.store_status = 0xB000
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 2
assert status.NumberOfCompletedSuboperations == 0
assert identifier.FailedSOPInstanceUIDList == ['1.1.1', '1.1.1']
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_pending_success(self):
"""Test when on_c_move returns success status"""
self.scp = DummyMoveSCP()
# SCP should override final warning status
self.scp.statuses = [0xFF00, 0xB000]
self.scp.datasets = [self.ds, None]
self.scp.no_suboperations = 1
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 1
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_pending_warning(self):
"""Test when on_c_move returns warning status"""
self.scp = DummyMoveSCP()
# SCP should override final success status
self.scp.statuses = [0xFF00, 0x0000]
self.scp.datasets = [self.ds, None]
self.scp.no_suboperations = 1
self.scp.store_status = 0xB000
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 1
assert status.NumberOfCompletedSuboperations == 0
assert identifier.FailedSOPInstanceUIDList == '1.1.1'
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_pending_failure(self):
"""Test on_c_move returns warning status after store failure"""
self.scp = DummyMoveSCP()
# SCP should override final warning status
self.scp.statuses = [0xFF00]
self.scp.datasets = [self.ds]
self.scp.no_suboperations = 1
self.scp.store_status = 0xC000
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 1
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 0
assert identifier.FailedSOPInstanceUIDList == '1.1.1'
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_multi_pending_success(self):
"""Test on_c_move returns success status after multi store success"""
self.scp = DummyMoveSCP()
# SCP should override final warning status
self.scp.statuses = [0xFF00, 0xFF00, 0xFF00, 0xB000]
self.scp.datasets = [self.ds, self.ds, self.ds, None]
self.scp.no_suboperations = 3
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 3
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_multi_pending_warning(self):
"""Test on_c_move returns warning status after multi store warning"""
self.scp = DummyMoveSCP()
# SCP should override final warning status
self.scp.statuses = [0xFF00, 0xFF00, 0xFF00, 0xB000]
self.scp.datasets = [self.ds, self.ds, self.ds, None]
self.scp.no_suboperations = 3
self.scp.store_status = 0xB000
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 3
assert status.NumberOfCompletedSuboperations == 0
assert identifier.FailedSOPInstanceUIDList == ['1.1.1',
'1.1.1',
'1.1.1']
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_multi_pending_failure(self):
"""Test on_c_move returns warning status after multi store failure"""
self.scp = DummyMoveSCP()
# SCP should override final warning status
self.scp.statuses = [0xFF00, 0xFF00, 0xFF00, 0xB000]
self.scp.datasets = [self.ds, self.ds, self.ds, None]
self.scp.no_suboperations = 3
self.scp.store_status = 0xC000
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 3
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 0
assert identifier.FailedSOPInstanceUIDList == [
'1.1.1', '1.1.1', '1.1.1'
]
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_failure(self):
"""Test when on_c_move returns failure status"""
self.scp = DummyMoveSCP()
# SCP should override final success status
self.scp.statuses = [0xFF00, 0xC000]
self.scp.datasets = [self.ds, self.fail]
self.scp.no_suboperations = 2
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xC000
assert status.NumberOfFailedSuboperations == 1
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 1
assert identifier.FailedSOPInstanceUIDList == '1.2.3'
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_success(self):
"""Test when on_c_move returns failure status"""
self.scp = DummyMoveSCP()
# SCP should override final success status
self.scp.statuses = [0xFF00]
self.scp.datasets = [self.ds]
self.scp.no_suboperations = 1
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 1
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_cancel(self):
"""Test on_c_move returns cancel status"""
self.scp = DummyMoveSCP()
# SCP should override final success status
self.scp.statuses = [0xFF00, 0xFE00, 0x0000]
self.scp.datasets = [self.ds, self.fail, None]
self.scp.no_suboperations = 2
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xFE00
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 1
assert identifier.FailedSOPInstanceUIDList == '1.2.3'
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_move_warning(self):
"""Test on_c_move returns warning status"""
self.scp = DummyMoveSCP()
# SCP should override final success status
self.scp.statuses = [0xFF00]
self.scp.datasets = [self.ds]
self.scp.no_suboperations = 1
self.scp.store_status = 0xB000
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0xB000
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 1
assert status.NumberOfCompletedSuboperations == 0
assert identifier.FailedSOPInstanceUIDList == '1.1.1'
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_no_associate(self):
"""Test when on_c_move returns failure status"""
self.scp = DummyMoveSCP()
# SCP should override final success status
self.scp.statuses = [0xFF00]
self.scp.datasets = [self.ds]
self.scp.no_suboperations = 1
self.scp.destination_ae = ('localhost', 11113)
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xA801
assert identifier == Dataset()
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_scp_callback_context(self):
"""Test on_c_store caontext parameter"""
self.scp = DummyMoveSCP()
self.scp.no_suboperations = 1
self.scp.statuses = [Dataset(), 0x0000]
self.scp.statuses[0].Status = 0xFF00
self.identifiers = [self.ds, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove,
'1.2.840.10008.1.2.1')
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
assert assoc.is_released
assert self.scp.context.abstract_syntax == PatientRootQueryRetrieveInformationModelMove
assert self.scp.context.transfer_syntax == '1.2.840.10008.1.2.1'
self.scp.stop()
def test_scp_callback_info(self):
"""Test on_c_store caontext parameter"""
self.scp = DummyMoveSCP()
self.scp.no_suboperations = 1
self.scp.statuses = [Dataset(), 0x0000]
self.scp.statuses[0].Status = 0xFF00
self.identifiers = [self.ds, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
assert assoc.is_released
assert 'address' in self.scp.info['requestor']
assert self.scp.info['requestor']['ae_title'] == b'PYNETDICOM '
#assert self.scp.info['requestor']['called_aet'] == b'ANY-SCP '
assert isinstance(self.scp.info['requestor']['port'], int)
assert self.scp.info['acceptor']['port'] == 11112
assert 'address' in self.scp.info['acceptor']
assert self.scp.info['acceptor']['ae_title'] == b'PYNETDICOM '
assert self.scp.info['parameters']['message_id'] == 1
assert self.scp.info['parameters']['priority'] == 2
assert self.scp.store_info['parameters']['originator_aet'] == b'PYNETDICOM '
assert self.scp.store_info['parameters']['originator_message_id'] == 1
self.scp.stop()
def test_scp_callback_move_aet(self):
"""Test on_c_move move_aet parameter"""
self.scp = DummyMoveSCP()
self.scp.no_suboperations = 1
self.scp.statuses = [Dataset(), 0x0000]
self.scp.statuses[0].Status = 0xFF00
self.identifiers = [self.ds, None]
self.scp.start()
ae = AE()
ae.add_requested_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_requested_context(CTImageStorage)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
result = assoc.send_c_move(self.query, b'TESTMOVE', query_model='P')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
assert assoc.is_released
assert self.scp.move_aet == b'TESTMOVE '
self.scp.stop()
class TestQRCompositeInstanceWithoutBulk(object):
"""Tests for QR + Composite Instance Without Bulk Data"""
def setup(self):
"""Run prior to each test"""
self.query = Dataset()
self.query.PatientName = '*'
self.query.QueryRetrieveLevel = "PATIENT"
self.ds = Dataset()
self.ds.file_meta = Dataset()
self.ds.file_meta.TransferSyntaxUID = ImplicitVRLittleEndian
self.ds.SOPClassUID = CTImageStorage
self.ds.SOPInstanceUID = '1.1.1'
self.ds.PatientName = 'Test'
self.fail = Dataset()
self.fail.FailedSOPInstanceUIDList = ['1.2.3']
self.scp = None
def teardown(self):
"""Clear any active threads"""
if self.scp:
self.scp.abort()
time.sleep(0.1)
for thread in threading.enumerate():
if isinstance(thread, DummyBaseSCP):
thread.abort()
thread.stop()
def test_pixel_data(self):
"""Test pixel data is removed"""
self.scp = DummyGetSCP()
assert 'PixelData' in DATASET
self.scp.datasets = [DATASET, None]
self.scp.statuses = [0xFF00, 0x0000]
self.scp.no_suboperations = 1
def on_c_store(ds, context, assoc_info):
assert 'PixelData' not in ds
return 0x0000
# SCP should override final success status
self.scp.start()
ae = AE()
ae.add_requested_context(CompositeInstanceRetrieveWithoutBulkDataGet)
ae.add_requested_context(CTImageStorage)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scp_role = True
role.scu_role = False
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='CB')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 1
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
def test_waveform_sequence(self):
"""Test when on_c_get returns success status"""
self.scp = DummyGetSCP()
self.ds.SOPClassUID = CTImageStorage
self.ds.WaveformSequence = [Dataset(), Dataset()]
self.ds.WaveformSequence[0].WaveformData = b'\x00\x01'
self.ds.WaveformSequence[0].WaveformBitsAllocated = 16
self.ds.WaveformSequence[1].WaveformData = b'\x00\x02'
self.ds.WaveformSequence[1].WaveformBitsAllocated = 8
self.scp.datasets = [self.ds]
self.scp.statuses = [0xFF00]
self.scp.no_suboperations = 1
assert 'WaveformData' in self.ds.WaveformSequence[0]
assert 'WaveformData' in self.ds.WaveformSequence[1]
def on_c_store(ds, context, assoc_info):
assert 'WaveformData' not in self.ds.WaveformSequence[0]
assert 'WaveformData' not in self.ds.WaveformSequence[1]
return 0x0000
# SCP should override final success status
self.scp.start()
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = CTImageStorage
role.scp_role = True
role.scu_role = False
ae = AE()
ae.add_requested_context(CompositeInstanceRetrieveWithoutBulkDataGet)
ae.add_requested_context(CTImageStorage)
ae.on_c_store = on_c_store
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112, ext_neg=[role])
assert assoc.is_established
result = assoc.send_c_get(self.query, query_model='CB')
status, identifier = next(result)
assert status.Status == 0xFF00
assert identifier is None
status, identifier = next(result)
assert status.Status == 0x0000
assert status.NumberOfFailedSuboperations == 0
assert status.NumberOfWarningSuboperations == 0
assert status.NumberOfCompletedSuboperations == 1
assert identifier is None
pytest.raises(StopIteration, next, result)
assoc.release()
self.scp.stop()
class TestBasicWorklistServiceClass(object):
"""Tests for BasicWorklistManagementServiceClass."""
def setup(self):
"""Run prior to each test"""
self.query = Dataset()
self.query.PatientName = '*'
self.query.QueryRetrieveLevel = "PATIENT"
self.ds = Dataset()
self.ds.SOPClassUID = CTImageStorage
self.ds.SOPInstanceUID = '1.1.1'
self.ds.PatientName = 'Test'
self.fail = Dataset()
self.fail.FailedSOPInstanceUIDList = ['1.2.3']
self.scp = None
def teardown(self):
"""Clear any active threads"""
if self.scp:
self.scp.abort()
time.sleep(0.1)
for thread in threading.enumerate():
if isinstance(thread, DummyBaseSCP):
thread.abort()
thread.stop()
def test_bad_abstract_syntax_raises(self):
"""Test calling the BWM SCP with an unknown UID raises exception."""
msg = r'The supplied abstract syntax is not valid'
with pytest.raises(ValueError, match=msg):
bwm = BasicWorklistManagementServiceClass(None)
context = build_context('1.2.3.4')
bwm.SCP(None, context, None)
| 36.882497 | 95 | 0.637544 |
87ab9b312af73c02d796690b9a4edceede078f90 | 505 | py | Python | p0sx/settings/dev.py | norbeta/p0sX-server | 816f86e274b6cbee42588ee778298d51cb8282c0 | ["MIT"] | null | null | null |
p0sx/settings/dev.py | norbeta/p0sX-server | 816f86e274b6cbee42588ee778298d51cb8282c0 | ["MIT"] | null | null | null |
p0sx/settings/dev.py | norbeta/p0sX-server | 816f86e274b6cbee42588ee778298d51cb8282c0 | ["MIT"] | null | null | null |
from p0sx.settings.base import *
ALLOWED_HOSTS = ['polar.tla.wtf', 'localhost']
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'secret'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
SITE_URL = 'https://polar.tla.wtf'
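# Note: these development settings are typically activated by pointing Django at this
# module, e.g. DJANGO_SETTINGS_MODULE=p0sx.settings.dev (assumption based on the package layout).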
| 22.954545 | 66 | 0.683168 |
a931b5232bfa787696ab7bf5925734179c4958d9 | 7,220 | py | Python | complexExercise_01_fluorescence/fluorescence.py | AlreadyTakenJonas/pythonBootCamp2021 | 736002cfdb6992f363c00e3397fc021f879f495d | ["MIT"] | null | null | null |
complexExercise_01_fluorescence/fluorescence.py | AlreadyTakenJonas/pythonBootCamp2021 | 736002cfdb6992f363c00e3397fc021f879f495d | ["MIT"] | null | null | null |
complexExercise_01_fluorescence/fluorescence.py | AlreadyTakenJonas/pythonBootCamp2021 | 736002cfdb6992f363c00e3397fc021f879f495d | ["MIT"] | null | null | null |
#
# This script uses fluorescence spectra of solutions with known concentrations to
# calculate an unknown concentration from a given fluorescence intensity.
# Calculations are done in nano moles per liter (nM).
#
#
# STEP 1 : READ DATA FROM FILE
#
# IMPORT MODULES
#
# Handling files and paths
from pathlib import Path
# Use Regular expression (extracting information from strings)
import re
# CODE
#
# Get the working directory
workingDir = Path(__file__).parent.absolute()
# Get generator with all data files
dataFiles = (workingDir/"data").glob("*.ASC")
# Sort the files by name, makes sure that the data is ordered by concentration
dataFiles = sorted(dataFiles)
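# (Illustration with hypothetical file names:) "probe_050.ASC", "probe_100.ASC", "probe_250.ASC"
# sort lexically in the same order as their zero-padded three-digit concentrations.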
# READ DATA FROM FILE
#
# Create empty data structure
spectra = []
# Read all data files and add data to empty data structure
for file in dataFiles:
print("Read file: ", file.name)
# Extract the concentration of the solution from the file name
    concentration = int(re.findall(r"\d{3}", file.name)[0]) # Concentration in nano mol per liter
# Read the file
content = file.read_text().splitlines()
# Parse the file
# -> Save the data in two columns of value float
wavelength = [ float( line.split(", ")[0] ) for line in content ]
intensity = [ float( line.split(", ")[1].replace(",", ".") ) for line in content ]
# Add data to data structure
spectra.append( {"concentration": concentration,
"wavelength" : wavelength,
"intensity" : intensity } )
#
# STEP 2 : PLOT RAW DATA
#
# IMPORT MODULES
# Plotting
from matplotlib import pyplot as plt
# Use numpy arrays
import numpy as np
# CODE
print("Plot spectra")
# Create plot with all spectra
for spectrum in spectra:
plt.plot("wavelength", "intensity", data = spectrum,
# Add the concentration to the legend
label = str(spectrum["concentration"])+" nM" )
# Add legend
plt.legend()
# Add grid
plt.grid()
# Add title
plt.title("concentration dependency of fluorescence spectra")
# Add axis labels
plt.ylabel("intensity")
# Use a raw string so the LaTeX markup (greek letters) is passed through unchanged
plt.xlabel(r"wavelength $\lambda$ / $nm$")
# Save plot to file
# IMPORTANT: use this command before plt.show!
plt.savefig( str(workingDir) + "/img/spectra.png" )
# Show plot and allow creation of a new plot
plt.show()
#
# STEP 3 : FIND MAXIMUM AND EXTRACT PEAK HEIGHT
#
# Find the global maximum
# Define small function to return the index of the largest value
indexMax = lambda array : array.index(max(array))
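# e.g. indexMax([1.0, 5.0, 3.0]) returns 1, the index of the largest value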
# Get the index of the maximum
peakIndex = indexMax(spectra[0]["intensity"])
# Get the wavelength of the maximum
peakLocation = spectra[0]["wavelength"][peakIndex]
# Extract the concentration and the height of the maximum from the spectra
# Create new table with the information
# Use numpy arrays, because fitting the data requires them later on
peakHeightChange = {
# Extract the concentration of each solution
"concentration": np.array(
[ spectrum["concentration"] for spectrum in spectra ]
),
# Extract the intensity value from each spectra at the same wavelength
# spectrum["wavelength"].index(peakLocation) returns the index of the interesting wavelength
# spectrum["intensity"][ ... ] returns the intensity at the wavelength of the largest peak in the spectra
"intensity" : np.array(
[ spectrum["intensity"][spectrum["wavelength"].index(peakLocation)] for spectrum in spectra ]
)
}
# Plot the progress
# Plot the peak height change as scatter plot
plt.scatter("concentration", "intensity", data = peakHeightChange)
# Add labels and title
plt.title("Peak Height Change")
plt.xlabel("concentration c/nM")
plt.ylabel("intensity")
# Show plot and allow creation of a new plot
plt.show()
#
# STEP 4 : APPROXIMATE BLIND VALUE
#
# Blind value was not measured. Blind value would be the spectrum with concentration zero.
# Approximate the blind value as the mean intensity of the lowest-concentration spectrum beyond 650 nm.
# Define small function to return the index of the smallest value of the array
indexMin = lambda array : array.index(min(array))
# Get the index of the spectrum with the lowest concentration
lowConcentrationIndex = indexMin([ spectrum["concentration"] for spectrum in spectra ])
# Get the index of 650nm in the wavelength array
index650nm = spectra[lowConcentrationIndex]["wavelength"].index(650)
# Compute the mean of all intensities beyond 650 nm by slicing the array ( arr[index650nm:] )
blindValue = np.mean(spectra[lowConcentrationIndex]["intensity"][index650nm:])
# Add blind value to the table of peak height changes
peakHeightChange["concentration"] = np.append( peakHeightChange["concentration"], 0 )
peakHeightChange["intensity"] = np.append( peakHeightChange["intensity"] , blindValue )
# Plot the progress
# Plot the peak height change as scatter plot
plt.scatter("concentration", "intensity", data = peakHeightChange)
# Add labels and title
plt.title("Peak Height Change")
plt.xlabel("concentration c/nM")
plt.ylabel("intensity")
# Show plot and allow creation of a new plot
plt.show()
#
# STEP 5 : DO LINEAR REGRESSION
#
# IMPORT MODULE
# Module for machine learning -> linear regression
from sklearn.linear_model import LinearRegression
# Do linear regression
# The x-values must be converted into a column-vector -> .reshape(-1,1)
peakHeightChange["concentration"] = peakHeightChange["concentration"].reshape(-1,1)
# Create a linear model and pass the peakHeightChange to the model
linearFit = LinearRegression().fit( peakHeightChange["concentration"],
peakHeightChange["intensity"] )
# Print formula of linear regression
print("Linear Regression:\n I =", linearFit.coef_[0], "/ nM * c + ", linearFit.intercept_)
# Define small function to predict concentration from measured peak height
predictConcentration = lambda intensity : (intensity - linearFit.intercept_)/linearFit.coef_[0]
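# Worked example with purely hypothetical fit values: for a = 2.0 / nM and b = 10.0,
# a measured intensity I = 110.0 gives c = (110.0 - 10.0) / 2.0 = 50.0 nM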
# Create a lovely plot
# Plot the peak height change as scatter plot
plt.scatter("concentration", "intensity", data = peakHeightChange, label="data")
# Draw regression line
plt.plot(peakHeightChange["concentration"],
linearFit.predict(peakHeightChange["concentration"]),
"r", label="fit")
# Add labels and title
plt.title("Peak Height Change")
plt.xlabel("concentration c/nM")
plt.ylabel("intensity I")
# Add grid to plot
plt.grid()
plt.legend()
plotDescription = "Regression: $I = a * c + b$ \n a = {}/nM \n b = {}".format(linearFit.coef_[0], linearFit.intercept_)
plt.text(200,0, plotDescription )
# Save plot to file
# IMPORTANT: use this command before plt.show!
plt.savefig(str(workingDir)+"/img/regression.png")
# Show plot and allow creation of a new plot
plt.show()
#
# STEP 6 : PREDICT CONCENTRATIONS
#
# Ask for user input via console
print("Enter intensity I at", peakLocation, "nm for unknown sample concentration c.")
intensity = float(input("I = "))
# Predict with regression model
concentration = predictConcentration(intensity)
# Return result
print("The samples concentration is c =", str(concentration), "nM")
| 35.392157 | 119 | 0.71662 |