def show_set(data_set):
"""
    Display the contents of a set.
    :param data_set: the data set to display
:return: None
"""
data_list = list(data_set)
show_string(data_list)
| 15,900
|
def get_high_accuracy_voronoi_nodes(structure, rad_dict, probe_rad=0.1):
"""
Analyze the void space in the input structure using high accuracy
voronoi decomposition.
Calls Zeo++ for Voronoi decomposition.
Args:
structure: pymatgen.core.structure.Structure
rad_dict (optional): Dictionary of radii of elements in structure.
If not given, Zeo++ default values are used.
Note: Zeo++ uses atomic radii of elements.
For ionic structures, pass rad_dict with ionic radii
probe_rad (optional): Sampling probe radius in Angstroms.
Default is 0.1 A
Returns:
        voronoi nodes as a pymatgen.core.structure.Structure within the
        unit cell defined by the lattice of the input structure
"""
with ScratchDir('.'):
name = "temp_zeo1"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_flag = True
rad_file = name + ".rad"
with open(rad_file, 'w+') as fp:
for el in rad_dict.keys():
print("{} {}".format(el, rad_dict[el].real), file=fp)
atmnet = AtomNetwork.read_from_CSSR(
zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
# vornet, vor_edge_centers, vor_face_centers = \
# atmnet.perform_voronoi_decomposition()
red_ha_vornet = \
prune_voronoi_network_close_node(atmnet)
# generate_simplified_highaccuracy_voronoi_network(atmnet)
# get_nearest_largest_diameter_highaccuracy_vornode(atmnet)
red_ha_vornet.analyze_writeto_XYZ(name, probe_rad, atmnet)
voro_out_filename = name + '_voro.xyz'
voro_node_mol = ZeoVoronoiXYZ.from_file(voro_out_filename).molecule
species = ["X"] * len(voro_node_mol.sites)
coords = []
prop = []
for site in voro_node_mol.sites:
coords.append(list(site.coords))
prop.append(site.properties['voronoi_radius'])
        lattice = Lattice.from_parameters(*structure.lattice.parameters)
vor_node_struct = Structure(
lattice, species, coords, coords_are_cartesian=True,
to_unit_cell=True, site_properties={"voronoi_radius": prop})
return vor_node_struct
| 15,901
|
def setUpBlobDetector():
"""
    Configure parameters for a cv2 blob detector and return the detector.
"""
params = cv2.SimpleBlobDetector_Params()
params.minThreshold = 0
params.maxThreshold = 255
params.filterByArea = True
params.minArea = 1500
params.maxArea = 25000
params.filterByCircularity = False
params.filterByColor = False
params.filterByConvexity = False
params.filterByInertia = False
detector = cv2.SimpleBlobDetector_create(params)
return detector
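
# A minimal usage sketch (assumes `cv2` is imported and `gray` is a grayscale
# uint8 image; `_example_detect_blobs` is a hypothetical helper, not part of
# the original snippet).
def _example_detect_blobs(gray):
    detector = setUpBlobDetector()
    keypoints = detector.detect(gray)  # list of cv2.KeyPoint within the area limits
    return cv2.drawKeypoints(gray, keypoints, None, (0, 0, 255),
                             cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)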
| 15,902
|
def upload_image(picBase64, album, name, title, description, access_token):
"""
POST. Needs auth token. Basic Response.
    Uploads a single base64-encoded image to the given album ID.
"""
url = 'https://api.imgur.com/3/image'
request = urllib2.Request(url, headers={'Authorization' : 'Bearer {}'.format(access_token)})
values = {
'image' : picBase64,
'album' : album.encode('utf-8'),
'type' : 'base64',
'name' : name.encode('utf-8'),
'title' : title.encode('utf-8'),
'description' : description
}
data = urllib.urlencode(values)
content = urllib2.urlopen(request, data)
content_json = content.read()
    content_json = json.loads(content_json)
    return content_json
| 15,903
|
def raise_keymap():
"""
! @ # $ % || ^ & * ( )
DEL ESC || PGDN PGUP PSCR
CAPS volup ENT reset || UP
voldn super shift space bspc|| alt ent LEFT DOWN RGHT
"""
left = [
[KC.N1, KC.N2, KC.N3, KC.N4, KC.N5],
[KC.F1, KC.F2, KC.F3, KC.F4, KC.F5],
[KC.F11, KC.F12, KC.LPRN, KC.RPRN, KC.AMPR],
[KC.NO, KC.INS, KC.LGUI, KC.LSFT, KC.SPC, KC.BSPC],
]
right = [
[ KC.N6, KC.N7, KC.N8, KC.N9, KC.N0],
[ KC.F6, KC.F7, KC.F8, KC.F9, KC.F10],
[ KC.GRV, KC.LBRC, KC.RBRC, KC.PSLS, KC.BSLS],
[KC.LALT, KC.ENT, KC.TRNS, KC.DOT, KC.PMNS, KC.EQL],
]
return [left, right]
| 15,904
|
def runetl(config, log):
""" run etl """
log.info('run ETL')
genres = import_otrgenres(config, log)
""" loop back for 10 days and import"""
iterdate = datetime.now().date() - timedelta(days=10)
startdate = datetime.now().date() - timedelta(days=8)
enddate = datetime.now().date()
while (iterdate <= enddate):
if (iterdate < startdate):
""" housekeeping(iterdate) """
housekeeping(iterdate, config, log)
else:
import_otrepg(iterdate, genres, config, log)
iterdate = iterdate + timedelta(days=1)
update_toprecordings(config, log)
update_torrents(startdate, config, log)
    log.info('successfully ran ETL and housekeeping!')
| 15,905
|
def sample_cmd() -> Command:
"""Useful for testing constraints against a variety of parameter kinds.
Parameters have names that should make easy to remember their "kind"
without the need for looking up this code."""
@cloup.command()
# Optional arguments
@click.argument('arg1', required=False)
@click.argument('arg2', required=False)
# Plain options without default
@cloup.option('--str-opt')
@cloup.option('--int-opt', type=int)
@cloup.option('--bool-opt', type=bool)
# Flags
@cloup.option('--flag / --no-flag')
@cloup.option('--flag2', is_flag=True)
# Options with default
@cloup.option('--def1', default=1)
@cloup.option('--def2', default=2)
# Options that take a tuple
@cloup.option('--tuple', nargs=2, type=int)
# Options that can be specified multiple times
@cloup.option('--mul1', type=int, multiple=True)
@cloup.option('--mul2', type=int, multiple=True)
def f(**kwargs):
print('It works')
return cast(Command, f)
| 15,906
|
def get_linenos(obj):
"""Get an object’s line numbers in its source code file"""
try:
lines, start = inspect.getsourcelines(obj)
except TypeError: # obj is an attribute or None
return None, None
except OSError: # obj listing cannot be found
# This happens for methods that are not explicitly defined
# such as the __init__ method for a dataclass
return None, None
else:
return start, start + len(lines) - 1
| 15,907
|
def enter_user_trigger_critical_section(user_id):
"""Set semaphore noting users trigger state is actively being processed
A number of asynchronous tasks are involved in processing a users results
and determining their trigger state. This endpoint is used to set the
semaphore used by other parts of the system to determine if results are
available or still pending.
:raises AsyncLockUnavailable: if lock for user is already present
:raises TransitionNotAllowed: if user's current trigger state won't allow
a transition to the ``inprocess`` state.
"""
ts = users_trigger_state(user_id)
sm = EMPRO_state(ts)
sm.begin_process()
# Record the historical transformation via insert.
current_app.logger.debug(
"record state change to 'inprocess' from "
f"enter_user_trigger_critical_section({user_id})")
ts.insert(from_copy=True)
# Now 'inprocess', obtain the lock to be freed at the conclusion
# of `evaluate_triggers()`
critical_section = TimeoutLock(key=EMPRO_LOCK_KEY.format(user_id=user_id))
critical_section.__enter__()
| 15,908
|
def create_solution_board(width=6, height=6):
"""Randomly generates a new board
with width by height size
"""
if type(width) != int or type(height) != int:
raise TypeError('Arguments must be int type')
boxes = width * height
if boxes % 2 != 0:
        raise ValueError('Number of boxes is not a multiple of two')
numbers = list(range(1, boxes // 2 + 1))
numbers = numbers + numbers
random.shuffle(numbers)
board = []
for index in range(height):
board.append([])
for _ in range(width):
random_number = numbers.pop()
board[index].append(random_number)
return board
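
# A small usage sketch, assuming `random` is imported at module level as the
# function requires: a 4x4 board contains the numbers 1..8, each exactly twice.
example_board = create_solution_board(width=4, height=4)
for row in example_board:
    print(row)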
| 15,909
|
def _getE(s,R,W,V = None):
"""The sum of the energies for the states present in the solution.
Args:
        s (list of int): The number of sample points along each
          basis vector.
        R (numpy.ndarray): The basis vectors for the unit cell.
        W (numpy.ndarray): A matrix containing the expansion coefficients
          for the wavefunctions.
        V (callable, optional): The potential function; defaults to the
          simple harmonic oscillator potential ``_sho_V``.
Returns:
E (numpy.ndarray): A vector of the energies at the sample points.
"""
from pydft.poisson import _O_operator, _L_operator, _B_operator
    if V is None:  # pragma: no cover
V = _sho_V
O_t = _O_operator(s,R,W)
U = np.dot(np.conj(W.T),_O_operator(s,R,W))
Vt = np.transpose(np.conj(_Vdual(s,R, V = V)))
IW = _B_operator(s,R,W)
Uinv = np.linalg.inv(U)
IWU = _B_operator(s,R,np.dot(W,Uinv))
n = _diagouter(IW,IWU)
Ew = np.trace(np.dot(np.conj(np.transpose(W)),_L_operator(s,R,np.dot(W,Uinv))))
E = (-1.)*Ew/2. + np.dot(Vt,n)
return E
| 15,910
|
def get_gt_list(request):
""" This view returns the list of groundtruths associated to a user and a specific configuration of institute,
usecase and language.
.js files: InfoAboutConfiguration.js DownloadGT.js"""
groundTruths = 0
json_resp = {}
ins = request.GET.get('inst',None)
lang = request.GET.get('lang',None)
use = request.GET.get('use',None)
action = request.GET.get('action',None)
token = request.GET.get('token',None)
reptype = request.GET.get('reptype',None)
annotation_mode = request.GET.get('annotation_mode','Human')
if ins == '':
ins = None
if use == '':
use = None
if lang == '':
lang = None
if token == 'all':
ns_robot = NameSpace.objects.get(ns_id='Robot')
ns_human = NameSpace.objects.get(ns_id='Human')
# rob_user = User.objects.get(username='Robot_user',ns_id=ns_robot)
list_gt = GroundTruthLogFile.objects.filter(ns_id=ns_human).count()
groundTruths = list_gt
# gt_rob = GroundTruthLogFile.objects.filter(ns_id=ns_robot,username = rob_user)
i = 0
# print(groundTruths)
# for el in gt_rob:
# gts = GroundTruthLogFile.objects.filter(ns_id=ns_robot,gt_type = el.gt_type,id_report = el.id_report_id,language = el.language).exclude(insertion_time = el.insertion_time)
# gts_count = gts.count()
# # print('count: '+str(i)+' '+str(gts.count()))
# i = i+1
# groundTruths = groundTruths + gts_count
else:
with connection.cursor() as cursor:
if reptype == 'reports':
if annotation_mode == 'Human':
cursor.execute(
"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language INNER JOIN topic_has_document as t on t.id_report = r.id_report and r.language = t.language WHERE r.institute = COALESCE(%s,r.institute) AND t.name = %s AND r.language = COALESCE(%s,r.language) AND g.gt_type = %s AND g.ns_id = %s and r.institute != %s",
[ins, use, lang, action, 'Human','PUBMED'])
groundTruths = cursor.fetchone()[0]
else:
if annotation_mode == 'Human':
cursor.execute(
"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language INNER JOIN topic_has_document as t on t.id_report = r.id_report and r.language = t.language WHERE t.name = %s AND r.language = %s AND g.gt_type = %s AND g.ns_id = %s and r.institute = %s",
[use, 'english', action, 'Human','PUBMED'])
groundTruths = cursor.fetchone()[0]
json_resp['ground_truths'] = groundTruths
# print(json_resp)
return JsonResponse(json_resp)
| 15,911
|
def to_dict(item: Any) -> MutableMapping[Hashable, Any]:
"""Converts 'item' to a MutableMapping.
Args:
item (Any): item to convert to a MutableMapping.
Raises:
TypeError: if 'item' is a type that is not registered.
Returns:
MutableMapping: derived from 'item'.
"""
if isinstance(item, MutableMapping):
return item
else:
raise TypeError(
f'item cannot be converted because it is an unsupported type: '
f'{type(item).__name__}')
| 15,912
|
def node(*args, **kwargs):
"""
args[0] -- a XML tag
args[1:] -- an array of children to append to the newly created node
or if a unicode arg is supplied it will be used to make a text node
kwargs -- attributes
returns a xml.dom.minidom.Element
"""
blocked_attributes = ['tag']
tag = args[0] if len(args) > 0 else kwargs['tag']
args = args[1:]
result = DetachableElement(tag)
unicode_args = [u for u in args if type(u) == unicode]
assert len(unicode_args) <= 1
parsed_string = False
# kwargs is an xml attribute dictionary,
# here we convert it to a xml.dom.minidom.Element
for k, v in iter(kwargs.items()):
if k in blocked_attributes:
continue
if k == 'toParseString':
if v is True and len(unicode_args) == 1:
parsed_string = True
# Add this header string so parseString can be used?
s = u'<?xml version="1.0" ?><'+tag+'>' + unicode_args[0]\
+ u'</'+tag+'>'
parsed_node = parseString(s.encode("utf-8")).documentElement
# Move node's children to the result Element
# discarding node's root
for child in parsed_node.childNodes:
result.appendChild(copy.deepcopy(child))
else:
result.setAttribute(k, v)
if len(unicode_args) == 1 and not parsed_string:
text_node = PatchedText()
text_node.data = unicode_args[0]
result.appendChild(text_node)
for n in args:
if type(n) == int or type(n) == float or type(n) == bytes:
text_node = PatchedText()
text_node.data = unicode(n)
result.appendChild(text_node)
elif type(n) is not unicode:
try:
result.appendChild(n)
except:
raise Exception(type(n), n)
return result
| 15,913
|
def get_volume_uuid(path: str) -> str:
"""Returns the volume UUID for the given path or None if not found"""
try:
output = subprocess.check_output(["diskutil", "info", "-plist", path])
plist = plistlib.loads(output)
return plist.get("VolumeUUID", None)
    except subprocess.CalledProcessError:
return None
| 15,914
|
def prob_calibration_1d(Y_obs, Y_sample, title="", save_addr="", fontsize=12):
"""Plots the reliability diagram (i.e. CDF for F^{-1}(y) ) for 1D prediction.
Args:
Y_obs: (np.ndarray of float32) N observations of dim (N, 1)
Y_sample: (np.ndarray of float32) Samples of size M corresponding
to the N observations. dim (N, M)
title: (str) Title of the image.
save_addr: (str) Address to save image to.
fontsize: (int) font size for title and axis labels
"""
if save_addr:
pathlib.Path(save_addr).parent.mkdir(parents=True, exist_ok=True)
plt.ioff()
ecdf_sample = metric_util.ecdf_eval(Y_obs, Y_sample)
ecdf_func = metric_util.make_empirical_cdf_1d(ecdf_sample)
ecdf_eval = np.linspace(0, 1, 1000)
ecdf_valu = ecdf_func(ecdf_eval)
fig, ax = plt.subplots()
ax.plot(ecdf_eval, ecdf_eval, c="black")
ax.plot(ecdf_eval, ecdf_valu)
total_variation = np.mean(np.abs(ecdf_eval - ecdf_valu))
plt.title("Reliability Index, {}, Score: {:.3f}".format(
title, total_variation), fontsize=fontsize)
plt.xlabel(r"Empirical CDF for $\hat{F}(Y_i|X_i)$", fontsize=fontsize)
plt.ylabel("Expected CDF $Uniform(0, 1)$", fontsize=fontsize)
if save_addr:
plt.savefig(save_addr)
plt.close()
plt.ion()
| 15,915
|
def test_reveal_extra_to_args():
"""
Test `reveal_extra_to_args()` function.
"""
text_config = """
reveal_extra:
key0: value0
key1: value1
"""
config = yaml.safe_load(text_config)
args = reveal_extra_to_args(config)
assert len(args) == 4
assert args[::2] == ['-V', '-V']
assert 'key0=value0' in args
assert 'key1=value1' in args
| 15,916
|
@contextlib.contextmanager
def smart_open(filename=None, fmode=None):
"""Context manager to handle both stdout & files in the same manner.
:param filename: Filename to open.
:type filename: ``str`` or ``None``
:param fmode: Mode in which to open a given file.
:type fmode: ``str`` or ``None``
"""
if filename and filename != "-":
fh = open(filename, fmode)
else:
fh = sys.stdout
try:
yield fh
finally:
if fh is not sys.stdout:
fh.close()
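
# Usage sketch for the context manager above: write to a named file, or fall
# back to stdout when the filename is "-" (the file name here is illustrative).
with smart_open("results.txt", "w") as out:
    print("written to results.txt", file=out)
with smart_open("-") as out:
    print("written to stdout", file=out)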
| 15,917
|
def test_omit_default_roundtrip(cl_and_vals):
"""
Omit default on the converter works.
"""
converter = Converter(omit_if_default=True)
cl, vals = cl_and_vals
@attr.s
class C(object):
a: int = attr.ib(default=1)
b: cl = attr.ib(factory=lambda: cl(*vals))
inst = C()
unstructured = converter.unstructure(inst)
assert unstructured == {}
assert inst == converter.structure(unstructured, C)
inst = C(0)
unstructured = converter.unstructure(inst)
assert unstructured == {"a": 0}
assert inst == converter.structure(unstructured, C)
| 15,918
|
def test_all():
"""
测试测试模块下所有的测试脚本
:return:
"""
suite = unittest.TestSuite()
all_cases = unittest.defaultTestLoader.discover('.', 'test_*.py')
for case in all_cases:
suite.addTests(case)
runner = xmlrunner.XMLTestRunner(output='report')
runner.run(suite)
| 15,919
|
def mylog10(x):
"""Return the base-10 logarithm of x."""
return math.log10(x)
| 15,920
|
def get_RGB_to_RGB_matrix(in_colorspace, out_colorspace, primaries_only=False):
"""Return RGB to RGB conversion matrix.
Args:
in_colorspace (str): input colorspace.
out_colorspace (str): output colorspace.
Kwargs:
primaries_only (bool): primaries matrix only, doesn't include white point.
Returns:
        numpy.matrix (3x3)
"""
# Get colorspace in to XYZ matrix
in_matrix = get_colorspace_matrix(in_colorspace, primaries_only)
# Get XYZ to colorspace out matrix
out_matrix = get_colorspace_matrix(out_colorspace, primaries_only, inv=True)
# Return scalar product of the 2 matrices
return numpy.dot(out_matrix, in_matrix)
| 15,921
|
def one_hot_arbitrary_bins(value_to_bin=104):
"""Creates OHE numpy array using an arbitrary array of binning thresholds.
Args:
value_to_bin (int, optional): Number to convert to OHE array. Defaults to 104.
"""
bins = np.array([-10000., -5000., -1000., -500.,
-250., -150., -100., -50., 0.,
50., 100., 150., 250., 500.,
1000., 5000., 10000.])
placed_bins = np.digitize(value_to_bin, bins=bins)
ohe_bins = np.eye(len(bins)+1)[placed_bins]
print(ohe_bins)
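
# Example call (assumes numpy is imported as np): 104 falls between the 100 and
# 150 thresholds, so a single 1 appears in that slot of the 18-element array.
one_hot_arbitrary_bins(104)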
| 15,922
|
def test_get_sections(test_dict: FullTestDict):
"""
- GIVEN an html section
- WHEN the subsections are extracted
- THEN check the array of subsections is correct
"""
htmls = test_dict['ojad']['htmls']
parsed_htmls = [Soup(html, "html.parser") for html in htmls]
expected_sections = test_dict['ojad']['expected_sections']
assert ojad.get_sections(parsed_htmls) == [
(section['writing_section'], section['reading_sections'])
for section in expected_sections
]
| 15,923
|
def normalize_string(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
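
# Example (assumes `re` and `string` are imported as the function requires):
# punctuation, articles and casing are stripped, whitespace is collapsed.
print(normalize_string("The Quick,  Brown Fox!"))  # -> "quick brown fox"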
| 15,924
|
def sub_band_as_numpy(band, y_limits, data_type=None):
"""Read subsets of the dataset so that we don't hold the whole thing
in memory. It seems wasteful to reread parts, but GDAL keeps its own cache.
"""
data_type = data_type if data_type else INT32
y_size = y_limits[1] - y_limits[0]
LOGGER.debug(f"sub_band y_size={y_size} y_limits {y_limits[0]}")
scanline_buffer = band.ReadRaster(
xoff=0,
yoff=y_limits[0],
xsize=band.XSize,
ysize=y_size,
buf_xsize=band.XSize,
buf_ysize=y_size,
buf_type=data_type.gdal,
)
scanline = np.frombuffer(scanline_buffer, dtype=data_type.numpy)
    # The buffer is row-major: y_size rows of band.XSize pixels each.
    return np.reshape(scanline, (y_size, band.XSize))
| 15,925
|
def full_reverse(viewname, urlconf=None, args=None, kwargs=None, current_app=None,
scheme=None, domain=None, subdomain=None):
"""
First, obtains the absolute path of the URL matching given
``viewname`` with its parameters.
Then, prepends the path with the scheme name and the authority
part (domain and subdomain) and returns it.
Args::
viewname (str): Name of the URL pattern.
urlconf (str): Path of the module containing URLconfs.
args (list): Positional arguments of the URL pattern.
kwargs (dict): Keyword arguments of the URL pattern.
current_app (str): App identifier.
scheme (str): Scheme name (commonly called protocol).
domain (str): Domain name.
subdomain (str): Subdomain name.
Returns::
The full URL matching given view with its parameters.
Examples::
>>> full_reverse('client-detail-view', args=[client.id])
'http://example.com/clients/client/123/'
>>> full_reverse('client-list-view', scheme='https', subdomain='admin')
'https://admin.example.com/clients/'
Raises::
NoReverseMatch: If no URL pattern matches the given ``viewname``.
ValueError: If both ``args`` and ``kwargs`` are given.
"""
location = reverse(viewname, urlconf, args, kwargs, current_app)
return build_full_url(location, scheme, domain, subdomain)
| 15,926
|
def draw_lrtb_rectangle_filled(left: float, right: float, top: float,
bottom: float, color: Color):
"""
Draw a rectangle by specifying left, right, top, and bottom edges.
Args:
:left: The x coordinate of the left edge of the rectangle.
:right: The x coordinate of the right edge of the rectangle.
:top: The y coordinate of the top of the rectangle.
:bottom: The y coordinate of the rectangle bottom.
:color: The color of the rectangle.
Returns:
None
Raises:
        :AttributeError: Raised if left > right or top < bottom.
"""
if left > right:
raise AttributeError("Left coordinate {} must be less than or equal "
"to the right coordinate {}".format(left, right))
if bottom > top:
raise AttributeError("Bottom coordinate {} must be less than or equal "
"to the top coordinate {}".format(bottom, top))
center_x = (left + right) / 2
center_y = (top + bottom) / 2
width = right - left
height = top - bottom
draw_rectangle_filled(center_x, center_y, width, height, color)
| 15,927
|
def test_regexp_on_index_out_of_range():
"""Test regexp when group indeces are out of range."""
regexp = apply_regexp(
'Hard work',
{'search': 'r', 'group': [1, 2, 3]},
)
assert isinstance(regexp.failure(), IndexError) is True
assert regexp.failure().args == ('list index out of range',)
| 15,928
|
def get_object_or_none(model_class, **kwargs):
"""Identical to get_object_or_404, except instead of returning Http404,
this returns None.
"""
try:
return model_class.objects.get(**kwargs)
except model_class.DoesNotExist:
return None
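
# Hypothetical usage sketch; `Article` stands in for any Django model and is
# not part of the original snippet.
article = get_object_or_none(Article, pk=42)
print("found" if article is not None else "missing")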
| 15,929
|
def GenerateTests():
"""Generate all tests."""
filelist = []
for ii in range(len(_GROUPS)):
filename = GenerateFilename(_GROUPS[ii])
filelist.append(filename)
WriteTest(filename, ii, ii + 1)
return filelist
| 15,930
|
def shift_df_generator(empty_df, day_lower_hr_lim, day_upper_hr_lim):
"""Generate day and night dataframe.
Parameters
----------
empty_df : DataFrame
A DataFrame with timestamp and 'Temperature (Celsius)' with all zeros.
day_lower_hr_lim : int
The lower hour limit that constitutes the start of the day shift.
day_upper_hr_lim : int
The upper hour limit that constitutes the end of the day shift.
Returns
-------
day_df : DataFrame
A DataFrame containing only dayshift values.
night_df : DataFrame
A DataFrame containing only nightshift values.
"""
# Create 2 temporary dataframes (1 for dayshift, 1 for nightshift)
day_df = empty_df.loc[(empty_df['Timestamp'].dt.hour >= day_lower_hr_lim) &
(empty_df['Timestamp'].dt.hour < day_upper_hr_lim)]
# Night dataframe will consist of rows with indices not taken by day_df
night_df = empty_df[~empty_df.index.isin(day_df.index)]
return day_df, night_df
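
# A short usage sketch (assumes pandas is imported as `pd`): 48 hourly rows are
# split into a 07:00-19:00 day shift and the complementary night shift.
hourly_df = pd.DataFrame({
    "Timestamp": pd.date_range("2021-01-01", periods=48, freq="H"),
    "Temperature (Celsius)": 0.0,
})
day_part, night_part = shift_df_generator(hourly_df, 7, 19)
assert len(day_part) == 24 and len(night_part) == 24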
| 15,931
|
def json_loads(data, handle=False):
"""
    Wrapper around json.loads
    :param data: the string to parse
    :param handle: patch flag. False (default): normal handling (regex backslash cleanup applied); True: skip the regex
:return:
"""
if handle:
return json.loads(data.strip())
return json.loads(regex.sub(r"\\\\", data.strip()))
| 15,932
|
def elina_linexpr0_size(linexpr):
"""
Return the size of an ElinaLinexpr0.
Parameters
----------
linexpr : ElinaLinexpr0Ptr
Pointer to the ElinaLinexpr0 that needs to be checked for its size.
Returns
-------
    size_linexpr : c_size_t
Size of the ElinaLinexpr0.
"""
size_linexpr = None
try:
elina_linexpr0_size_c = elina_auxiliary_api.elina_linexpr0_size
elina_linexpr0_size_c.restype = c_size_t
elina_linexpr0_size_c.argtypes = [ElinaLinexpr0Ptr]
size_linexpr = elina_linexpr0_size_c(linexpr)
except:
print('Problem with loading/calling "elina_linexpr0_size" from "libelinaux.so"')
print('Make sure you are passing ElinaLinexpr0Ptr to the function')
return size_linexpr
| 15,933
|
def app():
"""
Setup our flask test app, this only gets executed once.
:return: Flask app
"""
# params = {"DEBUG": False, "TESTING": True, "WTF_CSRF_ENABLED": False}
_app = create_app("config.settings_test")
# Establish an application context before running the tests.
ctx = _app.app_context()
ctx.push()
yield _app
ctx.pop()
| 15,934
|
def _make_index(df, cols=META_IDX, unique=True):
"""Create an index from the columns/index of a dataframe or series"""
def _get_col(c):
try:
return df.index.get_level_values(c)
except KeyError:
return df[c]
index = list(zip(*[_get_col(col) for col in cols]))
if unique:
index = pd.unique(index)
return pd.MultiIndex.from_tuples(index, names=tuple(cols))
| 15,935
|
def handler(event, context):
""" Lambda Handler.
    Prints the event and context objects and returns a Hello World body
"""
print(event)
print(context)
return {
"body": json.dumps('Hello World!')
}
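
# Quick local invocation sketch (assumes `json` is imported, as the handler
# requires); the context argument is unused here, so None stands in for it.
if __name__ == "__main__":
    print(handler({"ping": True}, None))  # {'body': '"Hello World!"'}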
| 15,936
|
def latex_table(arrays, here=False):
"""Prints the code for a latex table with empty headings,
inserting the data provided into rows.
Args:
arrays: The list of arrays for the table.
here: True if here tag is to be included.
"""
# Define a 4-space tab (latex uses 4 space tabs).
TAB = " "
cols = len(arrays[0]) + 1
table_string = (
"\\begin{table}" + int(here)*"[h]" + "\n" +
TAB + "\centering\n" +
TAB + "\\begin{tabular}{" + cols*'c' + "}\n" +
2*TAB + "\hline\n" +
2*TAB + "" + (cols - 1)*" &" + " \\\\\n" +
2*TAB + "\hline\n"
)
# Generate each row and add to string.
for array in arrays:
new_row = 2*TAB
for element in array:
new_row += " & " + str(element)
new_row += " \\\\\n"
table_string += new_row
table_string += (
2*TAB + "\hline\n" +
TAB + "\end{tabular}\n" +
TAB + "\caption{}\n" +
TAB + "\label{tab:}\n" +
"\end{table}"
)
print(table_string)
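
# Example call: two data rows of three values each; the table gets an extra
# leading column for the (empty) row headings, so the tabular spec is "cccc".
latex_table([[1, 2, 3], [4, 5, 6]], here=True)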
| 15,937
|
def flatten_concat(tensors: List[tf.Tensor], batch_dims: int = 1) -> tf.Tensor:
"""Flatten given inputs and concatenate them."""
# tensors [(B, ...), (B, ...)]
flattened: List[tf.Tensor] = list() # [(B, X), (B, Y) ...]
for tensor in tensors:
final_dim = -1
if all(i is not None for i in tensor.shape[batch_dims:]):
# We know all the dimensions
final_dim = tf.reduce_prod(tensor.shape[batch_dims:])
flat_tensor = tf.reshape(
tensor, tf.concat([tf.shape(tensor)[:batch_dims], [final_dim]], 0)
)
flattened.append(flat_tensor)
return tf.concat(flattened, -1)
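
# Shape sketch (assumes TensorFlow 2.x imported as `tf`): a (2, 3, 4) tensor and
# a (2, 5) tensor flatten to (2, 12) and (2, 5), then concatenate to (2, 17).
a = tf.zeros([2, 3, 4])
b = tf.ones([2, 5])
print(flatten_concat([a, b]).shape)  # (2, 17)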
| 15,938
|
def test_pos_tag(parser, text, method_name, ptb_pos_tags):
"""Tests that produced POS tags are valid PTB POS tags.
Args:
parser (SpacyBistParser)
text (str): Input test case.
method_name (str): Parse method to test.
ptb_pos_tags (set of str): Valid PTB POS tags.
"""
parse_method = getattr(parser, method_name)
parsed_doc = parse_method(doc_text=text)
assert all([token['pos'] in ptb_pos_tags for sent in parsed_doc for token in sent])
| 15,939
|
async def test_discovered_by_dhcp_or_homekit(hass, source, data):
"""Test we can setup when discovered from dhcp or homekit."""
mocked_bulb = _mocked_bulb()
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": source}, data=data
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE}.async_setup", return_value=True
) as mock_async_setup, patch(
f"{MODULE}.async_setup_entry", return_value=True
) as mock_async_setup_entry:
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["data"] == {
CONF_HOST: IP_ADDRESS,
CONF_ID: "0x000000000015243f",
CONF_MODEL: MODEL,
}
assert mock_async_setup.called
assert mock_async_setup_entry.called
with _patch_discovery(
no_device=True
), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", side_effect=CannotConnect
):
result3 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": source}, data=data
)
await hass.async_block_till_done()
assert result3["type"] == RESULT_TYPE_ABORT
assert result3["reason"] == "already_configured"
| 15,940
|
def analyze_friends (names,phones,all_areacodes,all_places):
"""
names: tuple of names
phones: tuple of phone numbers (cleaned)
all_areacodes: tuple of area codes (3char ints)
all_places: tuple of places
Goal: Print out how many friends you have and every unique state
"""
# For TESTING MAKE THE PHONE NUMBER FIRST 3 DIGITS THE SAME AS THE AREA CODE
# def get_unique_area_codes():
# """
# Returns a tuple of all unique area codes
# """
# area_codes = ()
# for ph in phones:
# if ph[0:3] not in area_codes:
# area_codes += (ph[0:3],)
# return area_codes
def get_States(some_areacodes):
"""
some_areacodes: tuple of area codes
Return a tuple of states ASSOCIATED with area codes
"""
states = ()
for ac in some_areacodes:
if ac not in all_areacodes:
states += ("BAD AREA CODE",)
else:
index = all_areacodes.index(ac)
states += (all_places[index],)
return states
num_friends = len(names) # Gets number of friends
# unique_areacodes = get_unique_area_codes()
unique_states = get_States(all_areacodes)
print("You have", num_friends, "friends!")
print("They live in", unique_states)
# Function ends with the print, no returns
| 15,941
|
def fast_autoregressive_predict_fn(context, seq_len):
"""Given a context, autoregressively generate the rest of a sine wave."""
core = hk.LSTM(32)
dense = hk.Linear(1)
state = core.initial_state(context.shape[0])
# Unroll over the context using `hk.dynamic_unroll`.
# As before, we `hk.BatchApply` the Linear for efficiency.
context_outs, state = hk.dynamic_unroll(
core,
context,
state,
time_major=False,
)
context_outs = hk.BatchApply(dense)(context_outs)
# Now, unroll one step at a time using the running recurrent state.
ar_outs = []
x = context_outs[:, -1, :]
times = range(seq_len - context.shape[1])
for _ in times:
x, state = core(x, state)
x = dense(x)
ar_outs.append(x)
ar_outs = jnp.stack(ar_outs)
ar_outs = ar_outs.transpose(1, 0, 2)
return jnp.concatenate([context_outs, ar_outs], axis=1)
| 15,942
|
def get_entities(corpus_name):
""" Load the dataset from the filesystem corresponding to corpus_name
(to see the list of allowed names, use utils.list_corpora() ), and extract
all annotated entities.
Returns a dict, in which each key is an entity type, which contains a list
of entity mentions in the corpus.
"""
r = read_conll(corpus_name); data = list(r)
data2 = [ [(w,iob) for ((w,p),iob) in d] for d in data]
data3 = [i for u in data2 for i in u]
tags = sentence_utils.get_tagset(data, with_prefix=True)
taglist = set([t[2:] for t in list(tags) if t !='O'])
entities = {}
for key in taglist:
entities[key] = []
data3.append((u'O',u'O'))
ent = []
entitytype = 'None'
for i,item in enumerate(data3[0:-1]):
if item[1] != 'O':
if item[1][0] == 'B':
ent = []
ent.append(item[0])
else: # == I
if item[1][0] != 'I':
raise ValueError("Should be I")
ent.append(item[0])
if data3[i+1][1][2:] != item[1][2:] or data3[i+1][1][0] == 'B':
#print i, item
entitytype = item[1][2:]
entities[entitytype].append(' '.join(ent))
return entities
| 15,943
|
def _reporthook(t):
"""``reporthook`` to use with ``urllib.request`` that prints the
process of the download.
Uses ``tqdm`` for progress bar.
**Reference:**
https://github.com/tqdm/tqdm
"""
last_b = [0]
def inner(b: int = 1, bsize: int = 1, tsize: int = None):
"""
:param b: Number of blocks just transferred [default: 1].
:param bsize: Size of each block (in tqdm units) [default: 1].
:param tsize: Total size (in tqdm units).
If [default: None] remains unchanged.
"""
if tsize is not None:
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return inner
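
# Usage sketch following the tqdm reference pattern cited in the docstring; the
# url and filename arguments are placeholders, not part of the original snippet.
def _example_download(url, filename):
    from urllib.request import urlretrieve
    from tqdm import tqdm
    with tqdm(unit="B", unit_scale=True, miniters=1, desc=filename) as t:
        urlretrieve(url, filename=filename, reporthook=_reporthook(t))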
| 15,944
|
def serial_rx(sysclk, reset_n, n_stop_bits_i, half_baud_rate_tick_i, baud_rate_tick_i, recieve_i, data_o, ready_o):
""" Serial
    This module implements a receiver serial interface
Ports:
-----
sysclk: sysclk input
reset_n: reset input
half_baud_rate_tick_i: half baud rate tick
baud_rate_tick_i: the baud rate
n_stop_bits_i: number of stop bits
recieve_i: rx
data_o: the data output in 1 byte
ready_o: indicates data_o is valid
-----
"""
END_OF_BYTE = 7
state_reg = Signal(t_State.ST_WAIT_START_BIT)
state = Signal(t_State.ST_WAIT_START_BIT)
data_reg = Signal(intbv(0, min = 0, max = 256))
data = Signal(intbv(0, min = 0, max = 256))
ready_reg = Signal(bool(0))
ready = Signal(bool(0))
count_8_bits_reg = Signal(intbv(0, min = 0, max = 8))
count_8_bits = Signal(intbv(0, min = 0, max = 8))
count_stop_bits_reg = Signal(intbv(0, min = 0, max = 8))
count_stop_bits = Signal(intbv(0, min = 0, max = 8))
@always_comb
def outputs():
data_o.next = data_reg
ready_o.next = ready_reg
@always_seq(sysclk.posedge, reset = reset_n)
def sequential_process():
state_reg.next = state
data_reg.next = data
ready_reg.next = ready
count_8_bits_reg.next = count_8_bits
count_stop_bits_reg.next = count_stop_bits
@always_comb
def combinational_process():
state.next = state_reg
data.next = data_reg
ready.next = ready_reg
count_8_bits.next = count_8_bits_reg
count_stop_bits.next = count_stop_bits_reg
if state_reg == t_State.ST_WAIT_START_BIT:
ready.next = False
if baud_rate_tick_i == True:
if recieve_i == False:
state.next = t_State.ST_GET_DATA_BITS
elif state_reg == t_State.ST_GET_DATA_BITS:
if baud_rate_tick_i == True:
data.next[count_8_bits_reg] = recieve_i
if count_8_bits_reg == END_OF_BYTE:
count_8_bits.next = 0
state.next = t_State.ST_GET_STOP_BITS
else:
count_8_bits.next = count_8_bits_reg + 1
state.next = t_State.ST_GET_DATA_BITS
elif state_reg == t_State.ST_GET_STOP_BITS:
if baud_rate_tick_i == True:
if count_stop_bits_reg == (n_stop_bits_i - 1):
count_stop_bits.next = 0
ready.next = True
state.next = t_State.ST_WAIT_START_BIT
else:
count_stop_bits.next = count_stop_bits_reg + 1
else:
raise ValueError("Undefined State")
return outputs, sequential_process, combinational_process
| 15,945
|
def zpad(x, l):
""" Left zero pad value `x` at least to length `l`.
>>> zpad('', 1)
'\x00'
>>> zpad('\xca\xfe', 4)
'\x00\x00\xca\xfe'
>>> zpad('\xff', 1)
'\xff'
>>> zpad('\xca\xfe', 2)
'\xca\xfe'
"""
return b'\x00' * max(0, l - len(x)) + x
| 15,946
|
def websocket_node_status(hass, connection, msg):
"""Get the status for a Z-Wave node."""
manager = hass.data[DOMAIN][MANAGER]
node = manager.get_instance(msg[OZW_INSTANCE]).get_node(msg[NODE_ID])
if not node:
connection.send_message(
websocket_api.error_message(
msg[ID], websocket_api.const.ERR_NOT_FOUND, "OZW Node not found"
)
)
return
connection.send_result(
msg[ID],
{
ATTR_NODE_QUERY_STAGE: node.node_query_stage,
NODE_ID: node.node_id,
ATTR_IS_ZWAVE_PLUS: node.is_zwave_plus,
ATTR_IS_AWAKE: node.is_awake,
ATTR_IS_FAILED: node.is_failed,
ATTR_NODE_BAUD_RATE: node.node_baud_rate,
ATTR_IS_BEAMING: node.is_beaming,
ATTR_IS_FLIRS: node.is_flirs,
ATTR_IS_ROUTING: node.is_routing,
ATTR_IS_SECURITYV1: node.is_securityv1,
ATTR_NODE_BASIC_STRING: node.node_basic_string,
ATTR_NODE_GENERIC_STRING: node.node_generic_string,
ATTR_NODE_SPECIFIC_STRING: node.node_specific_string,
ATTR_NODE_MANUFACTURER_NAME: node.node_manufacturer_name,
ATTR_NODE_PRODUCT_NAME: node.node_product_name,
ATTR_NEIGHBORS: node.neighbors,
OZW_INSTANCE: msg[OZW_INSTANCE],
},
)
| 15,947
|
def clf2D_slope_intercept(coef=None, intercept=None, clf=None):
"""
    Gets the slope and intercept for the separating hyperplane of a linear
    classifier fit on a two-dimensional dataset.
Parameters
----------
coef:
The classification normal vector.
intercept:
The classifier intercept.
clf: subclass of sklearn.linear_model.base.LinearClassifierMixin
A sklearn classifier with attributes coef_ and intercept_
Output
------
slope, intercept
"""
if clf is not None:
coef = clf.coef_.reshape(-1)
intercept = float(clf.intercept_)
else:
assert coef is not None and intercept is not None
slope = - coef[0] / coef[1]
intercept = - intercept / coef[1]
return slope, intercept
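
# Worked sketch (assumes scikit-learn is installed): fit a linear classifier on
# toy 2-D data that is symmetric about y = x, so the recovered boundary should
# be approximately slope 1, intercept 0.
from sklearn.linear_model import LogisticRegression

X_toy = [[0, 1], [1, 2], [2, 3], [1, 0], [2, 1], [3, 2]]
y_toy = [0, 0, 0, 1, 1, 1]
slope_toy, intercept_toy = clf2D_slope_intercept(clf=LogisticRegression().fit(X_toy, y_toy))
print(slope_toy, intercept_toy)  # roughly 1.0 and 0.0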
| 15,948
|
def _get_imgpaths(datasets: list, verbose=True):
""" get image paths
Args:
datasets (list): dataset names
        verbose (bool, optional): whether to print a summary per dataset. Defaults to True.
"""
img_paths = []
for dname in datasets:
img_dir = Path(dname.format(DATASETS_DIR=DATASETS_DIR))
assert img_dir.is_dir(), ANSI.errorstr(f'Cannot find {img_dir}!')
img_names = os.listdir(img_dir)
for imname in img_names:
impath = str(img_dir / imname)
img_paths.append(impath)
if verbose:
msg = f'Loaded {dname} from {ANSI.udlstr(img_dir)}: {len(img_names)} images.'
print(msg)
assert len(img_paths) > 0, 'No image path loaded'
return img_paths
| 15,949
|
def build_driver_for_task(task):
"""Builds a composable driver for a given task.
Starts with a `BareDriver` object, and attaches implementations of the
various driver interfaces to it. They come from separate
driver factories and are configurable via the database.
:param task: The task containing the node to build a driver for.
:returns: A driver object for the task.
:raises: DriverNotFound if node.driver could not be found in the
"ironic.hardware.types" namespaces.
:raises: InterfaceNotFoundInEntrypoint if some node interfaces are set
to invalid or unsupported values.
    :raises: IncompatibleInterface if the requested implementation is not
        compatible with the hardware type.
"""
node = task.node
hw_type = get_hardware_type(node.driver)
check_and_update_node_interfaces(node, hw_type=hw_type)
bare_driver = driver_base.BareDriver()
_attach_interfaces_to_driver(bare_driver, node, hw_type)
return bare_driver
| 15,950
|
def evaluate(eval_model, criterion, ntokens, data_source, cnf):
"""
    Evaluates the average loss of the given model on the given data source
"""
eval_model.eval() # Turn on the evaluation mode
total_loss = 0.0
src_mask = generate_square_subsequent_mask(cnf.input_length).to(cnf.device)
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, cnf.input_length):
data, targets = get_batch(data_source, i, cnf)
if data.size(0) != cnf.input_length:
src_mask = generate_square_subsequent_mask(data.size(0)).to(cnf.device)
output = eval_model(data, src_mask)
output_flat = output.view(-1, ntokens)
total_loss += len(data) * criterion(output_flat, targets).item()
return total_loss / (len(data_source) - 1)
| 15,951
|
def km_to_meters(kilometers):
"""
(int or float) -> float
Takes a distance in kilometers and returns the distance in meters.
"""
return kilometers * 1000.0
| 15,952
|
def build(plan: List[Step], instances_stock: Optional[Dict[Callable, Any]] = None):
""" Build instances dictionary from a plan """
instances_stock = instances_stock or {}
instances = {}
for cls, kwargs_spec in plan:
if cls in instances_stock:
instances[cls] = instances_stock[cls]
else:
instances[cls] = cls(**kwargs_spec.kwargs(instances))
return instances
| 15,953
|
def make_module_spec(options, weight_file):
"""Makes a module spec.
Args:
options: LM hyperparameters.
weight_file: location of the hdf5 file with LM weights.
Returns:
A module spec object used for constructing a TF-Hub module.
"""
def module_fn():
"""Spec function for a token embedding module."""
# init
_bos_id = 256
_eos_id = 257
_bow_id = 258
_eow_id = 259
_pad_id = 260
_max_word_length = 50
_parallel_iterations = 10
_max_batch_size = 1024
id_dtype = tf.int32
id_nptype = np.int32
max_word_length = tf.constant(_max_word_length, dtype=id_dtype, name='max_word_length')
version = tf.constant('from_dp_1', dtype=tf.string, name='version')
        # the character representation of the begin/end of sentence characters
def _make_bos_eos(c):
r = np.zeros([_max_word_length], dtype=id_nptype)
r[:] = _pad_id
r[0] = _bow_id
r[1] = c
r[2] = _eow_id
return tf.constant(r, dtype=id_dtype)
bos_ids = _make_bos_eos(_bos_id)
eos_ids = _make_bos_eos(_eos_id)
def token2ids(token):
with tf.name_scope("token2ids_preprocessor"):
char_ids = tf.decode_raw(token, tf.uint8, name='decode_raw2get_char_ids')
char_ids = tf.cast(char_ids, tf.int32, name='cast2int_token')
char_ids = tf.strided_slice(char_ids, [0], [max_word_length - 2],
[1], name='slice2resized_token')
ids_num = tf.shape(char_ids)[0]
fill_ids_num = (_max_word_length - 2) - ids_num
pads = tf.fill([fill_ids_num], _pad_id)
bow_token_eow_pads = tf.concat([[_bow_id], char_ids, [_eow_id], pads],
0, name='concat2bow_token_eow_pads')
return bow_token_eow_pads
def sentence_tagging_and_padding(sen_dim):
with tf.name_scope("sentence_tagging_and_padding_preprocessor"):
sen = sen_dim[0]
dim = sen_dim[1]
extra_dim = tf.shape(sen)[0] - dim
sen = tf.slice(sen, [0, 0], [dim, max_word_length], name='slice2sen')
bos_sen_eos = tf.concat([[bos_ids], sen, [eos_ids]], 0, name='concat2bos_sen_eos')
bos_sen_eos_plus_one = bos_sen_eos + 1
bos_sen_eos_pads = tf.pad(bos_sen_eos_plus_one, [[0, extra_dim], [0, 0]],
"CONSTANT", name='pad2bos_sen_eos_pads')
return bos_sen_eos_pads
# Input placeholders to the biLM.
tokens = tf.placeholder(shape=(None, None), dtype=tf.string, name='ph2tokens')
sequence_len = tf.placeholder(shape=(None, ), dtype=tf.int32, name='ph2sequence_len')
tok_shape = tf.shape(tokens)
line_tokens = tf.reshape(tokens, shape=[-1], name='reshape2line_tokens')
with tf.device('/cpu:0'):
tok_ids = tf.map_fn(
token2ids,
line_tokens,
dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
name='map_fn2get_tok_ids')
tok_ids = tf.reshape(tok_ids, [tok_shape[0], tok_shape[1], -1], name='reshape2tok_ids')
with tf.device('/cpu:0'):
sen_ids = tf.map_fn(
sentence_tagging_and_padding,
(tok_ids, sequence_len),
dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
name='map_fn2get_sen_ids')
# Build the biLM graph.
bilm = BidirectionalLanguageModel(options, str(weight_file),
max_batch_size=_max_batch_size)
embeddings_op = bilm(sen_ids)
# Get an op to compute ELMo (weighted average of the internal biLM layers)
elmo_output = weight_layers('elmo_output', embeddings_op, l2_coef=0.0)
weighted_op = elmo_output['weighted_op']
mean_op = elmo_output['mean_op']
word_emb = elmo_output['word_emb']
lstm_outputs1 = elmo_output['lstm_outputs1']
lstm_outputs2 = elmo_output['lstm_outputs2']
hub.add_signature("tokens", {"tokens": tokens, "sequence_len": sequence_len},
{"elmo": weighted_op,
"default": mean_op,
"word_emb": word_emb,
"lstm_outputs1": lstm_outputs1,
"lstm_outputs2": lstm_outputs2,
"version": version})
# #########################Next signature############################# #
# Input placeholders to the biLM.
def_strings = tf.placeholder(shape=(None), dtype=tf.string)
def_tokens_sparse = tf.string_split(def_strings)
def_tokens_dense = tf.sparse_to_dense(sparse_indices=def_tokens_sparse.indices,
output_shape=def_tokens_sparse.dense_shape,
sparse_values=def_tokens_sparse.values,
default_value=''
)
def_mask = tf.not_equal(def_tokens_dense, '')
def_int_mask = tf.cast(def_mask, dtype=tf.int32)
def_sequence_len = tf.reduce_sum(def_int_mask, axis=-1)
def_tok_shape = tf.shape(def_tokens_dense)
def_line_tokens = tf.reshape(def_tokens_dense, shape=[-1], name='reshape2line_tokens')
with tf.device('/cpu:0'):
def_tok_ids = tf.map_fn(
token2ids,
def_line_tokens,
dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
name='map_fn2get_tok_ids')
def_tok_ids = tf.reshape(def_tok_ids, [def_tok_shape[0], def_tok_shape[1], -1], name='reshape2tok_ids')
with tf.device('/cpu:0'):
def_sen_ids = tf.map_fn(
sentence_tagging_and_padding,
(def_tok_ids, def_sequence_len),
dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
name='map_fn2get_sen_ids')
# Get ops to compute the LM embeddings.
def_embeddings_op = bilm(def_sen_ids)
# Get an op to compute ELMo (weighted average of the internal biLM layers)
def_elmo_output = weight_layers('elmo_output', def_embeddings_op, l2_coef=0.0, reuse=True)
def_weighted_op = def_elmo_output['weighted_op']
def_mean_op = def_elmo_output['mean_op']
def_word_emb = def_elmo_output['word_emb']
def_lstm_outputs1 = def_elmo_output['lstm_outputs1']
def_lstm_outputs2 = def_elmo_output['lstm_outputs2']
hub.add_signature("default", {"strings": def_strings},
{"elmo": def_weighted_op,
"default": def_mean_op,
"word_emb": def_word_emb,
"lstm_outputs1": def_lstm_outputs1,
"lstm_outputs2": def_lstm_outputs2,
"version": version})
return hub.create_module_spec(module_fn)
| 15,954
|
def get_initial_scoreboard():
"""
Retrieve the initial scoreboard (first pages of global and student views).
If a user is logged in, the initial pages will instead be those on which
that user appears, and their group scoreboards will also be returned.
Returns: dict of scoreboard information
"""
def get_user_pos(scoreboard, tid):
for pos, team in enumerate(scoreboard):
if team["tid"] == tid:
return pos
return 1
user = None
if api.user.is_logged_in():
user = api.user.get_user()
result = {'tid': 0, 'groups': []}
global_board = api.stats.get_all_team_scores(include_ineligible=True)
result['global'] = {
'name': 'global',
'pages': math.ceil(len(global_board) / scoreboard_page_len),
'start_page': 1
}
if user is None:
result['global']['scoreboard'] = global_board[:scoreboard_page_len]
else:
result['tid'] = user['tid']
global_pos = get_user_pos(global_board, user["tid"])
start_slice = math.floor(global_pos / 50) * 50
result['global']['scoreboard'] = global_board[start_slice:
start_slice + 50]
result['global']['start_page'] = math.ceil((global_pos + 1) / 50)
result['country'] = user["country"]
student_board = api.stats.get_all_team_scores()
student_pos = get_user_pos(student_board, user["tid"])
start_slice = math.floor(student_pos / 50) * 50
result['student'] = {
'name': 'student',
'pages': math.ceil(len(student_board) / scoreboard_page_len),
'scoreboard': student_board[start_slice:start_slice + 50],
'start_page': math.ceil((student_pos + 1) / 50),
}
for group in api.team.get_groups(user['tid']):
# this is called on every scoreboard pageload and should be
# cached to support large groups
group_board = api.stats.get_group_scores(gid=group['gid'])
group_pos = get_user_pos(group_board, user["tid"])
start_slice = math.floor(group_pos / 50) * 50
result['groups'].append({
'gid':
group['gid'],
'name':
group['name'],
'scoreboard':
group_board[start_slice:start_slice + 50],
'pages':
math.ceil(len(group_board) / scoreboard_page_len),
'start_page':
math.ceil((group_pos + 1) / 50),
})
return result
| 15,955
|
async def resolve(qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
tcp=False, source=None, raise_on_no_answer=True,
source_port=0, lifetime=None, search=None, backend=None):
"""Query nameservers asynchronously to find the answer to the question.
This is a convenience function that uses the default resolver
object to make the query.
See ``dns.asyncresolver.Resolver.resolve`` for more information on the
parameters.
"""
return await get_default_resolver().resolve(qname, rdtype, rdclass, tcp,
source, raise_on_no_answer,
source_port, lifetime, search,
backend)
| 15,956
|
def compare_xml(want, got):
"""Tries to do a 'xml-comparison' of want and got. Plain string
comparison doesn't always work because, for example, attribute
ordering should not be important. Comment nodes are not considered in the
comparison.
Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
def child_text(element):
return ''.join([c.data for c in element.childNodes
if c.nodeType == Node.TEXT_NODE])
def children(element):
return [c for c in element.childNodes
if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
for want, got in zip(want_children, got_children):
if not check_element(want, got):
return False
return True
def first_node(document):
for node in document.childNodes:
if node.nodeType != Node.COMMENT_NODE:
return node
want, got = strip_quotes(want, got)
want = want.replace('\\n', '\n')
got = got.replace('\\n', '\n')
# If the string is not a complete xml document, we may need to add a
# root element. This allow us to compare fragments, like "<foo/><bar/>"
if not want.startswith('<?xml'):
wrapper = '<root>%s</root>'
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
want_root = first_node(parseString(want))
got_root = first_node(parseString(got))
return check_element(want_root, got_root)
| 15,957
|
def _tf_equal(a, b):
"""Overload of "equal" for Tensors."""
return gen_math_ops.equal(a, b)
| 15,958
|
def faq():
"""FAQ page for SciNet"""
return render_template("faq.html")
| 15,959
|
def test_produces_fst_analysis_string(na_layout: ParadigmLayout):
"""
It should produce a valid FST analysis string.
"""
lemma = "minôs"
expected_lines = {
# Basic
f"{lemma}+N+A+Sg",
f"{lemma}+N+A+Pl",
f"{lemma}+N+A+Obv",
f"{lemma}+N+A+Loc",
f"{lemma}+N+A+Distr",
# Diminutive
f"{lemma}+N+A+Der/Dim+N+A+Sg",
# Possession
f"{lemma}+N+A+Px1Sg+Sg",
f"{lemma}+N+A+Px1Sg+Pl",
f"{lemma}+N+A+Px1Sg+Obv",
f"{lemma}+N+A+Px2Sg+Sg",
f"{lemma}+N+A+Px2Sg+Pl",
f"{lemma}+N+A+Px2Sg+Obv",
f"{lemma}+N+A+Px3Sg+Obv",
f"{lemma}+N+A+Px1Pl+Sg",
f"{lemma}+N+A+Px1Pl+Pl",
f"{lemma}+N+A+Px1Pl+Obv",
f"{lemma}+N+A+Px12Pl+Sg",
f"{lemma}+N+A+Px12Pl+Pl",
f"{lemma}+N+A+Px12Pl+Obv",
f"{lemma}+N+A+Px2Pl+Sg",
f"{lemma}+N+A+Px2Pl+Pl",
f"{lemma}+N+A+Px2Pl+Obv",
f"{lemma}+N+A+Px3Pl+Obv",
f"{lemma}+N+A+Px4Sg/Pl+Obv",
}
template2analysis = na_layout.generate_fst_analyses(lemma)
assert len(expected_lines) == len(template2analysis)
assert expected_lines == set(template2analysis.values())
| 15,960
|
def test_config_ca_cert_as_file(mock_va, mock_connections, mock_isfile):
"""
Tests that the instantiation of a config.VaultConfig object results in the correct configuration.
"""
sample_config = {
"elastic": {
"user": "elastic_user",
"pwd": "elastic_password",
"index": "elastic_index",
"ca_certs": "fake_cert_ca_file",
}
}
local_sample_config = deepcopy(sample_config)
mock_va.return_value.auto_auth = Mock(return_value=True)
mock_va.return_value.loads = Mock(return_value=local_sample_config)
mock_isfile.side_effect = [False, True]
config = VaultConfig(
"test_not_a_conf_file.blargh", "test_not_a_vault_conf_file.blargh", "test_not_a_vault_creds_file.blargh"
)
# Validate calls to VaultAnyConfig instance
mock_va.assert_called_once_with("test_not_a_vault_conf_file.blargh", ac_parser=None)
mock_va.return_value.auto_auth.assert_called_once_with("test_not_a_vault_creds_file.blargh", ac_parser=None)
mock_va.return_value.loads.assert_called_once_with(
"test_not_a_conf_file.blargh", ac_parser=None, process_secret_files=False
)
# Validate elastic configuration
elastic_config = deepcopy(sample_config["elastic"])
auth_string = quote_plus(elastic_config["user"]) + ":" + quote_plus(elastic_config["pwd"])
elastic_config.update({"http_auth": auth_string})
del elastic_config["user"]
del elastic_config["pwd"]
assert config.ES_INDEX == elastic_config["index"]
| 15,961
|
def requirements(filename, pip_cmd='pip', python_cmd='python', **kwargs):
"""
Require Python packages from a pip `requirements file`_.
.. _requirements file: http://www.pip-installer.org/en/latest/requirements.html
"""
pip(MIN_PIP_VERSION, python_cmd=python_cmd)
install_requirements(filename, pip_cmd=pip_cmd, **kwargs)
| 15,962
|
def render_table(sheet, header, width, data, header_style, data_style, tt_id_style):
"""Рендерим страницу"""
# Render table header
for i in range(len(header)):
sheet.write(0, i, header[i], header_style)
sheet.col(i).width = width[i]
sheet.row(1).height = 2500
# Render table data
i = 1
for d in data:
sheet.row(i + 1).height = 2500
cols = [i, 'name', 'location', 'link', 'theme']
for col in range(len(cols)):
if col == 0:
sheet.write(i, col, i, tt_id_style)
elif col == 1:
sheet.write(i, col, d[cols[col]], tt_id_style)
else:
try:
if col == 9:
sheet.write(i, col, (round((d[cols[col]] / 30), 2)), data_style)
else:
sheet.write(i, col, d[cols[col]], data_style)
except KeyError:
sheet.write(i, col, 0, data_style)
i = i + 1
return sheet
| 15,963
|
def texsafe(value):
""" Returns a string with LaTeX special characters stripped/escaped out """
special = [
[ "\\xc5", 'A'], #'\\AA'
[ "\\xf6", 'o'],
[ "&", 'and'], #'\\"{o}'
]
for char in ['\\', '^', '~', '%', "'", '"']: # these mess up things
value = value.replace(char, '')
for char in ['#','$','_', '{', '}', '<', '>']: # these can be escaped properly
value = value.replace(char, '\\' + char)
for char, new_char in special:
value = eval(repr(value).replace(char, new_char))
return value
| 15,964
|
def match_against_host_software_profile(db_session, hostname, software_packages):
"""
Given a software package list, return an array of dictionaries indicating if the
software package matches any software package defined in the host software profile package list.
"""
results = []
system_option = SystemOption.get(db_session)
if system_option.check_host_software_profile:
host = get_host(db_session, hostname)
if host is not None and len(software_packages) > 0:
software_profile = get_software_profile_by_id(db_session, host.software_profile_id)
if software_profile is not None:
software_profile_package_dict = get_matchable_package_dict(software_profile.packages.split(','))
software_package_dict = get_matchable_package_dict(software_packages)
for software_package, pattern in software_package_dict.items():
matched = True if pattern in software_profile_package_dict.values() else False
results.append({'software_package': software_package, 'matched': matched})
return results
| 15,965
|
def get_inputs_by_op(op: Op, store: Mapping[str, Any], copy_on_write: bool = False) -> Any:
"""Retrieve the necessary input data from the data dictionary in order to run an `op`.
Args:
op: The op to run.
store: The system's data dictionary to draw inputs out of.
copy_on_write: Whether to copy read-only data to make it writeable before returning it.
Returns:
Input data to be fed to the `op` forward function.
"""
if op.in_list:
data = []
else:
data = None
if op.inputs:
data = []
for key in op.inputs:
elem = store[key]
if copy_on_write and isinstance(elem, np.ndarray) and not elem.flags.writeable:
elem = deepcopy(elem)
store[key] = elem
data.append(elem)
if not op.in_list:
data = data[0]
return data
| 15,966
|
def test_warn_bad_formatting(eo_validator: ValidateRunner, example_metadata: Dict):
"""A warning if fields aren't formatted in standard manner."""
example_metadata["properties"]["eo:platform"] = example_metadata["properties"][
"eo:platform"
].upper()
eo_validator.warnings_are_errors = True
eo_validator.assert_invalid(example_metadata, codes=["property_formatting"])
| 15,967
|
def cleanup_gcp_instances(neo4j_session, common_job_parameters):
"""
Delete out-of-date GCP instance nodes and relationships
:param neo4j_session: The Neo4j session
:param common_job_parameters: dict of other job parameters to pass to Neo4j
:return: Nothing
"""
run_cleanup_job('gcp_compute_instance_cleanup.json', neo4j_session, common_job_parameters)
| 15,968
|
def finalize_post(func, store: Type['ParameterStore']):
"""Finalizes the store prior to executing the function
Parameters
----------
func : callable
The function to wrap.
store : ParameterStore
The parameter store to finalize.
Returns
-------
callable
The wrapped function.
Raises
------
MissingParameterException
If there's a parameter missing from the required parameters in
the given `store`.
"""
@wraps(func)
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
if not store.final:
store.finalize()
return ret
return wrapper
| 15,969
|
def searchaftertext(filename, startterm, searchterm):
"""Start search after a certain text in a file"""
#print startterm
#print searchterm
startline = findLastString (filename, startterm)
searchtermfound = findLastString (filename, searchterm)
if searchtermfound > startline:
return True
return False
| 15,970
|
def ilcd_flow_generator(archive=ELCD, **kwargs):
"""
This generates flows from the current reference ELCD archive.
:param archive:
:param kwargs:
:return:
"""
i = IlcdLcia(archive, **kwargs)
count = 0
for f in i.list_objects('Flow'):
o = i.objectify(f, dtype='Flow')
if o is not None:
yield o
count += 1
if count % 1000 == 0:
print('%d data sets completed' % count)
| 15,971
|
def export_mobilenetv2():
""" export_mobilenetv2 """
print('\nconfig: \n', config)
if not config.device_id:
config.device_id = get_device_id()
context_device_init(config)
_, _, net = define_net(config, config.is_training)
load_ckpt(net, config.ckpt_file)
input_shp = [config.batch_size, 3, config.image_height, config.image_width]
input_array = ms.Tensor(np.random.uniform(-1.0, 1.0, size=input_shp).astype(np.float32))
ms.export(net, input_array, file_name=config.file_name, file_format=config.file_format)
| 15,972
|
def process_new_table(args, syn):
"""
Function: process_new_table
Purpose: Create an annotations table with the specified name under the
specified Synapse parent ID using the specified JSON schema. This
function is called when the "new_table" option is specified when
the program is called.
Arguments: JSON schema file reference
Synapse parent ID
Synapse table name
A Synapse client object
"""
# Define column names for the synapse table.
dcc_column_names = [
Column(name="key", columnType="STRING", maximumSize=100),
Column(name="description", columnType="STRING", maximumSize=250),
Column(name="columnType", columnType="STRING", maximumSize=50),
Column(name="maximumSize", columnType="DOUBLE"),
Column(name="value", columnType="STRING", maximumSize=250),
Column(name="valueDescription", columnType="LARGETEXT"),
Column(name="source", columnType="STRING", maximumSize=250),
Column(name="module", columnType="STRING", maximumSize=100)]
syn_table_df = process_schema(args.json_schema_file)
# Build and populate the Synapse table.
table_schema = Schema(name=args.synapse_table_name,
columns=dcc_column_names,
parent=args.parent_synapse_id)
dcc_table = syn.store(Table(table_schema, syn_table_df))
| 15,973
|
def vec_list_to_tensor(vec_list):
"""Convert list to vector tensor."""
return jnp.stack(vec_list, axis=-1)
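# Usage sketch (assumes JAX is installed; `jnp` is jax.numpy, as in this module).
import jax.numpy as jnp
print(vec_list_to_tensor([jnp.zeros(2), jnp.ones(2)]).shape)  # (2, 2)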
| 15,974
|
def to_ecma_datetime_string(dt, default_timezone=local):
"""
Convert a python datetime into the string format defined by ECMA-262.
See ECMA international standard: ECMA-262 section 15.9.1.15
    Naive datetime objects are assumed to be in ``default_timezone`` (local time by default);
    aware values are converted to UTC before formatting.
"""
assert isinstance(dt, datetime.datetime)
dt = get_tz_aware_dt(dt, default_timezone).astimezone(utc)
return "%4i-%02i-%02iT%02i:%02i:%02i.%03iZ" % (
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond / 1000)
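# Illustration of the output format (self-contained; bypasses this module's
# timezone helpers by starting from an already-aware UTC datetime).
import datetime
_dt = datetime.datetime(2024, 1, 2, 3, 4, 5, 678000, tzinfo=datetime.timezone.utc)
print("%4i-%02i-%02iT%02i:%02i:%02i.%03iZ" % (
    _dt.year, _dt.month, _dt.day, _dt.hour, _dt.minute, _dt.second, _dt.microsecond / 1000))
# -> 2024-01-02T03:04:05.678Z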
| 15,975
|
def WmiAddClassQualifiers(grph, conn_wmi, wmi_class_node, class_name, with_props):
"""This adds information to a WMI class."""
try:
# No need to print this, at the moment.
if False:
klass_descr = str(dir(getattr(conn_wmi, class_name)))
grph.add((wmi_class_node, lib_common.MakeProp("dir"), lib_util.NodeLiteral(klass_descr)))
klass_descr = str(getattr(conn_wmi, class_name)._properties)
grph.add((wmi_class_node, lib_common.MakeProp("_properties"), lib_util.NodeLiteral(klass_descr)))
klass_descr = str(getattr(conn_wmi, class_name).properties["Description"])
grph.add((wmi_class_node, lib_common.MakeProp("properties.Description"), lib_util.NodeLiteral(klass_descr)))
klass_descr = str(getattr(conn_wmi, class_name).property_map)
# Otherwise it crashes.
            klass_descr_clean = klass_descr.replace("{", " ").replace("}", " ")
            grph.add((wmi_class_node, lib_common.MakeProp("property_map"), lib_util.NodeLiteral(klass_descr_clean)))
the_cls = _get_wmi_class_flag_use_amended_qualifiers(conn_wmi, class_name)
if the_cls:
# https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/wmi-class-qualifiers
# Specifies a description of the block for the locale specified by the Locale qualifier.
# If defined, WMI clients can display the description string to users.
klass_descr = the_cls.Qualifiers_("Description")
# Beware, klass_descr is of type "instance".
str_klass_descr = six.text_type(klass_descr)
# This might be a value with broken HTML tags such as:
# "CIM_DataFile is a type ... <B>The behavior ... e returned.<B>"
str_klass_descr = str_klass_descr.replace("<B>", "")
grph.add((wmi_class_node, pc.property_information, lib_util.NodeLiteral(str_klass_descr)))
if with_props:
for prop_obj in the_cls.Properties_:
prop_dsc = six.text_type(prop_obj.Qualifiers_("Description"))
# Properties of different origins should not be mixed.
# Prefixes the property with a dot, so sorting displays it at the end.
# Surprisingly, the dot becomes invisible.
grph.add((wmi_class_node, lib_common.MakeProp("." + prop_obj.Name), lib_util.NodeLiteral(prop_dsc)))
else:
grph.add(
(wmi_class_node, pc.property_information, lib_util.NodeLiteral("No description for %s" % class_name)))
klass_quals = getattr(conn_wmi, class_name).qualifiers
for kla_qual_key in klass_quals:
kla_qual_val = klass_quals[kla_qual_key]
if isinstance(kla_qual_val, tuple):
kla_qual_val = "{ " + ",".join(kla_qual_val) + " }"
# Some specific properties match an entity class, so we can create a node.
# IT WORKS BUT IT IS NOT NICE AS IT IS A SEPARATE NODE.
# We would like to have a clickable URL displayed in a table TD.
if kla_qual_key == "UUID":
nodeUUID = lib_uris.gUriGen.ComTypeLibUri(kla_qual_val)
grph.add((wmi_class_node, lib_common.MakeProp(kla_qual_key), nodeUUID))
continue
grph.add((wmi_class_node, lib_common.MakeProp(kla_qual_key), lib_util.NodeLiteral(kla_qual_val)))
except Exception as exc:
try:
# Dumped in json so that lists can be appropriately deserialized then displayed.
err_str = json.dumps(list(exc))
except:
# Might have caught: 'com_error' object is not iterable
err_str = json.dumps("Non-iterable COM Error:" + str(exc))
grph.add((wmi_class_node, lib_common.MakeProp("WMI Error"), lib_util.NodeLiteral(err_str)))
| 15,976
|
def _logfile_readme() -> str:
"""Returns a string containing a 'how to read this logfile' message.
Returns
-------
str
Returns a formatted paragraph-long message with tips on reading log file output.
"""
line1 = "Messages are displayed below in the format"
line2 = " <DATE> <TIME> <LOGGER NAME> @ <FILE>:<LINE> - <LEVEL> - <FUNCTION>:<MESSAGE>"
line3 = "where <DATE> is the date in 'YYYY-MM-DD' format, <TIME> is the time in 'HH:MM:SS,milliseconds' format, <LOGGER NAME> is the name of the logger that generated the message (which should be the __name__ of the file where the logger was initialized), <FILE> and <LINE> is the file name and line number where the message was generated, <LEVEL> is the priority level that the message was generated at, <FUNCTION> is the name of the function that the message was generated inside, and <MESSAGE> is the actual message that was generated. "
message = f"{line1}\n\n{line2}\n\n{line3}\n\n"
return message
| 15,977
|
def taxon_lookup(es, body, index, taxonomy_index_template, opts, return_type):
"""Query elasticsearch for a taxon."""
taxa = []
with tolog.DisableLogger():
res = es.search_template(body=body, index=index, rest_total_hits_as_int=True)
if "hits" in res and res["hits"]["total"] > 0:
if return_type == "taxon_id":
taxa = [hit["_source"]["taxon_id"] for hit in res["hits"]["hits"]]
else:
taxa = [hit for hit in res["hits"]["hits"]]
else:
template = taxonomy_index_template(opts["taxonomy-source"].lower(), opts)
index = template["index_name"]
with tolog.DisableLogger():
res = es.search_template(
body=body, index=index, rest_total_hits_as_int=True
)
if "hits" in res and res["hits"]["total"] > 0:
if return_type == "taxon_id":
taxa = [hit["_source"]["taxon_id"] for hit in res["hits"]["hits"]]
else:
taxa = [hit for hit in res["hits"]["hits"]]
return taxa
| 15,978
|
async def get_south_services(request):
"""
Args:
request:
Returns:
list of all south services with tracked assets and readings count
:Example:
curl -X GET http://localhost:8081/fledge/south
"""
if 'cached' in request.query and request.query['cached'].lower() == 'false':
_get_installed_plugins.cache_clear()
storage_client = connect.get_storage_async()
cf_mgr = ConfigurationManager(storage_client)
try:
south_cat = await cf_mgr.get_category_child("South")
south_categories = [nc["key"] for nc in south_cat]
except:
return web.json_response({'services': []})
response = await _services_with_assets(storage_client, cf_mgr, south_categories)
return web.json_response({'services': response})
| 15,979
|
def polySpinEdge(*args, **kwargs):
"""
Flags:
- caching : cch (bool) []
- constructionHistory : ch (bool) []
- frozen : fzn (bool) []
- name : n (unicode) []
- nodeState : nds (int) []
- offset : off (int) []
- reverse : rev (bool) []
Derived from mel command `maya.cmds.polySpinEdge`
"""
pass
| 15,980
|
def filter_bam_file(bamfile, chromosome, outfile):
"""
filter_bam_file uses samtools to read a <bamfile> and read only
the reads that are mapped to <chromosome>.
It saves the filtered reads into <outfile>.
"""
inputs = [bamfile]
outputs = [outfile]
options = {
'cores': 1,
'memory': '4g',
'account': 'NChain',
'walltime': '01:00:00'
}
directory = "/".join(outfile.split("/")[:-1])
spec = '''
source /com/extra/samtools/1.6.0/load.sh
mkdir -p {dirc}
samtools view -b {infile} {chrom} > {out}
'''.format(infile=bamfile, chrom=chromosome, out=outfile, dirc=directory)
return inputs, outputs, options, spec
| 15,981
|
def import_teachers():
"""
Import the teachers from Moodle.
:return: Amount of imported users.
:rtype: int
"""
course_list = dict(Course.objects.values_list("courseId", "pk"))
teachers_list = parse_get_teachers(get_teachers(list(course_list.keys())))
teacher_group = create_auth_group()
users = create_teachers(teachers_list)
add_courses_and_group_to_users(course_list, teacher_group, teachers_list, users)
return users.count()
| 15,982
|
def baryvel(dje, deq):
"""
Calculate helio- and barycentric velocity.
.. note:: The "JPL" option present in IDL is not provided here.
Parameters
----------
dje : float
Julian ephemeris date
deq : float
Epoch of mean equinox of helio- and barycentric velocity output.
If `deq` is zero, `deq` is assumed to be equal to `dje`.
Returns
-------
dvelh : array
Heliocentric velocity vector [km/s].
dvelb : array
Barycentric velocity vector [km/s].
Notes
-----
.. note:: This function was ported from the IDL Astronomy User's Library.
:IDL - Documentation:
pro baryvel, dje, deq, dvelh, dvelb, JPL = JPL
NAME:
BARYVEL
PURPOSE:
Calculates heliocentric and barycentric velocity components of Earth.
EXPLANATION:
BARYVEL takes into account the Earth-Moon motion, and is useful for
radial velocity work to an accuracy of ~1 m/s.
CALLING SEQUENCE:
BARYVEL, dje, deq, dvelh, dvelb, [ JPL = ]
INPUTS:
DJE - (scalar) Julian ephemeris date.
DEQ - (scalar) epoch of mean equinox of dvelh and dvelb. If deq=0
then deq is assumed to be equal to dje.
OUTPUTS:
DVELH: (vector(3)) heliocentric velocity component. in km/s
DVELB: (vector(3)) barycentric velocity component. in km/s
The 3-vectors DVELH and DVELB are given in a right-handed coordinate
system with the +X axis toward the Vernal Equinox, and +Z axis
toward the celestial pole.
OPTIONAL KEYWORD SET:
JPL - if /JPL set, then BARYVEL will call the procedure JPLEPHINTERP
to compute the Earth velocity using the full JPL ephemeris.
The JPL ephemeris FITS file JPLEPH.405 must exist in either the
current directory, or in the directory specified by the
environment variable ASTRO_DATA. Alternatively, the JPL keyword
can be set to the full path and name of the ephemeris file.
A copy of the JPL ephemeris FITS file is available in
http://idlastro.gsfc.nasa.gov/ftp/data/
PROCEDURES CALLED:
Function PREMAT() -- computes precession matrix
JPLEPHREAD, JPLEPHINTERP, TDB2TDT - if /JPL keyword is set
NOTES:
Algorithm taken from FORTRAN program of Stumpff (1980, A&A Suppl, 41,1)
Stumpf claimed an accuracy of 42 cm/s for the velocity. A
comparison with the JPL FORTRAN planetary ephemeris program PLEPH
found agreement to within about 65 cm/s between 1986 and 1994
If /JPL is set (using JPLEPH.405 ephemeris file) then velocities are
given in the ICRS system; otherwise in the FK4 system.
EXAMPLE:
Compute the radial velocity of the Earth toward Altair on 15-Feb-1994
using both the original Stumpf algorithm and the JPL ephemeris
IDL> jdcnv, 1994, 2, 15, 0, jd ;==> JD = 2449398.5
IDL> baryvel, jd, 2000, vh, vb ;Original algorithm
==> vh = [-17.07243, -22.81121, -9.889315] ;Heliocentric km/s
==> vb = [-17.08083, -22.80471, -9.886582] ;Barycentric km/s
IDL> baryvel, jd, 2000, vh, vb, /jpl ;JPL ephemeris
==> vh = [-17.07236, -22.81126, -9.889419] ;Heliocentric km/s
==> vb = [-17.08083, -22.80484, -9.886409] ;Barycentric km/s
IDL> ra = ten(19,50,46.77)*15/!RADEG ;RA in radians
IDL> dec = ten(08,52,3.5)/!RADEG ;Dec in radians
IDL> v = vb[0]*cos(dec)*cos(ra) + $ ;Project velocity toward star
vb[1]*cos(dec)*sin(ra) + vb[2]*sin(dec)
REVISION HISTORY:
Jeff Valenti, U.C. Berkeley Translated BARVEL.FOR to IDL.
W. Landsman, Cleaned up program sent by Chris McCarthy (SfSU) June 1994
Converted to IDL V5.0 W. Landsman September 1997
Added /JPL keyword W. Landsman July 2001
Documentation update W. Landsman Dec 2005
"""
# Define constants
dc2pi = 2 * np.pi
cc2pi = 2 * np.pi
dc1 = 1.0
dcto = 2415020.0
dcjul = 36525.0 # days in Julian year
dcbes = 0.313
dctrop = 365.24219572 # days in tropical year (...572 insig)
dc1900 = 1900.0
AU = 1.4959787e8
# Constants dcfel(i,k) of fast changing elements.
dcfel = [1.7400353e00, 6.2833195099091e02, 5.2796e-6, 6.2565836e00, 6.2830194572674e02, -2.6180e-6, 4.7199666e00, 8.3997091449254e03, -1.9780e-5, 1.9636505e-1, 8.4334662911720e03, -5.6044e-5,
4.1547339e00, 5.2993466764997e01, 5.8845e-6, 4.6524223e00, 2.1354275911213e01, 5.6797e-6, 4.2620486e00, 7.5025342197656e00, 5.5317e-6, 1.4740694e00, 3.8377331909193e00, 5.6093e-6]
dcfel = np.resize(dcfel, (8, 3))
# constants dceps and ccsel(i,k) of slowly changing elements.
dceps = [4.093198e-1, -2.271110e-4, -2.860401e-8]
ccsel = [1.675104e-2, -4.179579e-5, -1.260516e-7, 2.220221e-1, 2.809917e-2, 1.852532e-5, 1.589963e00, 3.418075e-2, 1.430200e-5, 2.994089e00, 2.590824e-2, 4.155840e-6, 8.155457e-1, 2.486352e-2, 6.836840e-6, 1.735614e00, 1.763719e-2, 6.370440e-6, 1.968564e00, 1.524020e-2, -2.517152e-6, 1.282417e00, 8.703393e-3, 2.289292e-5, 2.280820e00,
1.918010e-2, 4.484520e-6, 4.833473e-2, 1.641773e-4, -4.654200e-7, 5.589232e-2, -3.455092e-4, -7.388560e-7, 4.634443e-2, -2.658234e-5, 7.757000e-8, 8.997041e-3, 6.329728e-6, -1.939256e-9, 2.284178e-2, -9.941590e-5, 6.787400e-8, 4.350267e-2, -6.839749e-5, -2.714956e-7, 1.348204e-2, 1.091504e-5, 6.903760e-7, 3.106570e-2, -1.665665e-4, -1.590188e-7]
ccsel = np.resize(ccsel, (17, 3))
# Constants of the arguments of the short-period perturbations.
dcargs = [5.0974222e0, -7.8604195454652e2, 3.9584962e0, -5.7533848094674e2, 1.6338070e0, -1.1506769618935e3, 2.5487111e0, -3.9302097727326e2, 4.9255514e0, -5.8849265665348e2, 1.3363463e0, -5.5076098609303e2, 1.6072053e0, -5.2237501616674e2, 1.3629480e0, -
1.1790629318198e3, 5.5657014e0, -1.0977134971135e3, 5.0708205e0, -1.5774000881978e2, 3.9318944e0, 5.2963464780000e1, 4.8989497e0, 3.9809289073258e1, 1.3097446e0, 7.7540959633708e1, 3.5147141e0, 7.9618578146517e1, 3.5413158e0, -5.4868336758022e2]
dcargs = np.resize(dcargs, (15, 2))
# Amplitudes ccamps(n,k) of the short-period perturbations.
ccamps = \
[-2.279594e-5, 1.407414e-5, 8.273188e-6, 1.340565e-5, -2.490817e-7, -3.494537e-5, 2.860401e-7, 1.289448e-7, 1.627237e-5, -1.823138e-7, 6.593466e-7, 1.322572e-5, 9.258695e-6, -4.674248e-7, -3.646275e-7, 1.140767e-5, -2.049792e-5, -4.747930e-6, -2.638763e-6, -1.245408e-7, 9.516893e-6, -2.748894e-6, -1.319381e-6, -4.549908e-6, -1.864821e-7, 7.310990e-6, -1.924710e-6, -8.772849e-7, -3.334143e-6, -1.745256e-7, -2.603449e-6, 7.359472e-6, 3.168357e-6, 1.119056e-6, -1.655307e-7, -3.228859e-6,
1.308997e-7, 1.013137e-7, 2.403899e-6, -3.736225e-7, 3.442177e-7, 2.671323e-6, 1.832858e-6, -2.394688e-7, -3.478444e-7, 8.702406e-6, -8.421214e-6, -1.372341e-6, -1.455234e-6, -4.998479e-8, -1.488378e-6, -1.251789e-5, 5.226868e-7, -2.049301e-7, 0.e0, -8.043059e-6, -2.991300e-6, 1.473654e-7, -3.154542e-7, 0.e0, 3.699128e-6, -3.316126e-6, 2.901257e-7, 3.407826e-7, 0.e0, 2.550120e-6, -1.241123e-6, 9.901116e-8, 2.210482e-7, 0.e0, -6.351059e-7, 2.341650e-6, 1.061492e-6, 2.878231e-7, 0.e0]
ccamps = np.resize(ccamps, (15, 5))
# Constants csec3 and ccsec(n,k) of the secular perturbations in longitude.
ccsec3 = -7.757020e-8
ccsec = [1.289600e-6, 5.550147e-1, 2.076942e00, 3.102810e-5, 4.035027e00, 3.525565e-1,
9.124190e-6, 9.990265e-1, 2.622706e00, 9.793240e-7, 5.508259e00, 1.559103e01]
ccsec = np.resize(ccsec, (4, 3))
# Sidereal rates.
dcsld = 1.990987e-7 # sidereal rate in longitude
ccsgd = 1.990969e-7 # sidereal rate in mean anomaly
# Constants used in the calculation of the lunar contribution.
cckm = 3.122140e-5
ccmld = 2.661699e-6
ccfdi = 2.399485e-7
# Constants dcargm(i,k) of the arguments of the perturbations of the motion
# of the moon.
dcargm = [5.1679830e0, 8.3286911095275e3, 5.4913150e0, -
7.2140632838100e3, 5.9598530e0, 1.5542754389685e4]
dcargm = np.resize(dcargm, (3, 2))
# Amplitudes ccampm(n,k) of the perturbations of the moon.
ccampm = [1.097594e-1, 2.896773e-7, 5.450474e-2, 1.438491e-7, -2.223581e-2, 5.083103e-8,
1.002548e-2, -2.291823e-8, 1.148966e-2, 5.658888e-8, 8.249439e-3, 4.063015e-8]
ccampm = np.resize(ccampm, (3, 4))
# ccpamv(k)=a*m*dl,dt (planets), dc1mme=1-mass(earth+moon)
ccpamv = [8.326827e-11, 1.843484e-11, 1.988712e-12, 1.881276e-12]
dc1mme = 0.99999696e0
# Time arguments.
dt = (dje - dcto) / dcjul
tvec = np.array([1e0, dt, dt * dt])
    # Values of all elements at the instant dje.
temp = idlMod(np.dot(dcfel, tvec), dc2pi)
dml = temp[0]
forbel = temp[1:8]
g = forbel[0] # old fortran equivalence
deps = idlMod(np.sum(tvec * dceps), dc2pi)
sorbel = idlMod(np.dot(ccsel, tvec), dc2pi)
e = sorbel[0] # old fortran equivalence
# Secular perturbations in longitude.
dummy = np.cos(2.0)
sn = np.sin(idlMod(np.dot(ccsec[::, 1:3], tvec[0:2]), cc2pi))
# Periodic perturbations of the emb (earth-moon barycenter).
pertl = np.sum(ccsec[::, 0] * sn) + (dt * ccsec3 * sn[2])
pertld = 0.0
pertr = 0.0
pertrd = 0.0
for k in smo.range(15):
a = idlMod((dcargs[k, 0] + dt * dcargs[k, 1]), dc2pi)
cosa = np.cos(a)
sina = np.sin(a)
pertl = pertl + ccamps[k, 0] * cosa + ccamps[k, 1] * sina
pertr = pertr + ccamps[k, 2] * cosa + ccamps[k, 3] * sina
if k < 11:
pertld = pertld + (ccamps[k, 1] * cosa -
ccamps[k, 0] * sina) * ccamps[k, 4]
pertrd = pertrd + (ccamps[k, 3] * cosa -
ccamps[k, 2] * sina) * ccamps[k, 4]
# Elliptic part of the motion of the emb.
phi = (e * e / 4e0) * (((8e0 / e) - e) * np.sin(g) + 5 *
np.sin(2 * g) + (13 / 3e0) * e * np.sin(3 * g))
f = g + phi
sinf = np.sin(f)
cosf = np.cos(f)
dpsi = (dc1 - e * e) / (dc1 + e * cosf)
phid = 2 * e * ccsgd * ((1 + 1.5 * e * e) * cosf +
e * (1.25 - 0.5 * sinf * sinf))
psid = ccsgd * e * sinf / np.sqrt(dc1 - e * e)
# Perturbed heliocentric motion of the emb.
d1pdro = dc1 + pertr
drd = d1pdro * (psid + dpsi * pertrd)
drld = d1pdro * dpsi * (dcsld + phid + pertld)
dtl = idlMod((dml + phi + pertl), dc2pi)
dsinls = np.sin(dtl)
dcosls = np.cos(dtl)
dxhd = drd * dcosls - drld * dsinls
dyhd = drd * dsinls + drld * dcosls
# Influence of eccentricity, evection and variation on the geocentric
# motion of the moon.
pertl = 0.0
pertld = 0.0
pertp = 0.0
pertpd = 0.0
for k in smo.range(3):
a = idlMod((dcargm[k, 0] + dt * dcargm[k, 1]), dc2pi)
sina = np.sin(a)
cosa = np.cos(a)
pertl = pertl + ccampm[k, 0] * sina
pertld = pertld + ccampm[k, 1] * cosa
pertp = pertp + ccampm[k, 2] * cosa
pertpd = pertpd - ccampm[k, 3] * sina
# Heliocentric motion of the earth.
tl = forbel[1] + pertl
sinlm = np.sin(tl)
coslm = np.cos(tl)
sigma = cckm / (1.0 + pertp)
a = sigma * (ccmld + pertld)
b = sigma * pertpd
dxhd = dxhd + a * sinlm + b * coslm
dyhd = dyhd - a * coslm + b * sinlm
dzhd = -sigma * ccfdi * np.cos(forbel[2])
# Barycentric motion of the earth.
dxbd = dxhd * dc1mme
dybd = dyhd * dc1mme
dzbd = dzhd * dc1mme
for k in smo.range(4):
plon = forbel[k + 3]
pomg = sorbel[k + 1]
pecc = sorbel[k + 9]
tl = idlMod((plon + 2.0 * pecc * np.sin(plon - pomg)), cc2pi)
dxbd = dxbd + ccpamv[k] * (np.sin(tl) + pecc * np.sin(pomg))
dybd = dybd - ccpamv[k] * (np.cos(tl) + pecc * np.cos(pomg))
dzbd = dzbd - ccpamv[k] * sorbel[k + 13] * np.cos(plon - sorbel[k + 5])
# Transition to mean equator of date.
dcosep = np.cos(deps)
dsinep = np.sin(deps)
dyahd = dcosep * dyhd - dsinep * dzhd
dzahd = dsinep * dyhd + dcosep * dzhd
dyabd = dcosep * dybd - dsinep * dzbd
dzabd = dsinep * dybd + dcosep * dzbd
# Epoch of mean equinox (deq) of zero implies that we should use
# Julian ephemeris date (dje) as epoch of mean equinox.
if deq == 0:
dvelh = AU * np.array([dxhd, dyahd, dzahd])
dvelb = AU * np.array([dxbd, dyabd, dzabd])
return dvelh, dvelb
# General precession from epoch dje to deq.
deqdat = (dje - dcto - dcbes) / dctrop + dc1900
prema = np.transpose(premat(deqdat, deq, FK4=True))
dvelh = AU * np.dot([dxhd, dyahd, dzahd], prema)
dvelb = AU * np.dot([dxbd, dyabd, dzabd], prema)
return dvelh, dvelb
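# Usage sketch (runs only inside this module, which supplies np, smo, idlMod and
# premat): Earth velocity at JD 2449398.5 referred to the J2000 mean equinox.
vh, vb = baryvel(2449398.5, 2000.0)
print(vh, vb)  # two 3-vectors in km/s, cf. the IDL example in the docstring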
| 15,983
|
def uniform(low=0.0, high=1.0, size=None):
"""This function has the same `nlcpy.random.RandomState.uniform`
See Also
--------
nlcpy.random.RandomState.uniform : Draws samples from a uniform distribution.
"""
rs = generator._get_rand()
return rs.uniform(low, high, size=size)
| 15,984
|
def test_post_model_predict_bool_param_missing_status_code_equals_400():
"""Verify that calling /api/model/predict with no bool_param fails with 400 and flags missing
properties as required."""
response = _post_request_good_json_with_overrides("bool_param", None)
assert response.status_code == HTTPStatus.BAD_REQUEST
assert "'bool_param' is a required property" in response.text
| 15,985
|
def parse_time_interval(interval_str):
"""Convert a human-readable time interval to a tuple of start and end value.
Args:
interval_str: (`str`) A human-readable str representing an interval
(e.g., "[10us, 20us]", "<100s", ">100ms"). Supported time suffixes are
us, ms, s.
Returns:
`Interval` object where start and end are in microseconds.
Raises:
ValueError: if the input is not valid.
"""
str_interval = _parse_interval(interval_str)
interval_start = 0
interval_end = float("inf")
if str_interval.start:
interval_start = parse_readable_time_str(str_interval.start)
if str_interval.end:
interval_end = parse_readable_time_str(str_interval.end)
if interval_start > interval_end:
raise ValueError(
"Invalid interval %s. Start must be before end of interval." %
interval_str)
return Interval(interval_start, str_interval.start_included,
interval_end, str_interval.end_included)
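# A minimal, self-contained sketch (not this module's own parser) of the idea
# behind parse_readable_time_str: a bound such as "100ms" is normalized to
# microseconds before the interval comparison.
import re

_UNITS = {"us": 1, "ms": 1_000, "s": 1_000_000}

def _to_micros(text):
    match = re.fullmatch(r"\s*(\d+(?:\.\d+)?)\s*(us|ms|s)\s*", text)
    if match is None:
        raise ValueError("unsupported time string: %r" % text)
    return float(match.group(1)) * _UNITS[match.group(2)]

print(_to_micros("100ms"))  # -> 100000.0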
| 15,986
|
def submatrix(M, x):
"""If x is an array of integer row/col numbers and M a matrix,
extract the submatrix which is the all x'th rows and cols.
i.e. A = submatrix(M,x) => A_ij = M_{x_i}{x_j}
"""
return M[np.ix_(x,x)]
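# Tiny usage sketch: rows/cols 0 and 2 of a 3x3 matrix.
import numpy as np
M = np.arange(9).reshape(3, 3)
print(submatrix(M, [0, 2]))
# [[0 2]
#  [6 8]]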
| 15,987
|
def inventory_report(products: list) -> str:
"""Gives a detailed report on created products"""
unique_names, average_price, average_weight, average_flam = _build_report_metrics(products)
report = f'''ACME CORPORATION OFFICIAL INVENTORY REPORT
Unique product names: {unique_names}
Average price: {average_price}
Average weight: {average_weight}
Average flammability: {average_flam}'''
    print(report)
    return report
| 15,988
|
def iou(a, b):
""" Calculates intersection over union (IOU) over two tuples """
(a_x1, a_y1), (a_x2, a_y2) = a
(b_x1, b_y1), (b_x2, b_y2) = b
a_area = (a_x2 - a_x1) * (a_y2 - a_y1)
b_area = (b_x2 - b_x1) * (b_y2 - b_y1)
dx = min(a_x2, b_x2) - max(a_x1, b_x1)
dy = min(a_y2, b_y2) - max(a_y1, b_y1)
if (dx>=0) and (dy>=0):
overlap = dx * dy
iou = overlap / (a_area + b_area - overlap)
return iou
return 0
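# Usage sketch: two unit squares overlapping in a 0.5 x 1 strip.
a = ((0.0, 0.0), (1.0, 1.0))
b = ((0.5, 0.0), (1.5, 1.0))
print(iou(a, b))  # 0.5 / (1 + 1 - 0.5) = 0.3333...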
| 15,989
|
def svn_path_is_empty(*args):
"""svn_path_is_empty(char path) -> int"""
return _core.svn_path_is_empty(*args)
| 15,990
|
def main(args=None):
"""
CphdConsistency CLI tool. Prints results to stdout.
Parameters
----------
args: None|List[str]
List of CLI argument strings. If None use sys.argv
"""
parser = argparse.ArgumentParser(description="Analyze a CPHD and display inconsistencies")
parser.add_argument('cphd_or_xml')
    parser.add_argument('-v', '--verbose', default=0, action='count',
                        help="Increase verbosity (can be specified more than once; more than 4 has no additional effect)")
parser.add_argument('--schema', help="Use a supplied schema file", default=DEFAULT_SCHEMA)
parser.add_argument('--noschema', action='store_const', const=None, dest='schema', help="Disable schema checks")
parser.add_argument('--signal-data', action='store_true', help="Check the signal data for NaN and +/- Inf")
config = parser.parse_args(args)
# Some questionable abuse of the pytest internals
import ast
import _pytest.assertion.rewrite
base, ext = os.path.splitext(__file__) # python2 can return the '*.pyc' file
with open(base + '.py', 'r') as fd:
source = fd.read()
tree = ast.parse(source)
try:
_pytest.assertion.rewrite.rewrite_asserts(tree)
except TypeError as e:
_pytest.assertion.rewrite.rewrite_asserts(tree, source)
co = compile(tree, __file__, 'exec', dont_inherit=True)
ns = {}
exec(co, ns)
cphd_con = ns['CphdConsistency'].from_file(config.cphd_or_xml, config.schema, config.signal_data)
cphd_con.check()
failures = cphd_con.failures()
cphd_con.print_result(fail_detail=config.verbose >= 1,
include_passed_asserts=config.verbose >= 2,
include_passed_checks=config.verbose >= 3,
skip_detail=config.verbose >= 4)
return bool(failures)
| 15,991
|
def compute_lorentz(Phi, omega, sigma):
"""In a time-harmonic discretization with quantities
.. math::
\\begin{align}
A &= \\Re(a \\exp(\\text{i} \\omega t)),\\\\
B &= \\Re(b \\exp(\\text{i} \\omega t)),
\\end{align}
the time-average of :math:`A\\times B` over one period is
.. math::
\\overline{A\\times B} = \\frac{1}{2} \\Re(a \\times b^*),
see http://www.ece.rutgers.edu/~orfanidi/ewa/ch01.pdf.
Since the Lorentz force generated by the current :math:`J` in the magnetic
field :math:`B` is
.. math::
F_L = J \\times B,
its time average is
.. math::
\\overline{F_L} = \\frac{1}{2} \\Re(j \\times b^*).
With
.. math::
J &= \\Re(\\exp(\\text{i} \\omega t) j e_{\\theta}),\\\\
B &= \\Re\\left(
\\exp(i \\omega t) \\left(
-\\frac{\\text{d}\\phi}{\\text{d}z} e_r
+ \\frac{1}{r} \\frac{\\text{d}(r\\phi)}{\\text{d}r} e_z
\\right)
\\right),
we have
.. math::
\\overline{F_L}
&= \\frac{1}{2} \\Re\\left(j \\frac{d\\phi^*}{dz} e_z
+ \\frac{j}{r} \\frac{d(r\\phi^*)}{dr} e_r\\right)\\\\
&= \\frac{1}{2}
\\Re\\left(\\frac{j}{r} \\nabla(r\\phi^*)\\right)\\\\
In the workpiece, we can assume
.. math::
j = -\\text{i} \\sigma \\omega \\phi
which gives
.. math::
\\begin{align*}
\\overline{F_L}
&= \\frac{\\sigma\\omega}{2r} \\Im\\left(
\\phi \\nabla(r \\phi^*)
\\right)\\\\
&= \\frac{\\sigma\\omega}{2r} \\left(
\\Im(\\phi) \\nabla(r \\Re(\\phi))
-\\Re(\\phi) \\nabla(r \\Im(\\phi))
\\right)
\\end{align*}
"""
mesh = Phi[0].function_space().mesh()
r = SpatialCoordinate(mesh)[0]
return (
0.5
* sigma
* omega
/ r
* (+Phi[1] * grad(r * Phi[0]) - Phi[0] * grad(r * Phi[1]))
)
| 15,992
|
def test_model_finder_regression_residual_error(model_finder_regression):
"""Testing if prediction_errors raises an error when there are no search results available (regression)."""
with pytest.raises(ModelsNotSearchedError) as excinfo:
_ = model_finder_regression.residuals(1)
assert "Search Results is not available. " in str(excinfo.value)
| 15,993
|
def play_db(cursor, query_string, lookup_term):
"""
Given a query string and a term, retrieve the list of plays associated with
that term
"""
play_list = []
try:
cursor.execute(query_string, [lookup_term])
play_res = cursor.fetchall()
except DatabaseError as err:
LOG.error(
"Error retrieving plays for %s: %s", lookup_term, err
)
return play_list
for row in play_res:
play_list.append(row)
if not play_list:
LOG.info("No plays for %s", lookup_term)
return play_list
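# Usage sketch with an in-memory sqlite3 database (hypothetical schema; the real
# caller supplies its own DB cursor, query string and logger, and this module's
# DatabaseError/LOG are not touched on the happy path exercised here).
import sqlite3
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE plays (title TEXT, term TEXT)")
conn.execute("INSERT INTO plays VALUES ('Hamlet', 'ghost')")
print(play_db(conn.cursor(), "SELECT title FROM plays WHERE term = ?", "ghost"))
# -> [('Hamlet',)]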
| 15,994
|
def list_banks(bot, update):
"""Show user names of banks that are supported"""
chat_id = update.message.chat_id
parser_classes = utils.get_parser_classes()
bank_names = "\n".join(
parser_cls.name + "\t:\t" + parser_cls.short_name
for parser_cls in parser_classes
)
msg = _("Supported banks: \n{}").format(bank_names)
bot.sendMessage(chat_id=chat_id,
text=msg)
return
| 15,995
|
def find_nocc(two_arr, n):
"""
    Given two sorted arrays of the SAME length and a number n,
    find the nth smallest number a_n and return two counts indicating,
    for each array, how many of its elements are no larger than a_n.
n can be real. Take the floor.
"""
l = len(two_arr[0])
if n >= 2 * l: return l, l
if n == 0: return 0, 0
res, n = n % 1, int(n)
lo, hi = max(0, n - l - 1), min(l - 1, n - 1)
while lo <= hi:
        mid = int((lo + hi) / 2)  # imagine mid is the right answer
if mid + 1 < l and n - mid - 2 >= 0:
if two_arr[0][mid + 1] < two_arr[1][n - mid - 2]:
lo = mid + 1
continue
if n - mid - 1 < l:
if two_arr[1][n - mid - 1] < two_arr[0][mid]:
hi = mid
continue
break
if n - mid - 1 >= l or mid + 1 < l and two_arr[0][mid + 1] < two_arr[1][n - mid - 1]:
return mid + res + 1, n - mid - 1
else:
return mid + 1, n - mid - 1 + res
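# Usage sketch: the 3 smallest values of [1, 3, 5] and [2, 4, 6] are 1, 2, 3,
# i.e. two come from the first array and one from the second.
print(find_nocc([[1, 3, 5], [2, 4, 6]], 3))  # (2, 1)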
| 15,996
|
def get_dict(str_of_dict: str, order_key='', sort_dict=False) -> list:
"""Function returns the list of dicts:
:param str_of_dict: string got form DB
(e.g. {"genre_id": 10, "genre_name": "name1"}, {"genre_id": 11, "genre_name": "name12"},...),
:param order_key: the key by which dictionaries will be sorted (required if flag 'sort_dict=True'),
:param sort_dict: flag for sorting the dictionary (boolean).
:return: list of dicts (e.g. [{"genre_id": 10, "genre_name": "name1"}, {"genre_id": 11, "genre_name": "name12"},...])"""
result_dict = list()
if str_of_dict:
result_dict = json.loads('[' + str_of_dict + ']')
if sort_dict and order_key:
try:
result_dict = sorted(result_dict, key=lambda i: i[order_key])
return result_dict
except KeyError:
return result_dict
return result_dict
else:
return result_dict
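# Usage sketch (assumes `json` is imported in this module, as the body requires).
rows = '{"genre_id": 11, "genre_name": "b"}, {"genre_id": 10, "genre_name": "a"}'
print(get_dict(rows, order_key="genre_id", sort_dict=True))
# [{'genre_id': 10, 'genre_name': 'a'}, {'genre_id': 11, 'genre_name': 'b'}]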
| 15,997
|
def pyz_repositories():
"""Rules to be invoked from WORKSPACE to load remote dependencies."""
excludes = native.existing_rules()
WHEEL_BUILD_CONTENT = wheel_build_content()
if 'pypi_atomicwrites' not in excludes:
http_archive(
name = 'pypi_atomicwrites',
url = 'https://files.pythonhosted.org/packages/3a/9a/9d878f8d885706e2530402de6417141129a943802c084238914fa6798d97/atomicwrites-1.2.1-py2.py3-none-any.whl',
sha256 = '0312ad34fcad8fac3704d441f7b317e50af620823353ec657a53e981f92920c0',
build_file_content=WHEEL_BUILD_CONTENT,
type="zip",
)
if 'pypi_attrs' not in excludes:
http_archive(
name = 'pypi_attrs',
url = 'https://files.pythonhosted.org/packages/3a/e1/5f9023cc983f1a628a8c2fd051ad19e76ff7b142a0faf329336f9a62a514/attrs-18.2.0-py2.py3-none-any.whl',
sha256 = 'ca4be454458f9dec299268d472aaa5a11f67a4ff70093396e1ceae9c76cf4bbb',
build_file_content=WHEEL_BUILD_CONTENT,
type="zip",
)
if 'pypi_funcsigs' not in excludes:
http_archive(
name = 'pypi_funcsigs',
url = 'https://pypi.python.org/packages/69/cb/f5be453359271714c01b9bd06126eaf2e368f1fddfff30818754b5ac2328/funcsigs-1.0.2-py2.py3-none-any.whl',
sha256 = '330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca',
build_file_content=WHEEL_BUILD_CONTENT,
type="zip",
)
if 'pypi_more_itertools' not in excludes:
http_archive(
name="pypi_more_itertools",
url="https://files.pythonhosted.org/packages/fb/d3/77f337876600747ae307ea775ff264c5304a691941cd347382c7932c60ad/more_itertools-4.3.0-py2-none-any.whl",
sha256="fcbfeaea0be121980e15bc97b3817b5202ca73d0eae185b4550cbfce2a3ebb3d",
build_file_content=WHEEL_BUILD_CONTENT,
type="zip",
)
if 'pypi_pluggy' not in excludes:
http_archive(
name = 'pypi_pluggy',
url = 'https://files.pythonhosted.org/packages/f5/f1/5a93c118663896d83f7bcbfb7f657ce1d0c0d617e6b4a443a53abcc658ca/pluggy-0.7.1-py2.py3-none-any.whl',
sha256 = '6e3836e39f4d36ae72840833db137f7b7d35105079aee6ec4a62d9f80d594dd1',
build_file_content=WHEEL_BUILD_CONTENT,
type="zip",
)
if 'pypi_py' not in excludes:
http_archive(
name="pypi_py",
url="https://files.pythonhosted.org/packages/c8/47/d179b80ab1dc1bfd46a0c87e391be47e6c7ef5831a9c138c5c49d1756288/py-1.6.0-py2.py3-none-any.whl",
sha256="50402e9d1c9005d759426988a492e0edaadb7f4e68bcddfea586bc7432d009c6",
build_file_content=WHEEL_BUILD_CONTENT,
type="zip",
)
if 'pypi_pytest' not in excludes:
http_archive(
name="pypi_pytest",
# pytest 3.7.0 depends on pathlib2 which depends on scandir which is native code
# it does not ship manylinux wheels, so we can't easily depend on it: use pytest 3.6
url="https://files.pythonhosted.org/packages/d8/e9/73246a565c34c5f203dd78bc2382e0e93aa7a249cdaeba709099eb1bc701/pytest-3.6.4-py2.py3-none-any.whl",
sha256="952c0389db115437f966c4c2079ae9d54714b9455190e56acebe14e8c38a7efa",
build_file_content=WHEEL_BUILD_CONTENT,
type="zip",
)
if 'pypi_six' not in excludes:
http_archive(
name = 'pypi_six',
url = 'https://pypi.python.org/packages/67/4b/141a581104b1f6397bfa78ac9d43d8ad29a7ca43ea90a2d863fe3056e86a/six-1.11.0-py2.py3-none-any.whl',
sha256 = '832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb',
build_file_content=WHEEL_BUILD_CONTENT,
type="zip",
)
if 'pypi_setuptools' not in excludes:
http_archive(
name = 'pypi_setuptools',
url = 'https://files.pythonhosted.org/packages/81/17/a6301c14aa0c0dd02938198ce911eba84602c7e927a985bf9015103655d1/setuptools-40.4.1-py2.py3-none-any.whl',
sha256 = '822054653e22ef38eef400895b8ada55657c8db7ad88f7ec954bccff2b3b9b52',
build_file_content=WHEEL_BUILD_CONTENT,
type="zip",
)
| 15,998
|
def get_richpe_hash(pe):
"""Computes the RichPE hash given a file path or data.
If the RichPE hash is unable to be computed, returns None.
Otherwise, returns the computed RichPE hash.
If both file_path and data are provided, file_path is used by default.
Source : https://github.com/RichHeaderResearch/RichPE
"""
if pe.RICH_HEADER is None:
return None
# Get list of @Comp.IDs and counts from Rich header
# Elements in rich_fields at even indices are @Comp.IDs
# Elements in rich_fields at odd indices are counts
rich_fields = pe.RICH_HEADER.values
if len(rich_fields) % 2 != 0:
return None
# The RichPE hash of a file is computed by computing the md5 of specific
# metadata within the Rich header and the PE header
md5 = hashlib.md5()
# Update hash using @Comp.IDs and masked counts from Rich header
while len(rich_fields):
compid = rich_fields.pop(0)
count = rich_fields.pop(0)
mask = 2 ** (count.bit_length() // 2 + 1) - 1
count |= mask
md5.update(struct.pack("<L", compid))
md5.update(struct.pack("<L", count))
# Update hash using metadata from the PE header
md5.update(struct.pack("<L", pe.FILE_HEADER.Machine))
md5.update(struct.pack("<L", pe.FILE_HEADER.Characteristics))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.Subsystem))
md5.update(struct.pack("<B", pe.OPTIONAL_HEADER.MajorLinkerVersion))
md5.update(struct.pack("<B", pe.OPTIONAL_HEADER.MinorLinkerVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MajorOperatingSystemVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MinorOperatingSystemVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MajorImageVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MinorImageVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MajorSubsystemVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MinorSubsystemVersion))
return md5.hexdigest()
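# Usage sketch (assumes the third-party `pefile` package is installed;
# "sample.exe" is a placeholder path for a PE file on disk).
import pefile
pe = pefile.PE("sample.exe")
print(get_richpe_hash(pe))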
| 15,999
|