hexsha (stringlengths 40..40) | size (int64 3..1.03M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3..972) | max_stars_repo_name (stringlengths 6..130) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (listlengths 1..10) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 3..972) | max_issues_repo_name (stringlengths 6..130) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (listlengths 1..10) | max_issues_count (int64 1..116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 3..972) | max_forks_repo_name (stringlengths 6..130) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (listlengths 1..10) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 3..1.03M) | avg_line_length (float64 1.13..941k) | max_line_length (int64 2..941k) | alphanum_fraction (float64 0..1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6e7a923dc0921797520d19486862dbb22cc5d8c8 | 13,249 | py | Python | concourse/pipelines/gen_pipeline.py | tvar/gpdb | 58e5ca05fefb0aa17e76017013f8ffc938dd92db | ["PostgreSQL", "Apache-2.0"] | null | null | null | concourse/pipelines/gen_pipeline.py | tvar/gpdb | 58e5ca05fefb0aa17e76017013f8ffc938dd92db | ["PostgreSQL", "Apache-2.0"] | null | null | null | concourse/pipelines/gen_pipeline.py | tvar/gpdb | 58e5ca05fefb0aa17e76017013f8ffc938dd92db | ["PostgreSQL", "Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# ----------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ----------------------------------------------------------------------
"""Generate pipeline (default: gpdb_master-generated.yml) from template (default:
templates/gpdb-tpl.yml).
Python module requirements:
- jinja2 (install through pip or easy_install)
"""
from __future__ import print_function
import argparse
import datetime
import os
import re
import subprocess
import yaml
from jinja2 import Environment, FileSystemLoader
PIPELINES_DIR = os.path.dirname(os.path.abspath(__file__))
TEMPLATE_ENVIRONMENT = Environment(
autoescape=False,
loader=FileSystemLoader(os.path.join(PIPELINES_DIR, 'templates')),
trim_blocks=True,
lstrip_blocks=True,
    variable_start_string='[[',  # the default '{{' conflicts with Concourse pipeline syntax
variable_end_string=']]',
extensions=['jinja2.ext.loopcontrols']
)
BASE_BRANCH = "master" # when branching gpdb update to 7X_STABLE, 6X_STABLE, etc.
SECRETS_PATH = os.path.expanduser('~/workspace/gp-continuous-integration/secrets')
# Variables that govern pipeline validation
RELEASE_VALIDATOR_JOB = ['Release_Candidate', 'Build_Release_Candidate_RPMs']
JOBS_THAT_ARE_GATES = [
'gate_icw_start',
'gate_icw_end',
'gate_replication_start',
'gate_resource_groups_start',
'gate_cli_start',
'gate_ud_start',
'gate_advanced_analytics_start',
'gate_release_candidate_start'
]
JOBS_THAT_SHOULD_NOT_BLOCK_RELEASE = (
[
'combine_cli_coverage',
'compile_gpdb_binary_swap_centos7',
'compile_gpdb_clients_windows',
'concourse_unit_tests',
'walrep_2',
'madlib_build_gppkg',
'MADlib_Test_planner_centos7',
'MADlib_Test_orca_centos7',
'Publish Server Builds',
] + RELEASE_VALIDATOR_JOB + JOBS_THAT_ARE_GATES
)
def suggested_git_remote():
"""Try to guess the current git remote"""
default_remote = "<https://github.com/<github-user>/gpdb>"
remote = subprocess.check_output(["git", "ls-remote", "--get-url"]).decode('utf-8').rstrip()
if "greenplum-db/gpdb" in remote:
return default_remote
if "git@" in remote:
git_uri = remote.split('@')[1]
hostname, path = git_uri.split(':')
return 'https://%s/%s' % (hostname, path)
return remote
def suggested_git_branch():
"""Try to guess the current git branch"""
branch = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"]).decode('utf-8').rstrip()
if branch == "master" or is_a_base_branch(branch):
return "<branch-name>"
return branch
def is_a_base_branch(branch):
# best effort in matching a base branch (5X_STABLE, 6X_STABLE, etc.)
    matched = re.match(r"\d+X_STABLE", branch)
return matched is not None
def render_template(template_filename, context):
"""Render pipeline template yaml"""
return TEMPLATE_ENVIRONMENT.get_template(template_filename).render(context)
def validate_pipeline_release_jobs(raw_pipeline_yml):
"""Make sure all jobs in specified pipeline that don't block release are accounted
for (they should belong to JOBS_THAT_SHOULD_NOT_BLOCK_RELEASE, defined above)"""
print("======================================================================")
print("Validate Pipeline Release Jobs")
print("----------------------------------------------------------------------")
# ignore concourse v2.x variable interpolation
pipeline_yml_cleaned = re.sub('{{', '', re.sub('}}', '', raw_pipeline_yml))
pipeline = yaml.safe_load(pipeline_yml_cleaned)
jobs_raw = pipeline['jobs']
all_job_names = [job['name'] for job in jobs_raw]
rc_name = 'gate_release_candidate_start'
release_candidate_job = [j for j in jobs_raw if j['name'] == rc_name][0]
release_blocking_jobs = release_candidate_job['plan'][0]['in_parallel']['steps'][0]['passed']
non_release_blocking_jobs = [j for j in all_job_names if j not in release_blocking_jobs]
unaccounted_for_jobs = \
[j for j in non_release_blocking_jobs if j not in JOBS_THAT_SHOULD_NOT_BLOCK_RELEASE]
if unaccounted_for_jobs:
print("Please add the following jobs as a Release_Candidate dependency or ignore them")
print("by adding them to JOBS_THAT_SHOULD_NOT_BLOCK_RELEASE in " + __file__)
print(unaccounted_for_jobs)
return False
print("Pipeline validated: all jobs accounted for")
return True
def validate_target(target):
expected_secrets_file = "%s/ccp_ci_secrets_%s.yml" % (SECRETS_PATH, target)
if not os.path.exists(expected_secrets_file):
raise Exception('Invalid target "%s"; no secrets file found. Please ensure your secrets files in %s are up to date.' % (target, SECRETS_PATH))
def create_pipeline(args):
"""Generate OS specific pipeline sections"""
if args.test_trigger_false:
test_trigger = "true"
else:
test_trigger = "false"
context = {
'template_filename': args.template_filename,
'generator_filename': os.path.basename(__file__),
'timestamp': datetime.datetime.now(),
'os_types': args.os_types,
'test_sections': args.test_sections,
'pipeline_configuration': args.pipeline_configuration,
'test_trigger': test_trigger
}
pipeline_yml = render_template(args.template_filename, context)
if args.pipeline_target == 'prod':
validated = validate_pipeline_release_jobs(pipeline_yml)
if not validated:
print("Refusing to update the pipeline file")
return False
with open(args.output_filepath, 'w') as output:
header = render_template('pipeline_header.yml', context)
output.write(header)
output.write(pipeline_yml)
return True
def gen_pipeline(args, pipeline_name, secret_files,
git_remote=None,
git_branch=None):
if git_remote is None:
git_remote = suggested_git_remote()
if git_branch is None:
git_branch = suggested_git_branch()
secrets = ""
for secret in secret_files:
secrets += "-l %s/%s " % (SECRETS_PATH, secret)
format_args = {
'target': args.pipeline_target,
'name': pipeline_name,
'output_path': args.output_filepath,
'secrets_path': SECRETS_PATH,
'secrets': secrets,
'remote': git_remote,
'branch': git_branch,
}
return '''fly --target {target} \
set-pipeline \
--check-creds \
--pipeline {name} \
--config {output_path} \
--load-vars-from {secrets_path}/gpdb_common-ci-secrets.yml \
{secrets} \
--var gpdb-git-remote={remote} \
--var gpdb-git-branch={branch} \
--var pipeline-name={name} \
'''.format(**format_args)
def header(args):
return '''
======================================================================
Pipeline target: ......... : %s
Pipeline file ............ : %s
Template file ............ : %s
OS Types ................. : %s
Test sections ............ : %s
test_trigger ............. : %s
======================================================================
''' % (args.pipeline_target,
args.output_filepath,
args.template_filename,
args.os_types,
args.test_sections,
args.test_trigger_false
)
def print_fly_commands(args):
pipeline_name = os.path.basename(args.output_filepath).rsplit('.', 1)[0]
print(header(args))
if args.pipeline_target == 'prod':
print('NOTE: You can set the production pipelines with the following:\n')
pipeline_name = "gpdb_%s" % BASE_BRANCH if BASE_BRANCH == "master" else BASE_BRANCH
print(gen_pipeline(args, pipeline_name, ["gpdb_%s-ci-secrets.prod.yml" % BASE_BRANCH],
"https://github.com/greenplum-db/gpdb.git", BASE_BRANCH))
print(gen_pipeline(args, "%s_without_asserts" % pipeline_name, ["gpdb_%s_without_asserts-ci-secrets.prod.yml" % BASE_BRANCH],
"https://github.com/greenplum-db/gpdb.git", BASE_BRANCH))
return
print('NOTE: You can set the developer pipeline with the following:\n')
print(gen_pipeline(args, pipeline_name, ["gpdb_%s-ci-secrets.dev.yml" % BASE_BRANCH,
"ccp_ci_secrets_%s.yml" % args.pipeline_target]))
def main():
"""main: parse args and create pipeline"""
parser = argparse.ArgumentParser(
description='Generate Concourse Pipeline utility',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'-T',
'--template',
action='store',
dest='template_filename',
default="gpdb-tpl.yml",
help='Name of template to use, in templates/'
)
default_output_filename = "gpdb_%s-generated.yml" % BASE_BRANCH
parser.add_argument(
'-o',
'--output',
action='store',
dest='output_filepath',
default=os.path.join(PIPELINES_DIR, default_output_filename),
help='Output filepath to use for pipeline file, and from which to derive the pipeline name.'
)
parser.add_argument(
'-O',
'--os_types',
action='store',
dest='os_types',
default=['centos7'],
choices=['centos7', 'ubuntu18.04', 'win'],
nargs='+',
help='List of OS values to support'
)
parser.add_argument(
'-t',
'--pipeline_target',
action='store',
dest='pipeline_target',
default='dev',
help='Concourse target to use either: prod, dev, or <team abbreviation> '
'where abbreviation is found from the team\'s ccp secrets file name ending.'
)
parser.add_argument(
'-c',
'--configuration',
action='store',
dest='pipeline_configuration',
default='default',
        help='Set of platforms and test sections to use; only works with dev and team targets, ignored with the prod target. '
'Valid options are prod (same as the prod pipeline), full (everything except release jobs), and default '
'(follow the -A and -O flags).'
)
parser.add_argument(
'-a',
'--test_sections',
action='store',
dest='test_sections',
choices=[
'ICW',
'Replication',
'ResourceGroups',
'Interconnect',
'CLI',
'UD',
'AA',
'Extensions'
],
default=['ICW'],
nargs='+',
help='Select tests sections to run'
)
parser.add_argument(
'-n',
'--test_trigger_false',
action='store_false',
default=True,
help='Set test triggers to "false". This only applies to dev pipelines.'
)
parser.add_argument(
'-u',
'--user',
action='store',
dest='user',
default=os.getlogin(),
help='Developer userid to use for pipeline name and filename.'
)
args = parser.parse_args()
validate_target(args.pipeline_target)
output_path_is_set = os.path.basename(args.output_filepath) != default_output_filename
if (args.user != os.getlogin() and output_path_is_set):
print("You can only use one of --output or --user.")
exit(1)
if args.pipeline_target == 'prod':
args.pipeline_configuration = 'prod'
if args.pipeline_configuration == 'prod' or args.pipeline_configuration == 'full':
args.os_types = ['centos6', 'centos7', 'ubuntu18.04', 'win']
args.test_sections = [
'ICW',
'Replication',
'ResourceGroups',
'Interconnect',
'CLI',
'UD',
'Extensions'
]
# if generating a dev pipeline but didn't specify an output,
# don't overwrite the master pipeline
if args.pipeline_target != 'prod' and not output_path_is_set:
pipeline_file_suffix = suggested_git_branch()
if args.user != os.getlogin():
pipeline_file_suffix = args.user
default_dev_output_filename = 'gpdb-' + args.pipeline_target + '-' + pipeline_file_suffix + '.yml'
args.output_filepath = os.path.join(PIPELINES_DIR, default_dev_output_filename)
pipeline_created = create_pipeline(args)
if not pipeline_created:
exit(1)
print_fly_commands(args)
if __name__ == "__main__":
main()
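# Illustrative example (not part of the original script): for a developer target
# named "dev" on a feature branch, print_fly_commands() emits a command of roughly
# this shape; the remote, branch, and secrets paths below are placeholders that
# depend on the local checkout and the secrets repository:
#
#   fly --target dev set-pipeline --check-creds \
#       --pipeline gpdb-dev-my-branch \
#       --config <PIPELINES_DIR>/gpdb-dev-my-branch.yml \
#       --load-vars-from ~/workspace/gp-continuous-integration/secrets/gpdb_common-ci-secrets.yml \
#       -l ~/workspace/gp-continuous-integration/secrets/gpdb_master-ci-secrets.dev.yml \
#       -l ~/workspace/gp-continuous-integration/secrets/ccp_ci_secrets_dev.yml \
#       --var gpdb-git-remote=https://github.com/<github-user>/gpdb \
#       --var gpdb-git-branch=my-branch \
#       --var pipeline-name=gpdb-dev-my-branch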
| 32.794554 | 151 | 0.628953 |
0f1ee587bd890177eda3d710d9285cf832fb53e3 | 282 | py | Python | Curso de Cisco/Actividades/Algunas funciones simples - Serie Fibonacci.py | tomasfriz/Curso-de-Cisco | a50ee5fa96bd86d468403e29ccdc3565a181af60 | ["MIT"] | null | null | null | Curso de Cisco/Actividades/Algunas funciones simples - Serie Fibonacci.py | tomasfriz/Curso-de-Cisco | a50ee5fa96bd86d468403e29ccdc3565a181af60 | ["MIT"] | null | null | null | Curso de Cisco/Actividades/Algunas funciones simples - Serie Fibonacci.py | tomasfriz/Curso-de-Cisco | a50ee5fa96bd86d468403e29ccdc3565a181af60 | ["MIT"] | null | null | null |
def fib(n):
if n < 1:
return None
if n < 3:
return 1
elem1 = elem2 = 1
sum = 0
for i in range(3, n + 1):
sum = elem1 + elem2
elem1, elem2 = elem2, sum
return sum
for n in range(1, 10):  # testing
print(n, "->", fib(n))
| 18.8 | 33 | 0.471631 |
86cd9198c6422c92be4d85c0d1616de7b7a1a94d | 17,581 | py | Python | coco_dataset_generator/gui/segment.py | tommyfuu/COCO-Style-Dataset-Generator-GUI | 48807d995c9c076985874eb6962443ec07c643ff | ["Apache-2.0"] | null | null | null | coco_dataset_generator/gui/segment.py | tommyfuu/COCO-Style-Dataset-Generator-GUI | 48807d995c9c076985874eb6962443ec07c643ff | ["Apache-2.0"] | null | null | null | coco_dataset_generator/gui/segment.py | tommyfuu/COCO-Style-Dataset-Generator-GUI | 48807d995c9c076985874eb6962443ec07c643ff | ["Apache-2.0"] | null | null | null |
from matplotlib import pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
from matplotlib.widgets import RadioButtons
from matplotlib.path import Path
from PIL import Image
import matplotlib
import argparse
import numpy as np
import glob
import os
from matplotlib.widgets import Button
from matplotlib.lines import Line2D
from matplotlib.artist import Artist
from .poly_editor import PolygonInteractor, dist_point_to_segment
import sys
from ..utils.visualize_dataset import return_info
class COCO_dataset_generator(object):
def __init__(self, fig, ax, args):
self.ax = ax
self.ax.set_yticklabels([])
self.ax.set_xticklabels([])
self.img_dir = args['image_dir']
self.index = 0
self.fig = fig
self.polys = []
self.zoom_scale, self.points, self.prev, self.submit_p, self.lines, self.circles = 1.2, [], None, None, [], []
self.zoom_id = fig.canvas.mpl_connect('scroll_event', self.zoom)
self.click_id = fig.canvas.mpl_connect(
'button_press_event', self.onclick)
self.clickrel_id = fig.canvas.mpl_connect(
'button_release_event', self.onclick_release)
self.keyboard_id = fig.canvas.mpl_connect(
'key_press_event', self.onkeyboard)
self.axradio = plt.axes([0.0, 0.0, 0.2, 1])
self.axbringprev = plt.axes([0.3, 0.05, 0.17, 0.05])
self.axreset = plt.axes([0.48, 0.05, 0.1, 0.05])
self.axsubmit = plt.axes([0.59, 0.05, 0.1, 0.05])
self.axprev = plt.axes([0.7, 0.05, 0.1, 0.05])
self.axnext = plt.axes([0.81, 0.05, 0.1, 0.05])
self.b_bringprev = Button(
self.axbringprev, 'Bring Previous Annotations')
self.b_bringprev.on_clicked(self.bring_prev)
self.b_reset = Button(self.axreset, 'Reset')
self.b_reset.on_clicked(self.reset)
self.b_submit = Button(self.axsubmit, 'Submit')
self.b_submit.on_clicked(self.submit)
self.b_next = Button(self.axnext, 'Next')
self.b_next.on_clicked(self.next)
self.b_prev = Button(self.axprev, 'Prev')
self.b_prev.on_clicked(self.previous)
self.button_axes = [self.axbringprev, self.axreset,
self.axsubmit, self.axprev, self.axnext, self.axradio]
self.existing_polys = []
self.existing_patches = []
self.selected_poly = False
self.objects = []
self.feedback = args['feedback']
self.right_click = False
self.text = ''
with open(args['class_file'], 'r') as f:
self.class_names = [x.strip()
for x in f.readlines() if x.strip() != ""]
self.radio = RadioButtons(self.axradio, self.class_names)
self.class_names = ('BG',) + tuple(self.class_names)
        # change the extension here to JPG if annotating big datasets
self.img_paths = sorted(glob.glob(os.path.join(self.img_dir, '*.jpg')))
print(self.img_dir)
print(self.img_paths)
if len(self.img_paths) == 0:
self.img_paths = sorted(
glob.glob(os.path.join(self.img_dir, '*.png')))
if os.path.exists(self.img_paths[self.index][:-3]+'txt'):
self.index = len(glob.glob(os.path.join(self.img_dir, '*.txt')))
self.checkpoint = self.index
try:
im = Image.open(self.img_paths[self.index])
except IndexError:
print(
"Reached end of dataset! Delete some TXT files if you want to relabel some images in the folder")
exit()
width, height = im.size
im.close()
image = plt.imread(self.img_paths[self.index])
if args['feedback']:
from mask_rcnn import model as modellib
from mask_rcnn.get_json_config import get_demo_config
#from skimage.measure import find_contours
from .contours import find_contours
from mask_rcnn.visualize_cv2 import random_colors
config = get_demo_config(len(self.class_names)-2, True)
if args['config_path'] is not None:
config.from_json(args['config_path'])
# Create model object in inference mode.
model = modellib.MaskRCNN(
mode="inference", model_dir='/'.join(args['weights_path'].split('/')[:-2]), config=config)
# Load weights trained on MS-COCO
model.load_weights(args['weights_path'], by_name=True)
r = model.detect([image], verbose=0)[0]
# Number of instances
N = r['rois'].shape[0]
masks = r['masks']
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
class_ids, scores = r['class_ids'], r['scores']
for i in range(N):
color = colors[i]
# Label
class_id = class_ids[i]
score = scores[i] if scores is not None else None
label = self.class_names[class_id]
# Mask
mask = masks[:, :, i]
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
pat = PatchCollection(
[Polygon(verts, closed=True)], facecolor='green', linewidths=0, alpha=0.6)
self.ax.add_collection(pat)
self.objects.append(label)
self.existing_patches.append(pat)
self.existing_polys.append(
Polygon(verts, closed=True, alpha=0.25, facecolor='red'))
self.ax.imshow(image, aspect='auto')
self.text += str(self.index)+'\n'+os.path.abspath(
self.img_paths[self.index])+'\n'+str(width)+' '+str(height)+'\n\n'
def bring_prev(self, event):
if not self.feedback:
poly_verts, self.objects = return_info(
self.img_paths[self.index-1][:-3]+'txt')
for num in poly_verts:
self.existing_polys.append(
Polygon(num, closed=True, alpha=0.5, facecolor='red'))
pat = PatchCollection(
[Polygon(num, closed=True)], facecolor='green', linewidths=0, alpha=0.6)
self.ax.add_collection(pat)
self.existing_patches.append(pat)
def points_to_polygon(self):
return np.reshape(np.array(self.points), (int(len(self.points)/2), 2))
def deactivate_all(self):
self.fig.canvas.mpl_disconnect(self.zoom_id)
self.fig.canvas.mpl_disconnect(self.click_id)
self.fig.canvas.mpl_disconnect(self.clickrel_id)
self.fig.canvas.mpl_disconnect(self.keyboard_id)
def onkeyboard(self, event):
if not event.inaxes:
return
elif event.key == 'a':
if self.selected_poly:
self.points = self.interactor.get_polygon().xy.flatten()
self.interactor.deactivate()
self.right_click = True
self.selected_poly = False
                self.click_id = self.fig.canvas.mpl_connect('button_press_event', self.onclick)
self.polygon.color = (0, 255, 0)
self.fig.canvas.draw()
else:
for i, poly in enumerate(self.existing_polys):
if poly.get_path().contains_point((event.xdata, event.ydata)):
self.radio.set_active(
self.class_names.index(self.objects[i])-1)
self.polygon = self.existing_polys[i]
self.existing_patches[i].set_visible(False)
self.fig.canvas.mpl_disconnect(self.click_id)
self.ax.add_patch(self.polygon)
self.fig.canvas.draw()
self.interactor = PolygonInteractor(
self.ax, self.polygon)
self.selected_poly = True
self.existing_polys.pop(i)
break
elif event.key == 'r':
for i, poly in enumerate(self.existing_polys):
if poly.get_path().contains_point((event.xdata, event.ydata)):
self.existing_patches[i].set_visible(False)
self.existing_patches[i].remove()
self.existing_patches.pop(i)
self.existing_polys.pop(i)
break
self.fig.canvas.draw()
def next(self, event):
if len(self.text.split('\n')) > 5:
print(self.img_paths[self.index][:-3]+'txt')
with open(self.img_paths[self.index][:-3]+'txt', "w") as text_file:
text_file.write(self.text)
self.ax.clear()
self.ax.set_yticklabels([])
self.ax.set_xticklabels([])
if (self.index < len(self.img_paths)-1):
self.index += 1
else:
exit()
image = plt.imread(self.img_paths[self.index])
self.ax.imshow(image, aspect='auto')
im = Image.open(self.img_paths[self.index])
width, height = im.size
im.close()
self.reset_all()
self.text += str(self.index)+'\n'+os.path.abspath(
self.img_paths[self.index])+'\n'+str(width)+' '+str(height)+'\n\n'
def reset_all(self):
self.polys = []
self.text = ''
self.points, self.prev, self.submit_p, self.lines, self.circles = [], None, None, [], []
def previous(self, event):
if (self.index > self.checkpoint):
self.index -= 1
#print (self.img_paths[self.index][:-3]+'txt')
os.remove(self.img_paths[self.index][:-3]+'txt')
self.ax.clear()
self.ax.set_yticklabels([])
self.ax.set_xticklabels([])
image = plt.imread(self.img_paths[self.index])
self.ax.imshow(image, aspect='auto')
im = Image.open(self.img_paths[self.index])
width, height = im.size
im.close()
self.reset_all()
self.text += str(self.index)+'\n'+os.path.abspath(
self.img_paths[self.index])+'\n'+str(width)+' '+str(height)+'\n\n'
def onclick(self, event):
if not event.inaxes:
return
if not any([x.in_axes(event) for x in self.button_axes]):
if event.button == 1:
self.points.extend([event.xdata, event.ydata])
#print (event.xdata, event.ydata)
circle = plt.Circle(
(event.xdata, event.ydata), 2.5, color='black')
self.ax.add_artist(circle)
self.circles.append(circle)
if (len(self.points) < 4):
self.r_x = event.xdata
self.r_y = event.ydata
else:
if len(self.points) > 5:
self.right_click = True
self.fig.canvas.mpl_disconnect(self.click_id)
self.click_id = None
self.points.extend([self.points[0], self.points[1]])
# self.prev.remove()
if (len(self.points) > 2):
line = self.ax.plot(
[self.points[-4], self.points[-2]], [self.points[-3], self.points[-1]], 'b--')
self.lines.append(line)
self.fig.canvas.draw()
if len(self.points) > 4:
if self.prev:
self.prev.remove()
self.p = PatchCollection([Polygon(self.points_to_polygon(
), closed=True)], facecolor='red', linewidths=0, alpha=0.4)
self.ax.add_collection(self.p)
self.prev = self.p
self.fig.canvas.draw()
# if len(self.points)>4:
# print 'AREA OF POLYGON: ', self.find_poly_area(self.points)
# print event.x, event.y
def find_poly_area(self):
coords = self.points_to_polygon()
x, y = coords[:, 0], coords[:, 1]
# shoelace algorithm
return (0.5*np.abs(np.dot(x, np.roll(y, 1))-np.dot(y, np.roll(x, 1))))/2
def onclick_release(self, event):
if any([x.in_axes(event) for x in self.button_axes]) or self.selected_poly:
return
if hasattr(self, 'r_x') and hasattr(self, 'r_y') and None not in [self.r_x, self.r_y, event.xdata, event.ydata]:
# 10 pixels limit for rectangle creation
if np.abs(event.xdata - self.r_x) > 10 and np.abs(event.ydata - self.r_y) > 10:
if len(self.points) < 4:
self.right_click = True
self.fig.canvas.mpl_disconnect(self.click_id)
self.click_id = None
bbox = [np.min([event.xdata, self.r_x]), np.min([event.ydata, self.r_y]), np.max(
[event.xdata, self.r_x]), np.max([event.ydata, self.r_y])]
self.r_x = self.r_y = None
self.points = [bbox[0], bbox[1], bbox[0], bbox[3],
bbox[2], bbox[3], bbox[2], bbox[1], bbox[0], bbox[1]]
self.p = PatchCollection([Polygon(self.points_to_polygon(
), closed=True)], facecolor='red', linewidths=0, alpha=0.4)
self.ax.add_collection(self.p)
self.fig.canvas.draw()
def zoom(self, event):
if not event.inaxes:
return
cur_xlim = self.ax.get_xlim()
cur_ylim = self.ax.get_ylim()
xdata = event.xdata # get event x location
ydata = event.ydata # get event y location
if event.button == 'down':
# deal with zoom in
scale_factor = 1 / self.zoom_scale
elif event.button == 'up':
# deal with zoom out
scale_factor = self.zoom_scale
else:
# deal with something that should never happen
scale_factor = 1
print(event.button)
new_width = (cur_xlim[1] - cur_xlim[0]) * scale_factor
new_height = (cur_ylim[1] - cur_ylim[0]) * scale_factor
relx = (cur_xlim[1] - xdata)/(cur_xlim[1] - cur_xlim[0])
rely = (cur_ylim[1] - ydata)/(cur_ylim[1] - cur_ylim[0])
self.ax.set_xlim([xdata - new_width * (1-relx),
xdata + new_width * (relx)])
self.ax.set_ylim([ydata - new_height * (1-rely),
ydata + new_height * (rely)])
self.ax.figure.canvas.draw()
def reset(self, event):
if not self.click_id:
            self.click_id = self.fig.canvas.mpl_connect(
'button_press_event', self.onclick)
#print (len(self.lines))
#print (len(self.circles))
if len(self.points) > 5:
for line in self.lines:
line.pop(0).remove()
for circle in self.circles:
circle.remove()
self.lines, self.circles = [], []
self.p.remove()
self.prev = self.p = None
self.points = []
#print (len(self.lines))
#print (len(self.circles))
def print_points(self):
ret = ''
for x in self.points:
ret += '%.2f' % x+' '
return ret
def submit(self, event):
if not self.right_click:
print('Right click before submit is a must!!')
else:
self.text += self.radio.value_selected+'\n' + \
'%.2f' % self.find_poly_area()+'\n'+self.print_points()+'\n\n'
self.right_click = False
#print (self.points)
self.lines, self.circles = [], []
            self.click_id = self.fig.canvas.mpl_connect(
'button_press_event', self.onclick)
self.polys.append(Polygon(self.points_to_polygon(
), closed=True, color=np.random.rand(3), alpha=0.4, fill=True))
if self.submit_p:
self.submit_p.remove()
self.submit_p = PatchCollection(
self.polys, cmap=matplotlib.cm.jet, alpha=0.4)
self.ax.add_collection(self.submit_p)
self.points = []
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image_dir", required=True,
help="Path to the image dir")
ap.add_argument("-c", "--class_file", required=True,
help="Path to the classes file of the dataset")
ap.add_argument('-w', "--weights_path", default=None,
help="Path to Mask RCNN checkpoint save file")
ap.add_argument('-x', "--config_path", default=None,
help="Path to Mask RCNN training config JSON file to load model based on specific parameters")
args = vars(ap.parse_args())
args['feedback'] = args['weights_path'] is not None
fig = plt.figure(figsize=(14, 14))
ax = plt.gca()
gen = COCO_dataset_generator(fig, ax, args)
plt.subplots_adjust(bottom=0.2)
plt.show()
gen.deactivate_all()
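# Illustrative usage (hedged; the flags come from the argparse definition above,
# but the exact entry point may differ when the package is installed):
#
#   python segment.py -i /path/to/images/ -c /path/to/classes.txt
#
# Passing -w <mask_rcnn_weights.h5> (and optionally -x <config.json>) enables the
# Mask R-CNN "feedback" mode, which pre-populates editable polygon suggestions.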
| 35.73374 | 120 | 0.547466 |
755f9df2466b8a05680e02b692f5890f79dc33a6 | 3,539 | py | Python | bindings/python/ensmallen/datasets/string/neosartoryafischeri.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | ["MIT"] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/neosartoryafischeri.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | ["MIT"] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/neosartoryafischeri.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | ["MIT"] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z |
"""
This file offers the methods to automatically retrieve the graph Neosartorya fischeri.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def NeosartoryaFischeri(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Neosartorya fischeri graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
    cache_path: str = "graphs/string"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Neosartorya fischeri graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="NeosartoryaFischeri",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
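# Illustrative usage (import path inferred from the repository layout
# bindings/python/ensmallen/datasets/string/; verify against the installed package):
#
#   from ensmallen.datasets.string import NeosartoryaFischeri
#   graph = NeosartoryaFischeri(directed=False, version="links.v11.5")
#   # graph is an ensmallen Graph built from the STRING Neosartorya fischeri network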
| 32.768519 | 223 | 0.676745 |
4ac88e8e108f600ef5bd2afb5e9e96bc5266be6c | 666 | py | Python | djongo_test/djongo_test/djongo_app/manage.py | shyamyadati/django-mongo | d44e5b9e40a6fbab0de795a430593dd2430de8ba | ["MIT"] | null | null | null | djongo_test/djongo_test/djongo_app/manage.py | shyamyadati/django-mongo | d44e5b9e40a6fbab0de795a430593dd2430de8ba | ["MIT"] | null | null | null | djongo_test/djongo_test/djongo_app/manage.py | shyamyadati/django-mongo | d44e5b9e40a6fbab0de795a430593dd2430de8ba | ["MIT"] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djongo_app.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
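# Typical invocations of this entry point (standard Django workflow, shown for reference):
#   python manage.py migrate
#   python manage.py runserver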
| 28.956522 | 74 | 0.68018 |
9b3a4854c29f8a96c9cdf5ea08fd07a4429876bc | 3,786 | py | Python | scripts/figure4/preprocessing_omranian.py | jiawu/Roller | a70e350905a59c2254dcefda7ab23c6417cf8f7d | ["MIT"] | null | null | null | scripts/figure4/preprocessing_omranian.py | jiawu/Roller | a70e350905a59c2254dcefda7ab23c6417cf8f7d | ["MIT"] | 2 | 2015-07-13T18:51:22.000Z | 2015-07-16T15:35:24.000Z | scripts/figure4/preprocessing_omranian.py | jiawu/Roller | a70e350905a59c2254dcefda7ab23c6417cf8f7d | ["MIT"] | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from scipy.interpolate import interp1d
import sys
from datetime import datetime
import numpy as np
sys.path.append("../pipelines")
import Swing.util.lag_identification as lag_id
from Swing.util.Evaluator import Evaluator
import pdb
def create_df(raw_data_path):
raw_data = pd.read_csv(raw_data_path, sep = '\t')
time = [10,20,30,40,50]
raw_data = raw_data.transpose()
probe_names=raw_data.iloc[0]
raw_data = raw_data[1:][:]
biol_reps = 3
final_frame = pd.DataFrame()
for i in range(3):
rep_frame = raw_data[i::3][:].iloc[:5]
final_frame = final_frame.append(rep_frame)
#make time series
final_time_series = time + time + time
final_frame['Time'] = final_time_series
cols = final_frame.columns.tolist()
cols = cols[-1:] + cols[:-1]
final_frame=final_frame[cols]
#mapping the names to the map and the gold standard
name_map = pd.read_csv('../../data/invitro/ecoli_gene_map.txt', sep = '\t')
#map the probe name to the gene name
gene_list = []
for name in probe_names:
gene_name = name_map[name_map['ID'] == name]['ORF']
gene_list.append(gene_name.values[0].lower())
gene_list.insert(0, 'Time')
final_frame.columns = gene_list
return(final_frame)
#check if gene name exists in map
#raw_data_list = ['../data/invitro/omranian_coldstress.txt','../data/invitro/omranian_heatstress.txt','../data/invitro/omranian_control.txt','../data/invitro/omranian_oxidativestress.txt']
raw_data_list = ['../../data/invitro/omranian_control.txt','../../data/invitro/omranian_coldstress.txt','../../data/invitro/omranian_heatstress.txt', '../../data/invitro/omranian_oxidativestress.txt']
#raw_data_list = ['../data/invitro/omranian_oxidativestress.txt']
overall_df = pd.DataFrame()
for raw_data in raw_data_list:
df = create_df(raw_data)
overall_df = overall_df.append(df)
#overall_df = pd.read_csv('../../data/invitro/omranian_parsed_timeseries.tsv',sep='\t')
final_overall_df = overall_df
final_overall_df.to_csv('../../data/invitro/iomranian_parsed_timeseries.tsv', index=False,sep='\t')
genes = overall_df.columns[1:].tolist()
with open('../../data/invitro/omranian_tf_list.tsv','r') as f:
tf_list = f.read().splitlines()
with open('../../data/invitro/omranian_target_list.tsv','r') as f:
target_list = f.read().splitlines()
gs_list = tf_list+target_list
gs_list = list(set(gs_list))
not_measured_genes = list(set(gs_list)-set(genes))
#remove these genes from gold standard
parsed_gs = pd.read_csv('../../data/invitro/omranian_parsed_goldstandard.tsv',sep='\t', header=None)
parsed_gs.columns = ['regulator', 'target', 'effect']
parsed_gs = parsed_gs[~parsed_gs['regulator'].isin(not_measured_genes)]
parsed_gs = parsed_gs[~parsed_gs['target'].isin(not_measured_genes)]
parsed_gs = parsed_gs[~(parsed_gs['regulator']==parsed_gs['target'])]
parsed_gs.to_csv('../../data/invitro/iomranian_parsed_goldstandard.tsv',sep='\t',index=False, header=False)
parsed_gs_list = list(set(parsed_gs['regulator'].tolist()+parsed_gs['target'].tolist()))
with open('../../data/invitro/omranian_all_genes_list.tsv', 'w') as outfile:
outfile.write("\n".join(parsed_gs_list))
with open('../../data/invitro/omranian_parsed_tf_list.tsv', 'w') as outfile:
outfile.write("\n".join(parsed_gs['regulator'].unique().tolist()))
not_in_gs = list(set(genes) - set(gs_list))
#remove these genes from overall_df
in_df_in_gs = ['Time'] + list(set(gs_list).intersection(genes))
final_overall_df = overall_df[in_df_in_gs]
final_overall_df.to_csv('../../data/invitro/iomranian_parsed_timeseries.tsv', index=False,sep='\t')
my_eval = Evaluator('../../data/invitro/iomranian_parsed_goldstandard.tsv')
| 33.210526 | 200 | 0.721078 |
fb2202d6890b8e057a0c950fbb185c4d9cbcfac5 | 10,260 | py | Python | tl_algs/tnb.py | raijin0704/tl_algs | 1f9b4329a96eae93610a49aac04f1cc9e1d6ca4f | ["MIT"] | 19 | 2017-09-03T05:01:30.000Z | 2021-02-14T02:41:34.000Z | tl_algs/tnb.py | raijin0704/tl_algs | 1f9b4329a96eae93610a49aac04f1cc9e1d6ca4f | ["MIT"] | 1 | 2018-09-22T15:09:14.000Z | 2018-09-22T23:10:16.000Z | tl_algs/tnb.py | raijin0704/tl_algs | 1f9b4329a96eae93610a49aac04f1cc9e1d6ca4f | ["MIT"] | 7 | 2017-09-06T08:22:15.000Z | 2022-02-07T02:49:57.000Z |
import numpy as np
import pandas as pd
import json
from tl_algs import tl_alg
def sim_minmax(column):
"""Similarity score using the range between min and max
for a value
Args:
column: a given feature column
Returns:
tuple: A tuple of the form (min, max)
"""
return min(column), max(column)
def sim_std(column):
"""Similarity score using the standard error for a column
Args:
column: a given feature column
Returns:
tuple: tuple with the first element one std dev below the mean
and the second element one std dev above the mean
"""
return (np.mean(column) - np.std(column), np.mean(column) + np.std(column))
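# Illustrative example (values rounded): for a feature column [1, 2, 3, 4, 5],
# sim_minmax returns (1, 5), while sim_std returns roughly (1.59, 4.41)
# (mean 3.0 plus/minus one population standard deviation of about 1.41).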
class TransferNaiveBayes(tl_alg.Base_Transfer):
"""
Transfer Naive Bayes algorithm, as described by Ma [1]
Args:
similarity_func: the function used to determine if two features
are similar
num_disc_bins: the number of bins to use for discretization
        discretize: whether to discretize (if passing in already categorical
            data, set this to False).
Returns:
A classifier
[1] Ma, Y., Luo, G., Zeng, X., & Chen, A. (2012). Transfer learning
for cross-company software defect prediction. Information and
Software Technology, 54(3), 248-256.
https://doi.org/10.1016/j.infsof.2011.09.007
"""
def __init__(
self,
test_set_X,
test_set_domain,
train_pool_X,
train_pool_y,
train_pool_domain,
rand_seed=None,
classifier_params={},
similarity_func=sim_std,
num_disc_bins=10,
alpha=1,
discretize=True):
super(
TransferNaiveBayes,
self).__init__(
test_set_X,
test_set_domain,
train_pool_X,
train_pool_y,
train_pool_domain,
None,
rand_seed=rand_seed,
classifier_params=classifier_params)
self.similarity_func = similarity_func
self.num_disc_bins = num_disc_bins
self.discretize = discretize
self.alpha = alpha
self.cached_n_j = {}
self.cached_n_c = None
self.cached_cond_prob = {}
def isinrange(self, row, ranges):
""" returns a boolean vector where the ith entry corresponds
to whether the ith feature is within the range specified in ranges
Args:
row: The row to check
ranges: a list of the form [(low, high),...] for each feature
Returns:
A boolean vector
"""
return sum([cell >= ranges[i][0] and cell <= ranges[i][1]
for i, cell in enumerate(row)])
def get_weights(self):
"""
        Gets the similarity-based instance weights (w_i in the paper)
"""
# ranges is analogous to min_ij and max_ij in the paper
ranges = self.test_set_X.apply(self.similarity_func)
# for each row, compute similarity, analogous to s_i in the paper
similarity_count = self.train_pool_X.apply(lambda x: self.isinrange(x, ranges), axis=1)
# for each row, calculate weight, analogous to w_i in the paper
        # note that in the paper the number of features (train_pool_X.shape[1]) is k
        # materialize as a list so the weights can be assigned as a DataFrame column under Python 3
        weight_vec = [float(x) / (self.train_pool_X.shape[1] - x + 1) ** 2
                      for x in similarity_count]
return weight_vec
def get_discretized_X(self):
"""
Creates discretized versions of the training and test instances. Test instances are
used as the "baseline" which dictates the bins. The training instances are then
discretized using the same bin edges. The number of bins is controlled by the
self.num_disc_bins parameter.
Returns:
(X_train_disc, X_test_disc)
"""
test_disc_arr, train_disc_arr = [], []
for col_ind in range(self.test_set_X.shape[1]):
# start with discretizing test set, save bins
try:
test_disc, bins = pd.cut(self.test_set_X.iloc[:, col_ind],
bins = self.num_disc_bins,
                    labels=list(map(str, range(self.num_disc_bins))),
retbins=True)
except ValueError:
test_disc, bins = pd.cut(self.test_set_X.iloc[:, col_ind],
bins = [float('inf') * -1, float('inf')],
labels = [str(0)],
retbins=True)
            # make sure bins cover the entire interval
bins[0] = -1 * float('inf')
bins[-1] = float('inf')
# use (modified) test set bins for training set discretization
train_disc = pd.cut(self.train_pool_X.iloc[:, col_ind],
bins=bins,
                    labels=list(map(str, range(len(bins)-1))))
test_disc_arr.append(test_disc)
train_disc_arr.append(train_disc)
# combine discretized series to data frame
return pd.concat(train_disc_arr, axis=1), pd.concat(test_disc_arr, axis=1)
def get_cached_n_j(self, feature_index, X_weighted):
if feature_index not in self.cached_n_j.keys():
self.cached_n_j[feature_index] = len(X_weighted.iloc[:, feature_index].unique())
return self.cached_n_j[feature_index]
def get_cached_n_c(self):
if not self.cached_n_c:
self.cached_n_c = len(self.train_pool_y.unique())
return self.cached_n_c
def get_cached_conditional_prob(self, label, feature_index, feature_val,
X_weighted, n_c, n_j, alpha):
if (label, feature_index, feature_val) not in self.cached_cond_prob.keys():
feature_mask = np.asarray(self.train_pool_y == label).reshape(-1) & \
np.asarray(X_weighted.iloc[:, feature_index] == feature_val)\
.reshape(-1)
class_mask = np.asarray(self.train_pool_y == label).reshape(-1)
self.cached_cond_prob[(label, feature_index, feature_val)] = \
(X_weighted.myweight.loc[feature_mask].sum() + alpha) / \
(X_weighted.myweight.loc[class_mask].sum() + n_c * alpha)
return self.cached_cond_prob[(label, feature_index, feature_val)]
def get_class_prob(self, X_weighted, label, alpha):
""" Computes P(C) according to equation 7 in [1]
Args:
X_weighted: dataframe with columns of the form [feature1, feature2,
..., weight]
label: the label to calculate the probability of
alpha: the laplace smoothing factor, default is 1
"""
# number of classes
n_c = len(self.train_pool_y.unique())
mask = np.asarray(self.train_pool_y == label).reshape(-1)
return (X_weighted[mask].myweight.sum() + alpha) / \
(X_weighted.myweight.sum() + n_c * alpha)
def get_conditional_prob(self, X_weighted, label, feature_index,
feature_val, alpha):
""" Computes P(a_j|c), where a_j is value j for feature a, and
c is the class
Calculated according to equation 8 from [1]
Args:
X_weighted: dataframe with columns of the form [feature1, feature2,
..., weight]
label: the label to calculate the probability of
feature_val: the value of the feature to calculate the prob. of
alpha: the laplace smoothing factor, default is 1
"""
n_j = self.get_cached_n_j(feature_index, X_weighted)
n_c = self.get_cached_n_c()
return self.get_cached_conditional_prob(label, feature_index, feature_val,
X_weighted, n_c, n_j, alpha)
def get_posterior_prob(self, X_weighted, label, instance, alpha):
""" Compute P(c|u), where c is the class and u is the train instance
Calculated according to equation 1 from [1]
Args:
label: the label to calculate the probability of
instance: a row from the training set
Returns:
P(c|u)
"""
# building numerator
numerator = self.get_class_prob(X_weighted, label, alpha)
# column index = j
for j in range(len(instance)):
numerator *= self.get_conditional_prob(X_weighted,
label = label, feature_index = j, feature_val=instance[j],
alpha=alpha)
# building denominator
denominator = 0
for c in self.train_pool_y.unique():
term = self.get_class_prob(X_weighted, c, alpha)
for j in range(len(instance)):
term *= self.get_conditional_prob(X_weighted,
label = c, feature_index = j,
feature_val=instance[j],
alpha=alpha)
denominator += term
return numerator / denominator if denominator != 0 else 0
def train_filter_test(self):
"""Applies weight filter and returns predictions"""
weights = self.get_weights()
if self.discretize:
X_train_disc, X_test_disc = self.get_discretized_X()
else:
X_train_disc, X_test_disc = self.train_pool_X, self.test_set_X
X_train_disc['myweight'] = weights
y_pred, y_conf = [], []
for __, row in X_test_disc.iterrows():
class_probs = [self.get_posterior_prob(X_train_disc,
c, row, self.alpha) for c in [False, True]]
i = np.argmax(class_probs)
y_pred.append(i == 1)
# always get probability of positive prediction
y_conf.append(class_probs[1])
return np.array(y_conf), np.array(y_pred)
def json_encode(self):
"""Encodes this class as a json object"""
base = tl_alg.Base_Transfer.json_encode(self)
base.update({"similarity_func": self.similarity_func.__name__,
"num_disc_bins" : self.num_disc_bins,
"discretize" : self.discretize
})
return base
| 36.774194 | 95 | 0.589864 |
a67c4928ab631edb9ab93cbdbc9659406d80a04a | 6,291 | py | Python | panda_gym/envs/robots/panda.py | quenting44/panda-gym | 68c87420fa1ced96a52bae3eef2dd596fd4d820c | ["MIT"] | null | null | null | panda_gym/envs/robots/panda.py | quenting44/panda-gym | 68c87420fa1ced96a52bae3eef2dd596fd4d820c | ["MIT"] | null | null | null | panda_gym/envs/robots/panda.py | quenting44/panda-gym | 68c87420fa1ced96a52bae3eef2dd596fd4d820c | ["MIT"] | null | null | null |
from typing import Optional
import numpy as np
from gym import spaces
from panda_gym.envs.core import PyBulletRobot
from panda_gym.pybullet import PyBullet
class Panda(PyBulletRobot):
"""Panda robot in PyBullet.
Args:
sim (PyBullet): Simulation instance.
block_gripper (bool, optional): Whether the gripper is blocked. Defaults to False.
        base_position (np.ndarray, optional): Position of the base of the robot, as (x, y, z). Defaults to (0, 0, 0).
control_type (str, optional): "ee" to control end-effector displacement or "joints" to control joint angles.
Defaults to "ee".
"""
def __init__(
self,
sim: PyBullet,
block_gripper: bool = False,
base_position: Optional[np.ndarray] = None,
control_type: str = "ee",
) -> None:
base_position = base_position if base_position is not None else np.zeros(3)
self.block_gripper = block_gripper
self.control_type = control_type
        n_action = 3 if self.control_type == "ee" else 7  # control (x, y, z) if "ee", else control the 7 joints
n_action += 0 if self.block_gripper else 1
action_space = spaces.Box(-1.0, 1.0, shape=(n_action,), dtype=np.float32)
super().__init__(
sim,
body_name="panda",
file_name="franka_panda/panda.urdf",
base_position=base_position,
action_space=action_space,
joint_indices=np.array([0, 1, 2, 3, 4, 5, 6, 9, 10]),
joint_forces=np.array([87.0, 87.0, 87.0, 87.0, 12.0, 120.0, 120.0, 170.0, 170.0]),
)
self.fingers_indices = np.array([9, 10])
self.neutral_joint_values = np.array([0.00, 0.41, 0.00, -1.85, 0.00, 2.26, 0.79, 0.00, 0.00])
self.ee_link = 11
self.sim.set_lateral_friction(self.body_name, self.fingers_indices[0], lateral_friction=1.0)
self.sim.set_lateral_friction(self.body_name, self.fingers_indices[1], lateral_friction=1.0)
self.sim.set_spinning_friction(self.body_name, self.fingers_indices[0], spinning_friction=0.001)
self.sim.set_spinning_friction(self.body_name, self.fingers_indices[1], spinning_friction=0.001)
def set_action(self, action: np.ndarray) -> None:
        action = action.copy()  # ensure the caller's action array is not modified
action = np.clip(action, self.action_space.low, self.action_space.high)
if self.control_type == "ee":
ee_displacement = action[:3]
target_arm_angles = self.ee_displacement_to_target_arm_angles(ee_displacement)
else:
arm_joint_ctrl = action[:7]
target_arm_angles = self.arm_joint_ctrl_to_target_arm_angles(arm_joint_ctrl)
if self.block_gripper:
target_fingers_width = 0
else:
fingers_ctrl = action[-1] * 0.2 # limit maximum change in position
fingers_width = self.get_fingers_width()
target_fingers_width = fingers_width + fingers_ctrl
target_angles = np.concatenate((target_arm_angles, [target_fingers_width / 2, target_fingers_width / 2]))
self.control_joints(target_angles=target_angles)
def ee_displacement_to_target_arm_angles(self, ee_displacement: np.ndarray) -> np.ndarray:
"""Compute the target arm angles from the end-effector displacement.
Args:
            ee_displacement (np.ndarray): End-effector displacement, as (dx, dy, dz).
Returns:
np.ndarray: Target arm angles, as the angles of the 7 arm joints.
"""
ee_displacement = ee_displacement[:3] * 0.05 # limit maximum change in position
# get the current position and the target position
ee_position = self.get_ee_position()
target_ee_position = ee_position + ee_displacement
# Clip the height target. For some reason, it has a great impact on learning
target_ee_position[2] = np.max((0, target_ee_position[2]))
# compute the new joint angles
target_arm_angles = self.inverse_kinematics(
link=self.ee_link, position=target_ee_position, orientation=np.array([1.0, 0.0, 0.0, 0.0])
)
target_arm_angles = target_arm_angles[:7] # remove fingers angles
return target_arm_angles
def arm_joint_ctrl_to_target_arm_angles(self, arm_joint_ctrl: np.ndarray) -> np.ndarray:
"""Compute the target arm angles from the arm joint control.
Args:
arm_joint_ctrl (np.ndarray): Control of the 7 joints.
Returns:
np.ndarray: Target arm angles, as the angles of the 7 arm joints.
"""
arm_joint_ctrl = arm_joint_ctrl * 0.05 # limit maximum change in position
# get the current position and the target position
current_arm_joint_angles = np.array([self.get_joint_angle(joint=i) for i in range(7)])
target_arm_angles = current_arm_joint_angles + arm_joint_ctrl
return target_arm_angles
def get_obs(self) -> np.ndarray:
# end-effector position and velocity
ee_position = np.array(self.get_ee_position())
ee_velocity = np.array(self.get_ee_velocity())
# fingers opening
if not self.block_gripper:
fingers_width = self.get_fingers_width()
obs = np.concatenate((ee_position, ee_velocity, [fingers_width]))
else:
obs = np.concatenate((ee_position, ee_velocity))
return obs
def reset(self) -> None:
self.set_joint_neutral()
def set_joint_neutral(self) -> None:
"""Set the robot to its neutral pose."""
self.set_joint_angles(self.neutral_joint_values)
def get_fingers_width(self) -> float:
"""Get the distance between the fingers."""
finger1 = self.sim.get_joint_angle(self.body_name, self.fingers_indices[0])
finger2 = self.sim.get_joint_angle(self.body_name, self.fingers_indices[1])
return finger1 + finger2
def get_ee_position(self) -> np.ndarray:
"""Returns the position of the ned-effector as (x, y, z)"""
return self.get_link_position(self.ee_link)
def get_ee_velocity(self) -> np.ndarray:
"""Returns the velocity of the end-effector as (vx, vy, vz)"""
return self.get_link_velocity(self.ee_link)
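# Note derived from __init__ above: with the default control_type="ee" and
# block_gripper=False the action space is Box(-1.0, 1.0, shape=(4,)), i.e.
# (dx, dy, dz, gripper); control_type="joints" with block_gripper=True gives shape=(7,).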
| 44.617021 | 123 | 0.658401 |
4281b1c239e6fdb3f3dc29bb3295b28c7b471597 | 2,637 | py | Python | serverless-functions/mltraining-numbers/numbers.py | OwenG88/taxinomitis | 3ab71d708bdc10301495eb8d689a1e1ef8ed22cd | ["Apache-2.0"] | 101 | 2018-06-29T07:39:25.000Z | 2022-01-07T23:44:27.000Z | serverless-functions/mltraining-numbers/numbers.py | yuzaishi/taxinomitis | ccc40b80cd8c52f9d710a84a82e6934d4af8846d | ["Apache-2.0"] | 254 | 2018-06-26T22:35:42.000Z | 2022-03-25T22:55:02.000Z | serverless-functions/mltraining-numbers/numbers.py | yuzaishi/taxinomitis | ccc40b80cd8c52f9d710a84a82e6934d4af8846d | ["Apache-2.0"] | 127 | 2018-06-27T15:08:39.000Z | 2022-03-30T06:37:26.000Z |
#!/usr/bin/env python
# reading the action input
import sys
import json
# building the decision tree
from sklearn.feature_extraction import DictVectorizer
from sklearn import tree
# visualising the decision tree
from pydotplus import graph_from_dot_data
# preparing the output for returning
from io import BytesIO
from base64 import b64encode
def main():
# processing parameters
params = json.loads(sys.argv[1])
outputformats = params.get('formats', [])
# decompress the training data
examplesCompressionKey = params.get('examplesKey', [])
compressedexamples = params.get('examples', [])
examples = []
for compressedexample in compressedexamples:
example = {}
for idx, key in enumerate(examplesCompressionKey):
example[key] = compressedexample[idx]
examples.append(example)
del compressedexamples
del examplesCompressionKey
# decompress the output labels
labelsCompressionKey = params.get('labelsKey', [])
compressedlabels = params.get('labels', [])
labels = []
for compressedlabel in compressedlabels:
labels.append(labelsCompressionKey[compressedlabel])
del compressedlabels
del labelsCompressionKey
# building decision tree classifier
vec = DictVectorizer(sparse=False)
dt = tree.DecisionTreeClassifier(random_state=42)
dt.fit(vec.fit_transform(examples), labels)
# creating decision tree visualization
dot_data = tree.export_graphviz(dt,
feature_names=vec.feature_names_,
class_names=dt.classes_,
impurity=False,
filled=True,
rounded=True)
graph = graph_from_dot_data(dot_data)
graph.set_size('"70"')
response = { 'vocabulary' : list(vec.vocabulary_) }
# generating output in requested formats
if 'png' in outputformats:
pngbuffer = BytesIO()
graph.write_png(pngbuffer)
pngbuffer.seek(0)
response['png'] = b64encode(pngbuffer.getvalue()).decode()
del pngbuffer
if 'dot' in outputformats:
dotbuffer = BytesIO()
graph.write(dotbuffer)
dotbuffer.seek(0)
response['dot'] = dotbuffer.getvalue().decode()
del dotbuffer
if 'svg' in outputformats:
svgbuffer = BytesIO()
graph.write_svg(svgbuffer)
svgbuffer.seek(0)
response['svg'] = svgbuffer.getvalue().decode().replace('\n', '')
del svgbuffer
print(json.dumps(response))
if __name__ == "__main__":
main()
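# Illustrative input shape (hypothetical values): the script expects one JSON
# argument whose examples/labels are compressed against key arrays, e.g.
#   {
#     "formats": ["svg"],
#     "examplesKey": ["size", "weight"],
#     "examples": [[1, 10], [2, 20]],
#     "labelsKey": ["apple", "melon"],
#     "labels": [0, 1]
#   }
# main() expands this to examples [{"size": 1, "weight": 10}, {"size": 2, "weight": 20}]
# and labels ["apple", "melon"] before fitting the decision tree.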
| 31.392857 | 73 | 0.644293 |
cd87e2ae473dde9acc22859dd18e08ad1eebd019 | 4,175 | py | Python | kite-exp/ml/web-content/sources/data-pipeline/so_dumps_processor/parse_dumps.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | ["BSD-3-Clause"] | 17 | 2022-01-10T11:01:50.000Z | 2022-03-25T03:21:08.000Z | kite-exp/ml/web-content/sources/data-pipeline/so_dumps_processor/parse_dumps.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | ["BSD-3-Clause"] | 1 | 2022-01-13T14:28:47.000Z | 2022-01-13T14:28:47.000Z | kite-exp/ml/web-content/sources/data-pipeline/so_dumps_processor/parse_dumps.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | ["BSD-3-Clause"] | 7 | 2022-01-07T03:58:10.000Z | 2022-03-24T07:38:20.000Z |
import xml.sax
from xml.sax import ContentHandler
import json
import os
from datetime import date, datetime
class SOPostHandler(ContentHandler):
def __init__(self):
self.python_posts = {}
self.counter = 0
self.python_counter = 0
def startElement(self, name, attrs):
self.counter += 1
if self.counter % 1000 == 0:
print("Parsed tag {}, and python posts {}".format(self.counter, self.python_counter))
if name != "row":
return
if attrs["PostTypeId"] != "1":
return
if "<python>" not in attrs["Tags"]:
return
self.python_counter += 1
post_id = int(attrs["Id"])
post = {
"PostID": post_id,
"Score": int(attrs["Score"]),
"ViewCount": int(attrs["ViewCount"]),
"Title": attrs["Title"],
"Tags": attrs["Tags"][1:-1].split("><"),
"AnswerCount": attrs["AnswerCount"],
"CommentCount": attrs["CommentCount"],
}
if "FavoriteCount" in attrs:
post["FavoriteCount"] = attrs["FavoriteCount"]
self.python_posts[post_id] = post
class SOPostExtractor(ContentHandler):
def __init__(self):
self.counter = 0
self.python_counter = 0
def startElement(self, name, attrs):
self.counter += 1
if self.counter % 100000 == 0:
print("Parsed tag {}, and python posts {}".format(self.counter, self.python_counter))
if name != "row":
return
if attrs["PostTypeId"] != "1":
return
post_id = int(attrs["Id"])
path = "/home/moe/workspace/web-content/questions_queries/{}.csv".format(post_id)
if not os.path.exists(path):
return
self.python_counter += 1
target_path = "/home/moe/workspace/web-content/questions_content/{}.txt".format(post_id)
with open(target_path, "w") as outfile:
outfile.write(attrs["Body"])
def _parse_so_dump(target):
parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_namespaces, 0)
handler = SOPostHandler()
parser.setContentHandler(handler)
parser.parse(target)
return handler.python_posts
def _add_view_count(recent_data, old_data, day_diff, normal_day_count=30):
for k in recent_data:
old_count = 0
if k in old_data:
            old_count = old_data[k]["ViewCount"]
new_count = recent_data[k]["ViewCount"]
recent_data[k]["newViews"] = int((new_count - old_count)*normal_day_count/day_diff)
def extract_SO_meta_informations(so_dump_file, so_dump_date, target_file, so_dump_previous_month=None, so_dump_previous_month_date=None):
"""
    Parse an SO dump file and extract the meta information of posts.
    If a second dump is provided, the delta of the view count between the two
    dumps is also computed and normalized to a 30-day window.
    :param so_dump_file: File to parse to extract post meta information
    :param so_dump_date: Date of the first dump (format YYYY/MM/DD)
    :param target_file: File to write the result (json dict format)
    :param so_dump_previous_month: Optional second file to parse to get a delta of view count
    :param so_dump_previous_month_date: Date of the previous dump (format YYYY/MM/DD), used to normalize the view count delta to 30 days
"""
recent_data = _parse_so_dump(so_dump_file)
if so_dump_previous_month:
if not so_dump_previous_month_date:
raise ValueError("Please provide the date of the previous SO dump. Can't normalize the view count without it")
date_format = "%Y/%m/%d"
recent = datetime.strptime(so_dump_date, date_format)
old = datetime.strptime(so_dump_previous_month_date, date_format)
delta_day = (recent - old).days
old_data = _parse_so_dump(so_dump_previous_month)
_add_view_count(recent_data, old_data, delta_day)
with open(target_file, "w") as outfile:
        json.dump(recent_data, outfile)
if __name__ == "__main__":
SO_dump_march_4th = "/data/kite/SO_dumps/Posts_march-2019.xml"
# with open("/data/kite/SO_dumps/python_posts_april.json", "w") as outfile:
# json.dump(handler.python_posts, outfile)
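    # Illustrative sketch only (hypothetical paths and dates, not part of the
    # original script): a full run would call the extractor roughly like this:
    # extract_SO_meta_informations(
    #     SO_dump_march_4th, "2019/03/04",
    #     "/data/kite/SO_dumps/python_posts_march.json",
    #     so_dump_previous_month="/data/kite/SO_dumps/Posts_february-2019.xml",
    #     so_dump_previous_month_date="2019/02/04")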
| 34.791667
| 137
| 0.643353
|
8f513b1637995603f5024e5bf33503f2c5a8c57b
| 5,175
|
py
|
Python
|
laygo/generators/splash/BagModules/adc_sar_templates_fdsoi/adc_retimer_slice.py
|
tinapiao/Software-IC-Automation
|
74b23cd94aa6e4658b110e93b5deb635e014f3a6
|
[
"BSD-3-Clause"
] | 26
|
2017-07-07T08:06:31.000Z
|
2021-11-25T06:41:24.000Z
|
laygo/generators/splash/BagModules/adc_sar_templates_fdsoi/adc_retimer_slice.py
|
tinapiao/Software-IC-Automation
|
74b23cd94aa6e4658b110e93b5deb635e014f3a6
|
[
"BSD-3-Clause"
] | 9
|
2016-12-28T03:08:29.000Z
|
2019-01-30T16:00:28.000Z
|
laygo/generators/splash/BagModules/adc_sar_templates_fdsoi/adc_retimer_slice.py
|
tinapiao/Software-IC-Automation
|
74b23cd94aa6e4658b110e93b5deb635e014f3a6
|
[
"BSD-3-Clause"
] | 10
|
2018-07-14T01:31:28.000Z
|
2021-08-21T10:18:30.000Z
|
# -*- coding: utf-8 -*-
########################################################################################################################
#
# Copyright (c) 2014, Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################################################################
from __future__ import (absolute_import, division,
print_function, unicode_literals)
# noinspection PyUnresolvedReferences,PyCompatibility
from builtins import *
import os
import pkg_resources
from bag.design import Module
yaml_file = pkg_resources.resource_filename(__name__, os.path.join('netlist_info', 'adc_retimer_slice.yaml'))
# noinspection PyPep8Naming
class adc_sar_templates__adc_retimer_slice(Module):
"""Module for library adc_sar_templates cell adc_retimer_slice.
Fill in high level description here.
"""
def __init__(self, bag_config, parent=None, prj=None, **kwargs):
Module.__init__(self, bag_config, yaml_file, parent=parent, prj=prj, **kwargs)
def design(self, lch, pw, nw, m, num_bits, device_intent='fast'):
"""To be overridden by subclasses to design this module.
This method should fill in values for all parameters in
self.parameters. To design instances of this module, you can
call their design() method or any other ways you coded.
To modify schematic structure, call:
rename_pin()
delete_instance()
replace_instance_master()
reconnect_instance_terminal()
restore_instance()
array_instance()
"""
for pn, p in zip(['lch', 'pw', 'nw', 'm', 'num_bits', 'device_intent'],
[lch, pw, nw, m, num_bits, device_intent]):
self.parameters[pn]=p
self.array_instance('ILATCH0', ['ILATCH0<%d:0>'%(num_bits-1)],
term_list=[{'I':'in<%d:0>'%(num_bits-1), 'O':'int0<%d:0>'%(num_bits-1)}])
self.instances['ILATCH0'][0].design(lch=lch, pw=pw, nw=nw, m=m, device_intent=device_intent)
self.array_instance('ILATCH1', ['ILATCH1<%d:0>'%(num_bits-1)],
term_list=[{'I':'int0<%d:0>'%(num_bits-1), 'O':'int1<%d:0>'%(num_bits-1)}])
self.instances['ILATCH1'][0].design(lch=lch, pw=pw, nw=nw, m=m, device_intent=device_intent)
self.array_instance('ILATCH2', ['ILATCH2<%d:0>'%(num_bits-1)],
term_list=[{'I':'int1<%d:0>'%(num_bits-1), 'O':'out<%d:0>'%(num_bits-1)}])
self.instances['ILATCH2'][0].design(lch=lch, pw=pw, nw=nw, m=m, device_intent=device_intent)
self.rename_pin('in', 'in<%d:0>'%(num_bits-1))
self.rename_pin('out', 'out<%d:0>'%(num_bits-1))
def get_layout_params(self, **kwargs):
"""Returns a dictionary with layout parameters.
        This method computes the layout parameters used to generate the implementation's
layout. Subclasses should override this method if you need to run post-extraction
layout.
Parameters
----------
kwargs :
any extra parameters you need to generate the layout parameters dictionary.
Usually you specify layout-specific parameters here, like metal layers of
input/output, customizable wire sizes, and so on.
Returns
-------
params : dict[str, any]
the layout parameters dictionary.
"""
return {}
def get_layout_pin_mapping(self):
"""Returns the layout pin mapping dictionary.
This method returns a dictionary used to rename the layout pins, in case they are different
than the schematic pins.
Returns
-------
pin_mapping : dict[str, str]
a dictionary from layout pin names to schematic pin names.
"""
return {}
| 43.487395
| 120
| 0.635169
|
80daff00ebec19bca41f9037bcf2dfccb434c8b2
| 8,089
|
py
|
Python
|
sdk/python/setup.py
|
vas28r13/feast
|
1ba86fb0cc7f2e86b8c70477462faa68075f99cd
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/setup.py
|
vas28r13/feast
|
1ba86fb0cc7f2e86b8c70477462faa68075f99cd
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/setup.py
|
vas28r13/feast
|
1ba86fb0cc7f2e86b8c70477462faa68075f99cd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import re
import shutil
import subprocess
import pathlib
from distutils.cmd import Command
from setuptools import find_packages
try:
from setuptools import setup
from setuptools.command.install import install
from setuptools.command.develop import develop
from setuptools.command.egg_info import egg_info
from setuptools.command.sdist import sdist
from setuptools.command.build_py import build_py
except ImportError:
from distutils.core import setup
from distutils.command.install import install
from distutils.command.build_py import build_py
NAME = "feast"
DESCRIPTION = "Python SDK for Feast"
URL = "https://github.com/feast-dev/feast"
AUTHOR = "Feast"
REQUIRES_PYTHON = ">=3.7.0"
REQUIRED = [
"Click>=7.*",
"colorama>=0.3.9",
"dill==0.3.*",
"fastavro>=1.1.0",
"google-api-core>=1.23.0",
"googleapis-common-protos==1.52.*",
"grpcio>=1.34.0",
"grpcio-reflection>=1.34.0",
"Jinja2>=2.0.0",
"jsonschema",
"mmh3",
"pandas>=1.0.0",
"pandavro==1.5.*",
"protobuf>=3.10",
"proto-plus<1.19.7",
"pyarrow>=4.0.0",
"pydantic>=1.0.0",
"PyYAML>=5.4.*",
"tabulate==0.8.*",
"tenacity>=7.*",
"toml==0.10.*",
"tqdm==4.*",
"fastapi>=0.68.0",
"uvicorn[standard]>=0.14.0",
"proto-plus<1.19.7",
"tensorflow-metadata>=1.0.0,<2.0.0",
"dask>=2021.*,<2022.02.0",
]
GCP_REQUIRED = [
"google-cloud-bigquery>=2.28.1",
"google-cloud-bigquery-storage >= 2.0.0",
"google-cloud-datastore>=2.1.*",
"google-cloud-storage>=1.34.*,<1.41",
"google-cloud-core>=1.4.0,<2.0.0",
]
REDIS_REQUIRED = [
"redis==3.5.3",
"redis-py-cluster>=2.1.3",
"hiredis>=2.0.0",
]
AWS_REQUIRED = [
"boto3>=1.17.0",
"docker>=5.0.2",
]
SNOWFLAKE_REQUIRED = [
"snowflake-connector-python[pandas]>=2.7.3",
]
GE_REQUIRED = [
"great_expectations>=0.14.0,<0.15.0"
]
CI_REQUIRED = (
[
"cryptography==3.3.2",
"flake8",
"black==19.10b0",
"isort>=5",
"grpcio-tools==1.34.0",
"grpcio-testing==1.34.0",
"minio==7.1.0",
"mock==2.0.0",
"moto",
"mypy==0.931",
"mypy-protobuf==3.1.0",
"avro==1.10.0",
"gcsfs",
"urllib3>=1.25.4",
"pytest>=6.0.0",
"pytest-cov",
"pytest-xdist",
"pytest-benchmark>=3.4.1",
"pytest-lazy-fixture==0.6.3",
"pytest-timeout==1.4.2",
"pytest-ordering==0.6.*",
"pytest-mock==1.10.4",
"Sphinx!=4.0.0,<4.4.0",
"sphinx-rtd-theme",
"testcontainers==3.4.2",
"adlfs==0.5.9",
"firebase-admin==4.5.2",
"pre-commit",
"assertpy==1.1",
"pip-tools",
"types-protobuf",
"types-python-dateutil",
"types-pytz",
"types-PyYAML",
"types-redis",
"types-requests",
"types-setuptools",
"types-tabulate",
]
+ GCP_REQUIRED
+ REDIS_REQUIRED
+ AWS_REQUIRED
+ SNOWFLAKE_REQUIRED
+ GE_REQUIRED
)
DEV_REQUIRED = ["mypy-protobuf>=3.1.0", "grpcio-testing==1.*"] + CI_REQUIRED
# Get git repo root directory
repo_root = str(pathlib.Path(__file__).resolve().parent.parent.parent)
# README file from Feast repo root directory
README_FILE = os.path.join(repo_root, "README.md")
with open(README_FILE, "r", encoding="utf8") as f:
LONG_DESCRIPTION = f.read()
# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.
# Regex modified from default tag regex in:
# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9
TAG_REGEX = re.compile(
r"^(?:[\/\w-]+)?(?P<version>[vV]?\d+(?:\.\d+){0,2}[^\+]*)(?:\+.*)?$"
)
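# Illustration only (hypothetical tag names, not part of the original setup.py):
# the pattern accepts both plain and prefix-qualified tags, capturing the trailing
# numeric version component in the "version" group, e.g.
#   TAG_REGEX.match("v0.18.1")
#   TAG_REGEX.match("sdk/go/v0.18.1")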
# Only set use_scm_version if git executable exists (setting this variable causes pip to use git under the hood)
if shutil.which("git"):
use_scm_version = {"root": "../..", "relative_to": __file__, "tag_regex": TAG_REGEX}
else:
use_scm_version = None
class BuildProtoCommand(Command):
description = "Builds the proto files into python files."
def initialize_options(self):
self.protoc = ["python", "-m", "grpc_tools.protoc"] # find_executable("protoc")
self.proto_folder = os.path.join(repo_root, "protos")
self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')
self.sub_folders = ["core", "serving", "types", "storage"]
def finalize_options(self):
pass
def _generate_protos(self, path):
proto_files = glob.glob(os.path.join(self.proto_folder, path))
subprocess.check_call(self.protoc + [
'-I', self.proto_folder,
'--python_out', self.this_package,
'--grpc_python_out', self.this_package,
'--mypy_out', self.this_package] + proto_files)
def run(self):
for sub_folder in self.sub_folders:
self._generate_protos(f'feast/{sub_folder}/*.proto')
from pathlib import Path
for path in Path('feast/protos').rglob('*.py'):
for folder in self.sub_folders:
# Read in the file
with open(path, 'r') as file:
filedata = file.read()
# Replace the target string
filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')
# Write the file out again
with open(path, 'w') as file:
file.write(filedata)
class BuildCommand(build_py):
"""Custom build command."""
def run(self):
self.run_command('build_proto')
build_py.run(self)
class DevelopCommand(develop):
"""Custom develop command."""
def run(self):
self.run_command('build_proto')
develop.run(self)
setup(
name=NAME,
author=AUTHOR,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=("tests",)),
install_requires=REQUIRED,
# https://stackoverflow.com/questions/28509965/setuptools-development-requirements
# Install dev requirements with: pip install -e .[dev]
extras_require={
"dev": DEV_REQUIRED,
"ci": CI_REQUIRED,
"gcp": GCP_REQUIRED,
"aws": AWS_REQUIRED,
"redis": REDIS_REQUIRED,
"snowflake": SNOWFLAKE_REQUIRED,
"ge": GE_REQUIRED,
},
include_package_data=True,
license="Apache",
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
entry_points={"console_scripts": ["feast=feast.cli:cli"]},
use_scm_version=use_scm_version,
setup_requires=["setuptools_scm", "grpcio", "grpcio-tools==1.34.0", "mypy-protobuf==3.1.0", "sphinx!=4.0.0"],
package_data={
"": [
"protos/feast/**/*.proto",
"protos/feast/third_party/grpc/health/v1/*.proto",
"feast/protos/feast/**/*.py",
],
},
cmdclass={
"build_proto": BuildProtoCommand,
"build_py": BuildCommand,
"develop": DevelopCommand,
},
)
| 29.414545
| 118
| 0.611324
|
86f8ca1b13eda43e23f2dfdcb9df9eb0bc89759f
| 9,153
|
py
|
Python
|
scripts/cyp2e1/hg38/bin/sv_modules.py
|
SBIMB/StellarPGx
|
42d4a34a26d2b4716a77aa889092f3a8fdb8a722
|
[
"MIT"
] | 9
|
2021-01-28T08:32:37.000Z
|
2022-01-19T22:55:13.000Z
|
scripts/cyp2e1/hg38/bin/sv_modules.py
|
SBIMB/CypGen
|
7746066f7f29d1339d67b6327ea92ca0cd1d45f4
|
[
"MIT"
] | 11
|
2021-05-10T11:27:32.000Z
|
2022-03-31T18:38:12.000Z
|
scripts/cyp2e1/hg38/bin/sv_modules.py
|
SBIMB/CypGen
|
7746066f7f29d1339d67b6327ea92ca0cd1d45f4
|
[
"MIT"
] | 3
|
2021-06-29T12:25:52.000Z
|
2021-10-05T21:41:17.000Z
|
#!/usr/bin/env python3
import os
import sys
import math
def get_total_CN(cov_file):
all_reg =[]
for line in open(cov_file, "r"):
line = line.strip().split()
all_reg.append(line)
av_2e1_cov = float(all_reg[0][3])/(float(all_reg[0][2]) - float(all_reg[0][1]))
av_vdr_cov = float(all_reg[1][3])/(float(all_reg[1][2]) - float(all_reg[1][1]))
av_egfr_cov = float(all_reg[2][3])/(float(all_reg[2][2]) - float(all_reg[2][1]))
# av_e1_int4 = float(all_reg[3][3])/(float(all_reg[3][2]) - float(all_reg[3][1]))
# av_int4_e9 = float(all_reg[4][3])/(float(all_reg[4][2]) - float(all_reg[4][1]))
av_ctrl_cov = (av_vdr_cov + av_egfr_cov)/2
comp_av = av_2e1_cov/av_ctrl_cov
temp_cn = 2 * comp_av
total_cn = round(temp_cn)
    return [str(int(total_cn)), round(av_2e1_cov), round(av_ctrl_cov)]  # , str(av_e1_int4), str(av_int4_e9)
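# Worked example (illustrative numbers only, not from the original data): if the
# average CYP2E1 coverage is 45x and the mean control (VDR/EGFR) coverage is 30x,
# then comp_av = 45/30 = 1.5 and the estimated total copy number is round(2 * 1.5) = 3.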
def del_test(sv_del):
if os.stat(sv_del).st_size == 0:
return "None"
else:
for line in open(sv_del, "r"):
if "COVERAGE" in line:
line = line.strip().split()
ABHom = line[-1]
ABHet = line[-2]
GT = line[2]
DP = int(line[3])
if float(ABHom) == 1.0:
return "*(full_gene_del)/*(full_gene_del)"
elif float(ABHom) == -1.0:
return "*(full_gene_del)"
else:
pass
hap_adv_list = []
hap_t1 = []
def del_adv_test(hap_dbs, cand_allele1, cand_allele2, test_allele1, test_allele2, core_vars):
g = open(hap_dbs, "r")
for line in g:
line = line.strip().split()
hap_adv_list.append(line)
a1 = core_vars.split(";")
for i in a1:
if i[-3:] == "0/1":
hap_t1.append(i[:-4])
for elem in hap_adv_list:
if elem[1] == cand_allele1:
list_t1 = (elem[2]).split(';')
if elem[1] == cand_allele2:
list_t2 = (elem[2]).split(';')
if hap_t1[0] in list_t1:
return test_allele1
elif hap_t1[0] in list_t2:
return test_allele2
het_hom_list = []
het_hom_list_new = []
def dup_test_init(sv_dup, av_cov):
for line in open(sv_dup, "r"):
if "COVERAGE" in line:
continue
elif "AGGREGATED" in line:
continue
else:
fields = line.strip().split()
het_hom_list.append(fields)
test_list1 = []
for i in het_hom_list:
test_list1.append(int(i[2]))
av_read_cov = sum(test_list1)/len(test_list1)
norm_cov = (av_cov + av_read_cov)/2
for i in het_hom_list:
supp_reads = round(float(i[-2])*int(i[2]))
i.append(round(supp_reads/norm_cov, 3))
i.append(supp_reads)
het_hom_list_new.append(i)
return (het_hom_list_new)
hap_def_list = []
allele_cn_list = []
def dup_test_cn_3_4(sv_dup, hap_dbs, cand_allele1, cand_allele2, test_allele1, test_allele2, c_num, av_cov, in_list):
g = open(hap_dbs, "r")
for line in g:
line = line.strip().split()
hap_def_list.append(line)
test_list1 = []
test_list2 = []
het_list = []
for i in in_list:
if i[1] == "0/1":
het_list.append(i)
for i in het_list:
test_list1.append(i[0])
test_list2.append(i[-2])
max_het = max(test_list2)
max_het_pos = test_list2.index(max_het)
var = test_list1[max_het_pos]
for elem in hap_def_list:
if elem[1] == cand_allele1:
list_3t = elem
list_3t_2 = list_3t[2].split(';')
l3 = len(list_3t_2)
if elem[1] == cand_allele2:
list_4t = elem
list_4t_2 = list_4t[2].split(';')
l4 = len(list_4t_2)
hdb_list = list_3t_2 + list_4t_2
index_var = hdb_list.index(var)
if index_var < l3:
allele_cn_list.append(test_allele1)
allele_cn_list.append(int(round(max_het*int(c_num))))
elif index_var >= l3:
allele_cn_list.append(test_allele2)
allele_cn_list.append(int(round(max_het*int(c_num))))
if allele_cn_list[0] == test_allele1:
rt_2 = int(c_num) - allele_cn_list[1]
allele_cn_list.append(test_allele2)
allele_cn_list.append(rt_2)
elif allele_cn_list[0] == test_allele2:
rt_2 = int(c_num) - allele_cn_list[1]
allele_cn_list.append(test_allele1)
allele_cn_list.append(rt_2)
if allele_cn_list[1] == 0:
res_dip = allele_cn_list[0] + "/" + allele_cn_list[2] + "x" + str(allele_cn_list[3] - 1)
elif allele_cn_list[3] == 0:
res_dip = allele_cn_list[2] + "/" + allele_cn_list[0] + "x" + str(allele_cn_list[1] - 1)
elif allele_cn_list[1] == 1:
res_dip = allele_cn_list[0] + "/" + allele_cn_list[2] + "x" + str(allele_cn_list[3])
elif allele_cn_list[3] == 1:
res_dip = allele_cn_list[2] + "/" + allele_cn_list[0] + "x" + str(allele_cn_list[1])
elif allele_cn_list[1] == 2:
res_dip = allele_cn_list[0] + "x2" + "/" + allele_cn_list[2] + "x" + str(allele_cn_list[3])
elif allele_cn_list[3] == 2:
res_dip = allele_cn_list[2] + "x2" + "/" + allele_cn_list[0] + "x" + str(allele_cn_list[1])
else:
res_dip = 'check'
return res_dip
def dup_test_cn_n(sv_dup, hap_dbs, cand_allele1, cand_allele2, test_allele1, test_allele2, c_num, av_cov, in_list):
g = open(hap_dbs, "r")
for line in g:
line = line.strip().split()
hap_def_list.append(line)
test_list1 = []
test_list2 = []
het_list = []
for i in in_list:
if i[1] == "0/1":
het_list.append(i)
for i in het_list:
test_list1.append(i[0])
test_list2.append(i[-2])
max_het = max(test_list2)
max_het_pos = test_list2.index(max_het)
var = test_list1[max_het_pos]
for elem in hap_def_list:
if elem[1] == cand_allele1:
list_3t = elem
list_3t_2 = list_3t[2].split(';')
l3 = len(list_3t_2)
if elem[1] == cand_allele2:
list_4t = elem
list_4t_2 = list_4t[2].split(';')
l4 = len(list_4t_2)
hdb_list = list_3t_2 + list_4t_2
index_var = hdb_list.index(var)
if index_var < l3:
allele_cn_list.append(test_allele1)
allele_cn_list.append(int(round(max_het*int(c_num)-0.15)))
elif index_var >= l3:
allele_cn_list.append(test_allele2)
allele_cn_list.append(int(round(max_het*int(c_num)-0.15)))
if allele_cn_list[0] == test_allele1:
rt_2 = int(c_num) - allele_cn_list[1]
allele_cn_list.append(test_allele2)
allele_cn_list.append(rt_2)
elif allele_cn_list[0] == test_allele2:
rt_2 = int(c_num) - allele_cn_list[1]
allele_cn_list.append(test_allele1)
allele_cn_list.append(rt_2)
if allele_cn_list[1] == 0:
res_dip = allele_cn_list[0] + "/" + allele_cn_list[2] + "x" + str(allele_cn_list[3] - 1)
elif allele_cn_list[3] == 0:
res_dip = allele_cn_list[2] + "/" + allele_cn_list[0] + "x" + str(allele_cn_list[1] - 1)
elif allele_cn_list[1] == 1:
res_dip = allele_cn_list[0] + "/" + allele_cn_list[2] + "x" + str(allele_cn_list[3])
elif allele_cn_list[3] == 1:
res_dip = allele_cn_list[2] + "/" + allele_cn_list[0] + "x" + str(allele_cn_list[1])
elif allele_cn_list[1] == 2:
res_dip = allele_cn_list[0] + "x2" + "/" + allele_cn_list[2] + "x" + str(allele_cn_list[3])
elif allele_cn_list[3] == 2:
res_dip = allele_cn_list[2] + "x2" + "/" + allele_cn_list[0] + "x" + str(allele_cn_list[1])
elif allele_cn_list[1] == 3:
res_dip = allele_cn_list[0] + "x3" + "/" + allele_cn_list[2] + "x" + str(allele_cn_list[3])
elif allele_cn_list[3] == 3:
res_dip = allele_cn_list[2] + "x3" + "/" + allele_cn_list[0] + "x" + str(allele_cn_list[1])
elif allele_cn_list[1] == 4:
res_dip = allele_cn_list[0] + "x4" + "/" + allele_cn_list[2] + "x" + str(allele_cn_list[3])
elif allele_cn_list[3] == 4:
res_dip = allele_cn_list[2] + "x4" + "/" + allele_cn_list[0] + "x" + str(allele_cn_list[1])
else:
res_dip = 'check'
return res_dip
# def hybrid_29_test1(cov_e1_int4, cov_int4_e9):
# if 0.85 < float(cov_e1_int4)/float(cov_int4_e9) < 1.2:
# return 'norm_var'
# elif 0.45 < float(cov_e1_int4)/float(cov_int4_e9) < 0.75:
# return 'hyb_29'
# elif float(cov_e1_int4)/float(cov_int4_e9) < 0.15:
# return 'hyb_29_2'
# else:
# return 'norm_var'
# def hybrid_30_test1(cov_e1_int4, cov_int4_e9):
# if 0.85 < float(cov_e1_int4)/float(cov_int4_e9) < 1.2:
# return 'norm_var'
# elif 0.45 < float(cov_int4_e9)/float(cov_e1_int4) < 0.75:
# return 'hyb_30'
# elif float(cov_int4_e9)/float(cov_e1_int4) < 0.15:
# return 'hyb_30_2'
# else:
# return 'norm_var'
| 27.569277
| 161
| 0.575112
|
d4b1c025d92ba032c7c675ca3f35f6ae44a9993e
| 363
|
py
|
Python
|
app/pokemon/migrations/0008_auto_20201115_0031.py
|
innacroft/PokemonService
|
3dade01c3fe5d5bc56ff631f69a5548fafe4d076
|
[
"MIT"
] | null | null | null |
app/pokemon/migrations/0008_auto_20201115_0031.py
|
innacroft/PokemonService
|
3dade01c3fe5d5bc56ff631f69a5548fafe4d076
|
[
"MIT"
] | null | null | null |
app/pokemon/migrations/0008_auto_20201115_0031.py
|
innacroft/PokemonService
|
3dade01c3fe5d5bc56ff631f69a5548fafe4d076
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.7 on 2020-11-15 00:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('pokemon', '0007_auto_20201115_0028'),
]
operations = [
migrations.RenameField(
model_name='pokemon',
old_name='width',
new_name='weight',
),
]
| 19.105263
| 47
| 0.584022
|
64753d5bb822515cbaaafe3388e0a9298e02baca
| 4,989
|
py
|
Python
|
testscripts/RDKB/component/CMHAL/TS_CMHAL_GetConfigFileName.py
|
cablelabs/tools-tdkb
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/CMHAL/TS_CMHAL_GetConfigFileName.py
|
cablelabs/tools-tdkb
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/CMHAL/TS_CMHAL_GetConfigFileName.py
|
cablelabs/tools-tdkb
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
[
"Apache-2.0"
] | null | null | null |
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>1</version>
<name>TS_CMHAL_GetConfigFileName</name>
<primitive_test_id/>
<primitive_test_name>CMHAL_GetParamCharValue</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>To check whether the ConfigFileName of docsis is getting updated properly</synopsis>
<groups_id/>
<execution_time>1</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_CMHAL_10</test_case_id>
<test_objective>To check whether the ConfigFileName of docsis is getting updated properly</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband</test_setup>
<pre_requisite>1. CCSP components, including the component under test (Cable Modem), should be in a running state on the DUT
2. TDK Agent should be in a running state, or invoke it through the StartTdk.sh script</pre_requisite>
<api_or_interface_used>docsis_GetDOCSISInfo</api_or_interface_used>
<input_parameters>paramName : "ConfigFileName"</input_parameters>
<automation_approch>1. Load the cmhal module
2. From the script, invoke CMHAL_GetParamCharValue()
3. The buffer is pre-filled with an invalid value (invalid.configfilename), so check whether the config file name is updated in the buffer successfully.
4. Validation of the result is done within the stub, and the result status is sent to Test Manager.
5. Test Manager will publish the result in the GUI as PASS/FAILURE based on the response from the TAD stub.</automation_approch>
<except_output>The config file name should be retrieved successfully</except_output>
<priority>High</priority>
<test_stub_interface>CosaCM</test_stub_interface>
<test_script>TS_CMHAL_GetConfigFileName</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("cmhal","1");
#IP and Port of box, no need to change;
#these will be replaced with the corresponding box IP and port while executing the script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_CMHAL_GetConfigFileName');
#Get the result of connection with test component and DUT
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus ;
if "SUCCESS" in loadmodulestatus.upper():
obj.setLoadModuleStatus("SUCCESS");
#Script to load the configuration file of the component
tdkTestObj = obj.createTestStep("CMHAL_GetParamCharValue");
tdkTestObj.addParameter("paramName","ConfigFileName");
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
ConfigFileName = tdkTestObj.getResultDetails();
if expectedresult in actualresult and ConfigFileName != " ":
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Get the ConfigFileName";
print "EXPECTED RESULT 1: Should get the ConfigFileName successfully";
print "ACTUAL RESULT 1: The config filename is %s" %ConfigFileName;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Get the ConfigFileName";
print "EXPECTED RESULT 1: Should get the ConfigFileName successfully";
        print "ACTUAL RESULT 1: Failed to get the config file name, Details %s" %ConfigFileName;
print "[TEST EXECUTION RESULT] : FAILURE";
obj.unloadModule("cmhal");
else:
print "Failed to load the module";
obj.setLoadModuleStatus("FAILURE");
print "Module loading failed";
| 43.763158
| 165
| 0.707958
|
61c598c13b636a8c209aab715a75e43fc4d1e886
| 793
|
py
|
Python
|
data_structures/python/dp/coin_exchange_dp.py
|
minicloudsky/leetcode_solutions
|
c9c1a87a61c2867bd1f7015f0ebc4acedde3a469
|
[
"MIT"
] | null | null | null |
data_structures/python/dp/coin_exchange_dp.py
|
minicloudsky/leetcode_solutions
|
c9c1a87a61c2867bd1f7015f0ebc4acedde3a469
|
[
"MIT"
] | null | null | null |
data_structures/python/dp/coin_exchange_dp.py
|
minicloudsky/leetcode_solutions
|
c9c1a87a61c2867bd1f7015f0ebc4acedde3a469
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
"""
Given an amount of money and an array of coin denominations (coins),
find the minimum number of coins needed to make up that amount.
e.g.
amount = 11
coins = [1,2,5]
output: 3 (two 5-coins + one 1-coin)
"""
def exchange(amount,coins):
    """Initialize every entry to amount+1, so that if dp[amount] is still
    amount+1 at the end, there is no solution.
    dp[i] is the minimum number of coins needed to make up amount i.
    """
dp = [amount+1 for x in range(amount+1)]
    # Making amount 0 requires no coins, so set dp[0] to 0
dp[0] = 0
for i in range(amount+1):
for j in range(len(coins)):
            # i is the amount and coins[j] is a coin denomination; if the amount is smaller than the coin value, this coin cannot be used
if i >=coins[j]:
dp[i] = min(dp[i - coins[j]] + 1,dp[i])
if dp[amount] == amount+1:
return -1
return dp[amount]
if __name__ == '__main__':
amount = 1111
coins = [1,2,5]
result = exchange(amount,coins)
print("result: {}".format(result))
| 22.657143
| 55
| 0.558638
|
5f1ba596e7c80dfd44da7d100715320f2dcb3fed
| 45,104
|
py
|
Python
|
learning.py
|
mikksillaste/aima-python
|
cf7b90bccf10e682f073bb5dbdc2056be6f4eb40
|
[
"MIT"
] | null | null | null |
learning.py
|
mikksillaste/aima-python
|
cf7b90bccf10e682f073bb5dbdc2056be6f4eb40
|
[
"MIT"
] | null | null | null |
learning.py
|
mikksillaste/aima-python
|
cf7b90bccf10e682f073bb5dbdc2056be6f4eb40
|
[
"MIT"
] | null | null | null |
"""Learn to estimate functions from examples. (Chapters 18, 20)"""
from utils import (
removeall, unique, product, mode, argmax, argmax_random_tie, isclose, gaussian,
dotproduct, vector_add, scalar_vector_product, weighted_sample_with_replacement,
weighted_sampler, num_or_str, normalize, clip, sigmoid, print_table,
open_data, sigmoid_derivative, probability, norm, matrix_multiplication
)
import copy
import heapq
import math
import random
from statistics import mean, stdev
from collections import defaultdict
# ______________________________________________________________________________
def euclidean_distance(X, Y):
return math.sqrt(sum([(x - y)**2 for x, y in zip(X, Y)]))
def rms_error(X, Y):
return math.sqrt(ms_error(X, Y))
def ms_error(X, Y):
return mean([(x - y)**2 for x, y in zip(X, Y)])
def mean_error(X, Y):
return mean([abs(x - y) for x, y in zip(X, Y)])
def manhattan_distance(X, Y):
return sum([abs(x - y) for x, y in zip(X, Y)])
def mean_boolean_error(X, Y):
return mean(int(x != y) for x, y in zip(X, Y))
def hamming_distance(X, Y):
return sum(x != y for x, y in zip(X, Y))
# ______________________________________________________________________________
class DataSet:
"""A data set for a machine learning problem. It has the following fields:
d.examples A list of examples. Each one is a list of attribute values.
d.attrs A list of integers to index into an example, so example[attr]
gives a value. Normally the same as range(len(d.examples[0])).
d.attrnames Optional list of mnemonic names for corresponding attrs.
d.target The attribute that a learning algorithm will try to predict.
By default the final attribute.
d.inputs The list of attrs without the target.
d.values A list of lists: each sublist is the set of possible
values for the corresponding attribute. If initially None,
it is computed from the known examples by self.setproblem.
If not None, an erroneous value raises ValueError.
d.distance A function from a pair of examples to a nonnegative number.
Should be symmetric, etc. Defaults to mean_boolean_error
since that can handle any field types.
d.name Name of the data set (for output display only).
d.source URL or other source where the data came from.
d.exclude A list of attribute indexes to exclude from d.inputs. Elements
of this list can either be integers (attrs) or attrnames.
Normally, you call the constructor and you're done; then you just
access fields like d.examples and d.target and d.inputs."""
def __init__(self, examples=None, attrs=None, attrnames=None, target=-1,
inputs=None, values=None, distance=mean_boolean_error,
name='', source='', exclude=()):
"""Accepts any of DataSet's fields. Examples can also be a
string or file from which to parse examples using parse_csv.
Optional parameter: exclude, as documented in .setproblem().
>>> DataSet(examples='1, 2, 3')
<DataSet(): 1 examples, 3 attributes>
"""
self.name = name
self.source = source
self.values = values
self.distance = distance
if values is None:
self.got_values_flag = False
else:
self.got_values_flag = True
# Initialize .examples from string or list or data directory
if isinstance(examples, str):
self.examples = parse_csv(examples)
elif examples is None:
self.examples = parse_csv(open_data(name + '.csv').read())
else:
self.examples = examples
# Attrs are the indices of examples, unless otherwise stated.
if attrs is None and self.examples is not None:
attrs = list(range(len(self.examples[0])))
self.attrs = attrs
# Initialize .attrnames from string, list, or by default
if isinstance(attrnames, str):
self.attrnames = attrnames.split()
else:
self.attrnames = attrnames or attrs
self.setproblem(target, inputs=inputs, exclude=exclude)
def setproblem(self, target, inputs=None, exclude=()):
"""Set (or change) the target and/or inputs.
This way, one DataSet can be used multiple ways. inputs, if specified,
is a list of attributes, or specify exclude as a list of attributes
to not use in inputs. Attributes can be -n .. n, or an attrname.
Also computes the list of possible values, if that wasn't done yet."""
self.target = self.attrnum(target)
exclude = list(map(self.attrnum, exclude))
if inputs:
self.inputs = removeall(self.target, inputs)
else:
self.inputs = [a for a in self.attrs
if a != self.target and a not in exclude]
if not self.values:
self.update_values()
self.check_me()
def check_me(self):
"""Check that my fields make sense."""
assert len(self.attrnames) == len(self.attrs)
assert self.target in self.attrs
assert self.target not in self.inputs
assert set(self.inputs).issubset(set(self.attrs))
if self.got_values_flag:
# only check if values are provided while initializing DataSet
list(map(self.check_example, self.examples))
def add_example(self, example):
"""Add an example to the list of examples, checking it first."""
self.check_example(example)
self.examples.append(example)
def check_example(self, example):
"""Raise ValueError if example has any invalid values."""
if self.values:
for a in self.attrs:
if example[a] not in self.values[a]:
raise ValueError('Bad value {} for attribute {} in {}'
.format(example[a], self.attrnames[a], example))
def attrnum(self, attr):
"""Returns the number used for attr, which can be a name, or -n .. n-1."""
if isinstance(attr, str):
return self.attrnames.index(attr)
elif attr < 0:
return len(self.attrs) + attr
else:
return attr
def update_values(self):
self.values = list(map(unique, zip(*self.examples)))
def sanitize(self, example):
"""Return a copy of example, with non-input attributes replaced by None."""
return [attr_i if i in self.inputs else None
for i, attr_i in enumerate(example)]
def classes_to_numbers(self, classes=None):
"""Converts class names to numbers."""
if not classes:
# If classes were not given, extract them from values
classes = sorted(self.values[self.target])
for item in self.examples:
item[self.target] = classes.index(item[self.target])
def remove_examples(self, value=""):
"""Remove examples that contain given value."""
self.examples = [x for x in self.examples if value not in x]
self.update_values()
def split_values_by_classes(self):
"""Split values into buckets according to their class."""
buckets = defaultdict(lambda: [])
target_names = self.values[self.target]
for v in self.examples:
item = [a for a in v if a not in target_names] # Remove target from item
buckets[v[self.target]].append(item) # Add item to bucket of its class
return buckets
def find_means_and_deviations(self):
"""Finds the means and standard deviations of self.dataset.
means : A dictionary for each class/target. Holds a list of the means
of the features for the class.
deviations: A dictionary for each class/target. Holds a list of the sample
standard deviations of the features for the class."""
target_names = self.values[self.target]
feature_numbers = len(self.inputs)
item_buckets = self.split_values_by_classes()
means = defaultdict(lambda: [0 for i in range(feature_numbers)])
deviations = defaultdict(lambda: [0 for i in range(feature_numbers)])
for t in target_names:
# Find all the item feature values for item in class t
features = [[] for i in range(feature_numbers)]
for item in item_buckets[t]:
features = [features[i] + [item[i]] for i in range(feature_numbers)]
            # Calculate means and deviations for the class
for i in range(feature_numbers):
means[t][i] = mean(features[i])
deviations[t][i] = stdev(features[i])
return means, deviations
def __repr__(self):
return '<DataSet({}): {:d} examples, {:d} attributes>'.format(
self.name, len(self.examples), len(self.attrs))
# ______________________________________________________________________________
def parse_csv(input, delim=','):
r"""Input is a string consisting of lines, each line has comma-delimited
fields. Convert this into a list of lists. Blank lines are skipped.
Fields that look like numbers are converted to numbers.
The delim defaults to ',' but '\t' and None are also reasonable values.
>>> parse_csv('1, 2, 3 \n 0, 2, na')
[[1, 2, 3], [0, 2, 'na']]"""
lines = [line for line in input.splitlines() if line.strip()]
return [list(map(num_or_str, line.split(delim))) for line in lines]
# ______________________________________________________________________________
class CountingProbDist:
"""A probability distribution formed by observing and counting examples.
If p is an instance of this class and o is an observed value, then
there are 3 main operations:
p.add(o) increments the count for observation o by 1.
p.sample() returns a random element from the distribution.
p[o] returns the probability for o (as in a regular ProbDist)."""
def __init__(self, observations=[], default=0):
"""Create a distribution, and optionally add in some observations.
By default this is an unsmoothed distribution, but saying default=1,
for example, gives you add-one smoothing."""
self.dictionary = {}
self.n_obs = 0.0
self.default = default
self.sampler = None
for o in observations:
self.add(o)
def add(self, o):
"""Add an observation o to the distribution."""
self.smooth_for(o)
self.dictionary[o] += 1
self.n_obs += 1
self.sampler = None
def smooth_for(self, o):
"""Include o among the possible observations, whether or not
it's been observed yet."""
if o not in self.dictionary:
self.dictionary[o] = self.default
self.n_obs += self.default
self.sampler = None
def __getitem__(self, item):
"""Return an estimate of the probability of item."""
self.smooth_for(item)
return self.dictionary[item] / self.n_obs
# (top() and sample() are not used in this module, but elsewhere.)
def top(self, n):
"""Return (count, obs) tuples for the n most frequent observations."""
return heapq.nlargest(n, [(v, k) for (k, v) in self.dictionary.items()])
def sample(self):
"""Return a random sample from the distribution."""
if self.sampler is None:
self.sampler = weighted_sampler(list(self.dictionary.keys()),
list(self.dictionary.values()))
return self.sampler()
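# A minimal usage sketch (not part of the original module), assuming the class above:
#   p = CountingProbDist('abbb', default=1)   # observe 'a' once and 'b' three times, with add-one smoothing
#   p.add('b')                                # increment the count for 'b'
#   p['b']                                    # smoothed relative frequency of 'b'
#   p.sample()                                # random draw weighted by the observed counts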
# ______________________________________________________________________________
def PluralityLearner(dataset):
"""A very dumb algorithm: always pick the result that was most popular
in the training data. Makes a baseline for comparison."""
most_popular = mode([e[dataset.target] for e in dataset.examples])
def predict(example):
"""Always return same result: the most popular from the training set."""
return most_popular
return predict
# ______________________________________________________________________________
def NaiveBayesLearner(dataset, continuous=True, simple=False):
if simple:
return NaiveBayesSimple(dataset)
    if continuous:
return NaiveBayesContinuous(dataset)
else:
return NaiveBayesDiscrete(dataset)
def NaiveBayesSimple(distribution):
"""A simple naive bayes classifier that takes as input a dictionary of
CountingProbDist objects and classifies items according to these distributions.
The input dictionary is in the following form:
(ClassName, ClassProb): CountingProbDist"""
target_dist = {c_name: prob for c_name, prob in distribution.keys()}
attr_dists = {c_name: count_prob for (c_name, _), count_prob in distribution.items()}
def predict(example):
"""Predict the target value for example. Calculate probabilities for each
class and pick the max."""
def class_probability(targetval):
attr_dist = attr_dists[targetval]
return target_dist[targetval] * product(attr_dist[a] for a in example)
return argmax(target_dist.keys(), key=class_probability)
return predict
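# Sketch of the input shape expected by NaiveBayesSimple (hypothetical values,
# not part of the original module):
#   distribution = {
#       ('Yes', 0.7): CountingProbDist(['sunny', 'hot', 'sunny']),
#       ('No', 0.3): CountingProbDist(['rainy', 'mild']),
#   }
#   predict = NaiveBayesSimple(distribution)
#   predict(['sunny', 'hot'])   # picks the class with the highest posterior ('Yes' here)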
def NaiveBayesDiscrete(dataset):
"""Just count how many times each value of each input attribute
occurs, conditional on the target value. Count the different
target values too."""
target_vals = dataset.values[dataset.target]
target_dist = CountingProbDist(target_vals)
attr_dists = {(gv, attr): CountingProbDist(dataset.values[attr])
for gv in target_vals
for attr in dataset.inputs}
for example in dataset.examples:
targetval = example[dataset.target]
target_dist.add(targetval)
for attr in dataset.inputs:
attr_dists[targetval, attr].add(example[attr])
def predict(example):
"""Predict the target value for example. Consider each possible value,
and pick the most likely by looking at each attribute independently."""
def class_probability(targetval):
return (target_dist[targetval] *
product(attr_dists[targetval, attr][example[attr]]
for attr in dataset.inputs))
return argmax(target_vals, key=class_probability)
return predict
def NaiveBayesContinuous(dataset):
"""Count how many times each target value occurs.
Also, find the means and deviations of input attribute values for each target value."""
means, deviations = dataset.find_means_and_deviations()
target_vals = dataset.values[dataset.target]
target_dist = CountingProbDist(target_vals)
def predict(example):
"""Predict the target value for example. Consider each possible value,
and pick the most likely by looking at each attribute independently."""
def class_probability(targetval):
prob = target_dist[targetval]
for attr in dataset.inputs:
prob *= gaussian(means[targetval][attr], deviations[targetval][attr], example[attr])
return prob
return argmax(target_vals, key=class_probability)
return predict
# ______________________________________________________________________________
def NearestNeighborLearner(dataset, k=1):
"""k-NearestNeighbor: the k nearest neighbors vote."""
def predict(example):
"""Find the k closest items, and have them vote for the best."""
best = heapq.nsmallest(k, ((dataset.distance(e, example), e)
for e in dataset.examples))
return mode(e[dataset.target] for (d, e) in best)
return predict
# ______________________________________________________________________________
def truncated_svd(X, num_val=2, max_iter=1000):
"""Computes the first component of SVD"""
def normalize_vec(X, n = 2):
"""Normalizes two parts (:m and m:) of the vector"""
X_m = X[:m]
X_n = X[m:]
norm_X_m = norm(X_m, n)
Y_m = [x/norm_X_m for x in X_m]
norm_X_n = norm(X_n, n)
Y_n = [x/norm_X_n for x in X_n]
return Y_m + Y_n
def remove_component(X):
"""Removes components of already obtained eigen vectors from X"""
X_m = X[:m]
X_n = X[m:]
for eivec in eivec_m:
coeff = dotproduct(X_m, eivec)
X_m = [x1 - coeff*x2 for x1, x2 in zip(X_m, eivec)]
for eivec in eivec_n:
coeff = dotproduct(X_n, eivec)
X_n = [x1 - coeff*x2 for x1, x2 in zip(X_n, eivec)]
return X_m + X_n
m, n = len(X), len(X[0])
A = [[0 for _ in range(n + m)] for _ in range(n + m)]
for i in range(m):
for j in range(n):
A[i][m + j] = A[m + j][i] = X[i][j]
eivec_m = []
eivec_n = []
eivals = []
for _ in range(num_val):
X = [random.random() for _ in range(m + n)]
X = remove_component(X)
X = normalize_vec(X)
for _ in range(max_iter):
old_X = X
X = matrix_multiplication(A, [[x] for x in X])
X = [x[0] for x in X]
X = remove_component(X)
X = normalize_vec(X)
# check for convergence
if norm([x1 - x2 for x1, x2 in zip(old_X, X)]) <= 1e-10:
break
projected_X = matrix_multiplication(A, [[x] for x in X])
projected_X = [x[0] for x in projected_X]
eivals.append(norm(projected_X, 1)/norm(X, 1))
eivec_m.append(X[:m])
eivec_n.append(X[m:])
return (eivec_m, eivec_n, eivals)
# ______________________________________________________________________________
class DecisionFork:
"""A fork of a decision tree holds an attribute to test, and a dict
of branches, one for each of the attribute's values."""
def __init__(self, attr, attrname=None, default_child=None, branches=None):
"""Initialize by saying what attribute this node tests."""
self.attr = attr
self.attrname = attrname or attr
self.default_child = default_child
self.branches = branches or {}
def __call__(self, example):
"""Given an example, classify it using the attribute and the branches."""
attrvalue = example[self.attr]
if attrvalue in self.branches:
return self.branches[attrvalue](example)
else:
# return default class when attribute is unknown
return self.default_child(example)
def add(self, val, subtree):
"""Add a branch. If self.attr = val, go to the given subtree."""
self.branches[val] = subtree
def display(self, indent=0):
name = self.attrname
print('Test', name)
for (val, subtree) in self.branches.items():
print(' ' * 4 * indent, name, '=', val, '==>', end=' ')
subtree.display(indent + 1)
def __repr__(self):
return ('DecisionFork({0!r}, {1!r}, {2!r})'
.format(self.attr, self.attrname, self.branches))
class DecisionLeaf:
"""A leaf of a decision tree holds just a result."""
def __init__(self, result):
self.result = result
def __call__(self, example):
return self.result
def display(self, indent=0):
print('RESULT =', self.result)
def __repr__(self):
return repr(self.result)
# ______________________________________________________________________________
def DecisionTreeLearner(dataset):
"""[Figure 18.5]"""
target, values = dataset.target, dataset.values
def decision_tree_learning(examples, attrs, parent_examples=()):
if len(examples) == 0:
return plurality_value(parent_examples)
elif all_same_class(examples):
return DecisionLeaf(examples[0][target])
elif len(attrs) == 0:
return plurality_value(examples)
else:
A = choose_attribute(attrs, examples)
tree = DecisionFork(A, dataset.attrnames[A], plurality_value(examples))
for (v_k, exs) in split_by(A, examples):
subtree = decision_tree_learning(
exs, removeall(A, attrs), examples)
tree.add(v_k, subtree)
return tree
def plurality_value(examples):
"""Return the most popular target value for this set of examples.
(If target is binary, this is the majority; otherwise plurality.)"""
popular = argmax_random_tie(values[target],
key=lambda v: count(target, v, examples))
return DecisionLeaf(popular)
def count(attr, val, examples):
"""Count the number of examples that have attr = val."""
return sum(e[attr] == val for e in examples)
def all_same_class(examples):
"""Are all these examples in the same target class?"""
class0 = examples[0][target]
return all(e[target] == class0 for e in examples)
def choose_attribute(attrs, examples):
"""Choose the attribute with the highest information gain."""
return argmax_random_tie(attrs,
key=lambda a: information_gain(a, examples))
def information_gain(attr, examples):
"""Return the expected reduction in entropy from splitting by attr."""
def I(examples):
return information_content([count(target, v, examples)
for v in values[target]])
N = float(len(examples))
remainder = sum((len(examples_i) / N) * I(examples_i)
for (v, examples_i) in split_by(attr, examples))
return I(examples) - remainder
def split_by(attr, examples):
"""Return a list of (val, examples) pairs for each val of attr."""
return [(v, [e for e in examples if e[attr] == v])
for v in values[attr]]
return decision_tree_learning(dataset.examples, dataset.inputs)
def information_content(values):
"""Number of bits to represent the probability distribution in values."""
probabilities = normalize(removeall(0, values))
return sum(-p * math.log2(p) for p in probabilities)
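# Worked example (not part of the original module): a 50/50 split needs one bit,
# e.g. information_content([2, 2]) == 1.0, while a pure split such as
# information_content([4, 0]) == 0.0 because zero counts are removed before normalizing.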
# ______________________________________________________________________________
def RandomForest(dataset, n=5):
"""An ensemble of Decision Trees trained using bagging and feature bagging."""
def data_bagging(dataset, m=0):
"""Sample m examples with replacement"""
n = len(dataset.examples)
return weighted_sample_with_replacement(m or n, dataset.examples, [1]*n)
def feature_bagging(dataset, p=0.7):
"""Feature bagging with probability p to retain an attribute"""
inputs = [i for i in dataset.inputs if probability(p)]
return inputs or dataset.inputs
def predict(example):
print([predictor(example) for predictor in predictors])
return mode(predictor(example) for predictor in predictors)
predictors = [DecisionTreeLearner(DataSet(examples=data_bagging(dataset),
attrs=dataset.attrs,
attrnames=dataset.attrnames,
target=dataset.target,
inputs=feature_bagging(dataset))) for _ in range(n)]
return predict
# ______________________________________________________________________________
# A decision list is implemented as a list of (test, value) pairs.
def DecisionListLearner(dataset):
"""[Figure 18.11]"""
def decision_list_learning(examples):
if not examples:
return [(True, False)]
t, o, examples_t = find_examples(examples)
if not t:
raise Exception
return [(t, o)] + decision_list_learning(examples - examples_t)
def find_examples(examples):
"""Find a set of examples that all have the same outcome under
some test. Return a tuple of the test, outcome, and examples."""
raise NotImplementedError
def passes(example, test):
"""Does the example pass the test?"""
raise NotImplementedError
def predict(example):
"""Predict the outcome for the first passing test."""
for test, outcome in predict.decision_list:
if passes(example, test):
return outcome
predict.decision_list = decision_list_learning(set(dataset.examples))
return predict
# ______________________________________________________________________________
def NeuralNetLearner(dataset, hidden_layer_sizes=[3],
learning_rate=0.01, epochs=100):
"""Layered feed-forward network.
hidden_layer_sizes: List of number of hidden units per hidden layer
learning_rate: Learning rate of gradient descent
epochs: Number of passes over the dataset
"""
i_units = len(dataset.inputs)
o_units = len(dataset.values[dataset.target])
# construct a network
raw_net = network(i_units, hidden_layer_sizes, o_units)
learned_net = BackPropagationLearner(dataset, raw_net,
learning_rate, epochs)
def predict(example):
# Input nodes
i_nodes = learned_net[0]
# Activate input layer
for v, n in zip(example, i_nodes):
n.value = v
# Forward pass
for layer in learned_net[1:]:
for node in layer:
inc = [n.value for n in node.inputs]
in_val = dotproduct(inc, node.weights)
node.value = node.activation(in_val)
# Hypothesis
o_nodes = learned_net[-1]
prediction = find_max_node(o_nodes)
return prediction
return predict
def random_weights(min_value, max_value, num_weights):
return [random.uniform(min_value, max_value) for i in range(num_weights)]
def BackPropagationLearner(dataset, net, learning_rate, epochs):
"""[Figure 18.23] The back-propagation algorithm for multilayer network"""
# Initialise weights
for layer in net:
for node in layer:
node.weights = random_weights(min_value=-0.5, max_value=0.5,
num_weights=len(node.weights))
examples = dataset.examples
'''
    As of now dataset.target gives an int instead of a list.
    Changing the DataSet class would affect all the learners,
    so this will be taken care of later.
'''
o_nodes = net[-1]
i_nodes = net[0]
o_units = len(o_nodes)
idx_t = dataset.target
idx_i = dataset.inputs
n_layers = len(net)
inputs, targets = init_examples(examples, idx_i, idx_t, o_units)
for epoch in range(epochs):
# Iterate over each example
for e in range(len(examples)):
i_val = inputs[e]
t_val = targets[e]
# Activate input layer
for v, n in zip(i_val, i_nodes):
n.value = v
# Forward pass
for layer in net[1:]:
for node in layer:
inc = [n.value for n in node.inputs]
in_val = dotproduct(inc, node.weights)
node.value = node.activation(in_val)
# Initialize delta
delta = [[] for i in range(n_layers)]
# Compute outer layer delta
# Error for the MSE cost function
err = [t_val[i] - o_nodes[i].value for i in range(o_units)]
# The activation function used is the sigmoid function
delta[-1] = [sigmoid_derivative(o_nodes[i].value) * err[i] for i in range(o_units)]
# Backward pass
h_layers = n_layers - 2
for i in range(h_layers, 0, -1):
layer = net[i]
h_units = len(layer)
nx_layer = net[i+1]
# weights from each ith layer node to each i + 1th layer node
w = [[node.weights[k] for node in nx_layer] for k in range(h_units)]
delta[i] = [sigmoid_derivative(layer[j].value) * dotproduct(w[j], delta[i+1])
for j in range(h_units)]
# Update weights
for i in range(1, n_layers):
layer = net[i]
inc = [node.value for node in net[i-1]]
units = len(layer)
for j in range(units):
layer[j].weights = vector_add(layer[j].weights,
scalar_vector_product(
learning_rate * delta[i][j], inc))
return net
def PerceptronLearner(dataset, learning_rate=0.01, epochs=100):
"""Logistic Regression, NO hidden layer"""
i_units = len(dataset.inputs)
o_units = len(dataset.values[dataset.target])
hidden_layer_sizes = []
raw_net = network(i_units, hidden_layer_sizes, o_units)
learned_net = BackPropagationLearner(dataset, raw_net, learning_rate, epochs)
def predict(example):
o_nodes = learned_net[1]
# Forward pass
for node in o_nodes:
in_val = dotproduct(example, node.weights)
node.value = node.activation(in_val)
# Hypothesis
return find_max_node(o_nodes)
return predict
class NNUnit:
"""Single Unit of Multiple Layer Neural Network
inputs: Incoming connections
weights: Weights to incoming connections
"""
def __init__(self, weights=None, inputs=None):
self.weights = []
self.inputs = []
self.value = None
self.activation = sigmoid
def network(input_units, hidden_layer_sizes, output_units):
"""Create Directed Acyclic Network of given number layers.
hidden_layers_sizes : List number of neuron units in each hidden layer
excluding input and output layers
"""
# Check for PerceptronLearner
if hidden_layer_sizes:
layers_sizes = [input_units] + hidden_layer_sizes + [output_units]
else:
layers_sizes = [input_units] + [output_units]
net = [[NNUnit() for n in range(size)]
for size in layers_sizes]
n_layers = len(net)
# Make Connection
for i in range(1, n_layers):
for n in net[i]:
for k in net[i-1]:
n.inputs.append(k)
n.weights.append(0)
return net
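# Shape sketch (not part of the original module): network(2, [3], 2) builds three
# layers of NNUnit objects with sizes [2, 3, 2], each layer fully connected to the
# previous one.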
def init_examples(examples, idx_i, idx_t, o_units):
inputs = {}
targets = {}
for i in range(len(examples)):
e = examples[i]
# Input values of e
inputs[i] = [e[i] for i in idx_i]
if o_units > 1:
# One-Hot representation of e's target
t = [0 for i in range(o_units)]
t[e[idx_t]] = 1
targets[i] = t
else:
# Target value of e
targets[i] = [e[idx_t]]
return inputs, targets
def find_max_node(nodes):
return nodes.index(argmax(nodes, key=lambda node: node.value))
# ______________________________________________________________________________
def LinearLearner(dataset, learning_rate=0.01, epochs=100):
"""Define with learner = LinearLearner(data); infer with learner(x)."""
idx_i = dataset.inputs
idx_t = dataset.target # As of now, dataset.target gives only one index.
examples = dataset.examples
num_examples = len(examples)
# X transpose
X_col = [dataset.values[i] for i in idx_i] # vertical columns of X
# Add dummy
ones = [1 for _ in range(len(examples))]
X_col = [ones] + X_col
    # Initialize random weights
num_weights = len(idx_i) + 1
w = random_weights(min_value=-0.5, max_value=0.5, num_weights=num_weights)
for epoch in range(epochs):
err = []
# Pass over all examples
for example in examples:
x = [1] + example
y = dotproduct(w, x)
t = example[idx_t]
err.append(t - y)
# update weights
for i in range(len(w)):
w[i] = w[i] + learning_rate * (dotproduct(err, X_col[i]) / num_examples)
def predict(example):
x = [1] + example
return dotproduct(w, x)
return predict
# ______________________________________________________________________________
def EnsembleLearner(learners):
"""Given a list of learning algorithms, have them vote."""
def train(dataset):
predictors = [learner(dataset) for learner in learners]
def predict(example):
return mode(predictor(example) for predictor in predictors)
return predict
return train
# ______________________________________________________________________________
def AdaBoost(L, K):
"""[Figure 18.34]"""
def train(dataset):
examples, target = dataset.examples, dataset.target
N = len(examples)
epsilon = 1. / (2 * N)
w = [1. / N] * N
h, z = [], []
for k in range(K):
h_k = L(dataset, w)
h.append(h_k)
error = sum(weight for example, weight in zip(examples, w)
if example[target] != h_k(example))
# Avoid divide-by-0 from either 0% or 100% error rates:
error = clip(error, epsilon, 1 - epsilon)
for j, example in enumerate(examples):
if example[target] == h_k(example):
w[j] *= error / (1. - error)
w = normalize(w)
z.append(math.log((1. - error) / error))
return WeightedMajority(h, z)
return train
def WeightedMajority(predictors, weights):
"""Return a predictor that takes a weighted vote."""
def predict(example):
return weighted_mode((predictor(example) for predictor in predictors),
weights)
return predict
def weighted_mode(values, weights):
"""Return the value with the greatest total weight.
>>> weighted_mode('abbaa', [1,2,3,1,2])
'b'
"""
totals = defaultdict(int)
for v, w in zip(values, weights):
totals[v] += w
return max(list(totals.keys()), key=totals.get)
# _____________________________________________________________________________
# Adapting an unweighted learner for AdaBoost
def WeightedLearner(unweighted_learner):
"""Given a learner that takes just an unweighted dataset, return
    one that also takes a weight for each example. [p. 749 footnote 14]"""
def train(dataset, weights):
return unweighted_learner(replicated_dataset(dataset, weights))
return train
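# Example (illustrative): AdaBoost expects a learner that accepts example
# weights, so an unweighted learner such as DecisionTreeLearner is adapted
# with WeightedLearner:
#
#     boosted = AdaBoost(WeightedLearner(DecisionTreeLearner), 5)
#     predictor = boosted(restaurant)    # `restaurant` is defined later in this module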
def replicated_dataset(dataset, weights, n=None):
"""Copy dataset, replicating each example in proportion to its weight."""
n = n or len(dataset.examples)
result = copy.copy(dataset)
result.examples = weighted_replicate(dataset.examples, weights, n)
return result
def weighted_replicate(seq, weights, n):
"""Return n selections from seq, with the count of each element of
seq proportional to the corresponding weight (filling in fractions
randomly).
>>> weighted_replicate('ABC', [1,2,1], 4)
['A', 'B', 'B', 'C']
"""
assert len(seq) == len(weights)
weights = normalize(weights)
wholes = [int(w * n) for w in weights]
fractions = [(w * n) % 1 for w in weights]
return (flatten([x] * nx for x, nx in zip(seq, wholes)) +
weighted_sample_with_replacement(n - sum(wholes), seq, fractions))
def flatten(seqs): return sum(seqs, [])
# _____________________________________________________________________________
# Functions for testing learners on examples
def err_ratio(predict, dataset, examples=None, verbose=0):
"""Return the proportion of the examples that are NOT correctly predicted."""
"""verbose - 0: No output; 1: Output wrong; 2 (or greater): Output correct"""
if examples is None:
examples = dataset.examples
if len(examples) == 0:
return 0.0
right = 0.0
for example in examples:
desired = example[dataset.target]
output = predict(dataset.sanitize(example))
if output == desired:
right += 1
if verbose >= 2:
print(' OK: got {} for {}'.format(desired, example))
elif verbose:
print('WRONG: got {}, expected {} for {}'.format(
output, desired, example))
return 1 - (right / len(examples))
def grade_learner(predict, tests):
"""Grades the given learner based on how many tests it passes.
tests is a list with each element in the form: (values, output)."""
return mean(int(predict(X) == y) for X, y in tests)
def train_and_test(dataset, start, end):
"""Reserve dataset.examples[start:end] for test; train on the remainder."""
start = int(start)
end = int(end)
examples = dataset.examples
train = examples[:start] + examples[end:]
val = examples[start:end]
return train, val
def cross_validation(learner, size, dataset, k=10, trials=1):
"""Do k-fold cross_validate and return their mean.
That is, keep out 1/k of the examples for testing on each of k runs.
Shuffle the examples first; if trials>1, average over several shuffles.
Returns Training error, Validataion error"""
if k is None:
k = len(dataset.examples)
if trials > 1:
trial_errT = 0
trial_errV = 0
for t in range(trials):
            errT, errV = cross_validation(learner, size, dataset,
                                          k=k, trials=1)
trial_errT += errT
trial_errV += errV
return trial_errT / trials, trial_errV / trials
else:
fold_errT = 0
fold_errV = 0
n = len(dataset.examples)
examples = dataset.examples
for fold in range(k):
random.shuffle(dataset.examples)
train_data, val_data = train_and_test(dataset, fold * (n / k),
(fold + 1) * (n / k))
dataset.examples = train_data
h = learner(dataset, size)
fold_errT += err_ratio(h, dataset, train_data)
fold_errV += err_ratio(h, dataset, val_data)
            # Restore the original examples once the fold is done
dataset.examples = examples
return fold_errT / k, fold_errV / k
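# Example (illustrative): cross_validation calls learner(dataset, size), so a
# learner that takes only a dataset needs a small adapter:
#
#     nb = lambda ds, size: NaiveBayesLearner(ds)
#     errT, errV = cross_validation(nb, None, iris, k=10)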
def cross_validation_wrapper(learner, dataset, k=10, trials=1):
"""[Fig 18.8]
Return the optimal value of size having minimum error
on validation set.
err_train: A training error array, indexed by size
err_val: A validation error array, indexed by size
"""
err_val = []
err_train = []
size = 1
while True:
errT, errV = cross_validation(learner, size, dataset, k)
        # Check for convergence, provided err_train is not empty
        if err_train and isclose(err_train[-1], errT, rel_tol=1e-6):
            best_size = 1
            min_val = math.inf
            for i in range(len(err_val)):
                if err_val[i] < min_val:
                    min_val = err_val[i]
                    best_size = i + 1  # err_val[i] is the error for size i + 1
            return best_size
        err_val.append(errV)
        err_train.append(errT)
        size += 1
def leave_one_out(learner, dataset, size=None):
"""Leave one out cross-validation over the dataset."""
return cross_validation(learner, size, dataset, k=len(dataset.examples))
def learningcurve(learner, dataset, trials=10, sizes=None):
    """Return [(size, mean error)] pairs: train on the first `size` examples and
    measure the error on the held-out remainder, averaged over `trials` shuffles."""
    if sizes is None:
        sizes = list(range(2, len(dataset.examples) - 10, 2))
    def score(learner, size):
        random.shuffle(dataset.examples)
        train_data, val_data = train_and_test(dataset, 0, size)
        dataset.examples = train_data
        h = learner(dataset)
        dataset.examples = train_data + val_data
        return err_ratio(h, dataset, val_data)
    return [(size, mean([score(learner, size) for t in range(trials)]))
            for size in sizes]
# ______________________________________________________________________________
# The rest of this file gives datasets for machine learning problems.
orings = DataSet(name='orings', target='Distressed',
attrnames="Rings Distressed Temp Pressure Flightnum")
zoo = DataSet(name='zoo', target='type', exclude=['name'],
attrnames="name hair feathers eggs milk airborne aquatic " +
"predator toothed backbone breathes venomous fins legs tail " +
"domestic catsize type")
iris = DataSet(name="iris", target="class",
attrnames="sepal-len sepal-width petal-len petal-width class")
# ______________________________________________________________________________
# The Restaurant example from [Figure 18.2]
def RestaurantDataSet(examples=None):
"""Build a DataSet of Restaurant waiting examples. [Figure 18.3]"""
return DataSet(name='restaurant', target='Wait', examples=examples,
attrnames='Alternate Bar Fri/Sat Hungry Patrons Price ' +
'Raining Reservation Type WaitEstimate Wait')
restaurant = RestaurantDataSet()
def T(attrname, branches):
branches = {value: (child if isinstance(child, DecisionFork)
else DecisionLeaf(child))
for value, child in branches.items()}
return DecisionFork(restaurant.attrnum(attrname), attrname, print, branches)
""" [Figure 18.2]
A decision tree for deciding whether to wait for a table at a restaurant.
"""
waiting_decision_tree = T('Patrons',
{'None': 'No', 'Some': 'Yes',
'Full': T('WaitEstimate',
{'>60': 'No', '0-10': 'Yes',
'30-60': T('Alternate',
{'No': T('Reservation',
{'Yes': 'Yes',
'No': T('Bar', {'No': 'No',
'Yes': 'Yes'})}),
'Yes': T('Fri/Sat', {'No': 'No', 'Yes': 'Yes'})}
),
'10-30': T('Hungry',
{'No': 'Yes',
'Yes': T('Alternate',
{'No': 'Yes',
'Yes': T('Raining',
{'No': 'No',
'Yes': 'Yes'})})})})})
def SyntheticRestaurant(n=20):
"""Generate a DataSet with n examples."""
def gen():
example = list(map(random.choice, restaurant.values))
example[restaurant.target] = waiting_decision_tree(example)
return example
return RestaurantDataSet([gen() for i in range(n)])
# ______________________________________________________________________________
# Artificial, generated datasets.
def Majority(k, n):
"""Return a DataSet with n k-bit examples of the majority problem:
k random bits followed by a 1 if more than half the bits are 1, else 0."""
examples = []
for i in range(n):
bits = [random.choice([0, 1]) for i in range(k)]
bits.append(int(sum(bits) > k / 2))
examples.append(bits)
return DataSet(name="majority", examples=examples)
def Parity(k, n, name="parity"):
"""Return a DataSet with n k-bit examples of the parity problem:
k random bits followed by a 1 if an odd number of bits are 1, else 0."""
examples = []
for i in range(n):
bits = [random.choice([0, 1]) for i in range(k)]
bits.append(sum(bits) % 2)
examples.append(bits)
return DataSet(name=name, examples=examples)
def Xor(n):
"""Return a DataSet with n examples of 2-input xor."""
return Parity(2, n, name="xor")
def ContinuousXor(n):
"2 inputs are chosen uniformly from (0.0 .. 2.0]; output is xor of ints."
examples = []
for i in range(n):
x, y = [random.uniform(0.0, 2.0) for i in '12']
examples.append([x, y, int(x) != int(y)])
return DataSet(name="continuous xor", examples=examples)
# ______________________________________________________________________________
def compare(algorithms=[PluralityLearner, NaiveBayesLearner,
NearestNeighborLearner, DecisionTreeLearner],
datasets=[iris, orings, zoo, restaurant, SyntheticRestaurant(20),
Majority(7, 100), Parity(7, 100), Xor(100)],
k=10, trials=1):
"""Compare various learners on various datasets using cross-validation.
Print results as a table."""
print_table([[a.__name__.replace('Learner', '')] +
[cross_validation(a, d, k, trials) for d in datasets]
for a in algorithms],
header=[''] + [d.name[0:7] for d in datasets], numfmt='%.2f')
| 36.819592
| 100
| 0.612606
|
8e03122ff4141e66cffe787365bad709bac022f8
| 2,750
|
py
|
Python
|
nikola/plugins/task/posts.py
|
pellenilsson/nikola
|
67a944a40b35584525a1bb363b3abd85582704cb
|
[
"MIT"
] | null | null | null |
nikola/plugins/task/posts.py
|
pellenilsson/nikola
|
67a944a40b35584525a1bb363b3abd85582704cb
|
[
"MIT"
] | null | null | null |
nikola/plugins/task/posts.py
|
pellenilsson/nikola
|
67a944a40b35584525a1bb363b3abd85582704cb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright © 2012-2014 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from copy import copy
from nikola.plugin_categories import Task
from nikola import utils
def rest_deps(post, task):
"""Add extra_deps from ReST into task.
The .dep file is created by ReST so not available before the task starts
to execute.
"""
task.file_dep.update(post.extra_deps())
class RenderPosts(Task):
"""Build HTML fragments from metadata and text."""
name = "render_posts"
def gen_tasks(self):
"""Build HTML fragments from metadata and text."""
self.site.scan_posts()
kw = {
"translations": self.site.config["TRANSLATIONS"],
"timeline": self.site.timeline,
"default_lang": self.site.config["DEFAULT_LANG"],
"show_untranslated_posts": self.site.config['SHOW_UNTRANSLATED_POSTS'],
"demote_headers": self.site.config['DEMOTE_HEADERS'],
}
yield self.group_task()
for lang in kw["translations"]:
deps_dict = copy(kw)
deps_dict.pop('timeline')
for post in kw['timeline']:
dest = post.translated_base_path(lang)
task = {
'basename': self.name,
'name': dest,
'file_dep': post.fragment_deps(lang),
'targets': [dest],
'actions': [(post.compile, (lang, )),
(rest_deps, (post,)),
],
'clean': True,
'uptodate': [utils.config_changed(deps_dict)],
}
yield task
| 35.714286
| 83
| 0.629818
|
52136baf903bab2ed17becc1a8332d03f84bd536
| 1,183
|
py
|
Python
|
arviz/plots/backends/matplotlib/parallelplot.py
|
aseyboldt/arviz
|
1fb40ff442f5ba4b8d11ceeaef27e6c339eb1685
|
[
"Apache-2.0"
] | 1
|
2020-08-09T00:16:00.000Z
|
2020-08-09T00:16:00.000Z
|
arviz/plots/backends/matplotlib/parallelplot.py
|
aseyboldt/arviz
|
1fb40ff442f5ba4b8d11ceeaef27e6c339eb1685
|
[
"Apache-2.0"
] | null | null | null |
arviz/plots/backends/matplotlib/parallelplot.py
|
aseyboldt/arviz
|
1fb40ff442f5ba4b8d11ceeaef27e6c339eb1685
|
[
"Apache-2.0"
] | null | null | null |
"""Matplotlib Parallel coordinates plot."""
import matplotlib.pyplot as plt
import numpy as np
from . import backend_kwarg_defaults, backend_show
def plot_parallel(
ax,
colornd,
colord,
shadend,
diverging_mask,
_posterior,
textsize,
var_names,
xt_labelsize,
legend,
figsize,
backend_kwargs,
show,
):
"""Matplotlib parallel plot."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(),
**backend_kwargs,
}
if ax is None:
_, ax = plt.subplots(figsize=figsize, **backend_kwargs)
ax.plot(_posterior[:, ~diverging_mask], color=colornd, alpha=shadend)
if np.any(diverging_mask):
ax.plot(_posterior[:, diverging_mask], color=colord, lw=1)
ax.tick_params(labelsize=textsize)
ax.set_xticks(range(len(var_names)))
ax.set_xticklabels(var_names)
if legend:
ax.plot([], color=colornd, label="non-divergent")
if np.any(diverging_mask):
ax.plot([], color=colord, label="divergent")
ax.legend(fontsize=xt_labelsize)
if backend_show(show):
plt.show()
return ax
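# Example (illustrative): this backend is usually reached through the public
# arviz API rather than called directly, e.g.:
#
#     import arviz as az
#     idata = az.load_arviz_data("centered_eight")
#     az.plot_parallel(idata, var_names=["mu", "tau"], backend="matplotlib")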
| 22.320755
| 73
| 0.64328
|
0cbf9f752e1abacad54f558aeddb386f76357a92
| 24,084
|
py
|
Python
|
python/ccxt/async/bitfinex.py
|
ddcash/ccxt
|
ea85be932071891fbf0bb2001dee3b1c2be941e3
|
[
"MIT"
] | 3
|
2018-01-16T02:03:26.000Z
|
2018-01-16T16:05:48.000Z
|
python/ccxt/async/bitfinex.py
|
snugghash/ccxt
|
ea85be932071891fbf0bb2001dee3b1c2be941e3
|
[
"MIT"
] | null | null | null |
python/ccxt/async/bitfinex.py
|
snugghash/ccxt
|
ea85be932071891fbf0bb2001dee3b1c2be941e3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
import base64
import hashlib
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NotSupported
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
class bitfinex (Exchange):
def describe(self):
return self.deep_extend(super(bitfinex, self).describe(), {
'id': 'bitfinex',
'name': 'Bitfinex',
'countries': 'VG',
'version': 'v1',
'rateLimit': 1500,
'hasCORS': False,
# old metainfo interface
'hasFetchOrder': True,
'hasFetchTickers': True,
'hasDeposit': True,
'hasWithdraw': True,
'hasFetchOHLCV': True,
'hasFetchOpenOrders': True,
'hasFetchClosedOrders': True,
# new metainfo interface
'has': {
'fetchOHLCV': True,
'fetchTickers': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchMyTrades': True,
'withdraw': True,
'deposit': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'3h': '3h',
'6h': '6h',
'12h': '12h',
'1d': '1D',
'1w': '7D',
'2w': '14D',
'1M': '1M',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766244-e328a50c-5ed2-11e7-947b-041416579bb3.jpg',
'api': 'https://api.bitfinex.com',
'www': 'https://www.bitfinex.com',
'doc': [
'https://bitfinex.readme.io/v1/docs',
'https://github.com/bitfinexcom/bitfinex-api-node',
],
},
'api': {
'v2': {
'get': [
'candles/trade:{timeframe}:{symbol}/{section}',
'candles/trade:{timeframe}:{symbol}/last',
'candles/trade:{timeframe}:{symbol}/hist',
],
},
'public': {
'get': [
'book/{symbol}',
# 'candles/{symbol}',
'lendbook/{currency}',
'lends/{currency}',
'pubticker/{symbol}',
'stats/{symbol}',
'symbols',
'symbols_details',
'tickers',
'today',
'trades/{symbol}',
],
},
'private': {
'post': [
'account_fees',
'account_infos',
'balances',
'basket_manage',
'credits',
'deposit/new',
'funding/close',
'history',
'history/movements',
'key_info',
'margin_infos',
'mytrades',
'mytrades_funding',
'offer/cancel',
'offer/new',
'offer/status',
'offers',
'offers/hist',
'order/cancel',
'order/cancel/all',
'order/cancel/multi',
'order/cancel/replace',
'order/new',
'order/new/multi',
'order/status',
'orders',
'orders/hist',
'position/claim',
'positions',
'summary',
'taken_funds',
'total_taken_funds',
'transfer',
'unused_taken_funds',
'withdraw',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'maker': 0.1 / 100,
'taker': 0.2 / 100,
'tiers': {
'taker': [
[0, 0.2 / 100],
[500000, 0.2 / 100],
[1000000, 0.2 / 100],
[2500000, 0.2 / 100],
[5000000, 0.2 / 100],
[7500000, 0.2 / 100],
[10000000, 0.18 / 100],
[15000000, 0.16 / 100],
[20000000, 0.14 / 100],
[25000000, 0.12 / 100],
[30000000, 0.1 / 100],
],
'maker': [
[0, 0.1 / 100],
[500000, 0.08 / 100],
[1000000, 0.06 / 100],
[2500000, 0.04 / 100],
[5000000, 0.02 / 100],
[7500000, 0],
[10000000, 0],
[15000000, 0],
[20000000, 0],
[25000000, 0],
[30000000, 0],
],
},
},
'funding': {
'tierBased': False, # True for tier-based/progressive
'percentage': False, # fixed commission
'deposit': {
'BTC': 0.0005,
'IOTA': 0.5,
'ETH': 0.01,
'BCH': 0.01,
'LTC': 0.1,
'EOS': 0.1,
'XMR': 0.04,
'SAN': 0.1,
'DASH': 0.01,
'ETC': 0.01,
'XPR': 0.02,
'YYW': 0.1,
'NEO': 0,
'ZEC': 0.1,
'BTG': 0,
'OMG': 0.1,
'DATA': 1,
'QASH': 1,
'ETP': 0.01,
'QTUM': 0.01,
'EDO': 0.5,
'AVT': 0.5,
'USDT': 0,
},
'withdraw': {
'BTC': 0.0005,
'IOTA': 0.5,
'ETH': 0.01,
'BCH': 0.01,
'LTC': 0.1,
'EOS': 0.1,
'XMR': 0.04,
'SAN': 0.1,
'DASH': 0.01,
'ETC': 0.01,
'XPR': 0.02,
'YYW': 0.1,
'NEO': 0,
'ZEC': 0.1,
'BTG': 0,
'OMG': 0.1,
'DATA': 1,
'QASH': 1,
'ETP': 0.01,
'QTUM': 0.01,
'EDO': 0.5,
'AVT': 0.5,
'USDT': 5,
},
},
},
})
def common_currency_code(self, currency):
currencies = {
'DSH': 'DASH', # Bitfinex names Dash as DSH, instead of DASH
'QTM': 'QTUM',
'BCC': 'CST_BCC',
'BCU': 'CST_BCU',
'IOT': 'IOTA',
}
return currencies[currency] if (currency in list(currencies.keys())) else currency
async def fetch_markets(self):
markets = await self.publicGetSymbolsDetails()
result = []
for p in range(0, len(markets)):
market = markets[p]
id = market['pair'].upper()
baseId = id[0:3]
quoteId = id[3:6]
base = self.common_currency_code(baseId)
quote = self.common_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'price': market['price_precision'],
'amount': market['price_precision'],
}
result.append(self.extend(self.fees['trading'], {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'info': market,
'precision': precision,
'limits': {
'amount': {
'min': float(market['minimum_order_size']),
'max': float(market['maximum_order_size']),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
},
'cost': {
'min': None,
'max': None,
},
},
}))
return result
async def fetch_balance(self, params={}):
await self.load_markets()
balanceType = self.safe_string(params, 'type', 'exchange')
balances = await self.privatePostBalances()
result = {'info': balances}
for i in range(0, len(balances)):
balance = balances[i]
if balance['type'] == balanceType:
currency = balance['currency']
uppercase = currency.upper()
uppercase = self.common_currency_code(uppercase)
account = self.account()
account['free'] = float(balance['available'])
account['total'] = float(balance['amount'])
account['used'] = account['total'] - account['free']
result[uppercase] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, params={}):
await self.load_markets()
orderbook = await self.publicGetBookSymbol(self.extend({
'symbol': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook, None, 'bids', 'asks', 'price', 'amount')
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
tickers = await self.publicGetTickers(params)
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
if 'pair' in ticker:
id = ticker['pair']
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
result[symbol] = self.parse_ticker(ticker, market)
else:
raise ExchangeError(self.id + ' fetchTickers() failed to recognize symbol ' + id + ' ' + self.json(ticker))
else:
raise ExchangeError(self.id + ' fetchTickers() response not recognized ' + self.json(tickers))
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
ticker = await self.publicGetPubtickerSymbol(self.extend({
'symbol': market['id'],
}, params))
return self.parse_ticker(ticker, market)
def parse_ticker(self, ticker, market=None):
timestamp = float(ticker['timestamp']) * 1000
symbol = None
if market:
symbol = market['symbol']
elif 'pair' in ticker:
id = ticker['pair']
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
else:
raise ExchangeError(self.id + ' unrecognized ticker symbol ' + id + ' ' + self.json(ticker))
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high']),
'low': float(ticker['low']),
'bid': float(ticker['bid']),
'ask': float(ticker['ask']),
'vwap': None,
'open': None,
'close': None,
'first': None,
'last': float(ticker['last_price']),
'change': None,
'percentage': None,
'average': float(ticker['mid']),
'baseVolume': float(ticker['volume']),
'quoteVolume': None,
'info': ticker,
}
def parse_trade(self, trade, market):
timestamp = int(float(trade['timestamp'])) * 1000
side = trade['type'].lower()
orderId = self.safe_string(trade, 'order_id')
price = float(trade['price'])
amount = float(trade['amount'])
cost = price * amount
return {
'id': str(trade['tid']),
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'order': orderId,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetTradesSymbol(self.extend({
'symbol': market['id'],
}, params))
return self.parse_trades(response, market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {'symbol': market['id']}
if limit:
request['limit_trades'] = limit
if since:
request['timestamp'] = int(since / 1000)
response = await self.privatePostMytrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
orderType = type
if (type == 'limit') or (type == 'market'):
orderType = 'exchange ' + type
# amount = self.amount_to_precision(symbol, amount)
order = {
'symbol': self.market_id(symbol),
'amount': str(amount),
'side': side,
'type': orderType,
'ocoorder': False,
'buy_price_oco': 0,
'sell_price_oco': 0,
}
if type == 'market':
order['price'] = str(self.nonce())
else:
# price = self.price_to_precision(symbol, price)
order['price'] = str(price)
result = await self.privatePostOrderNew(self.extend(order, params))
return self.parse_order(result)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
return await self.privatePostOrderCancel({'order_id': int(id)})
def parse_order(self, order, market=None):
side = order['side']
open = order['is_live']
canceled = order['is_cancelled']
status = None
if open:
status = 'open'
elif canceled:
status = 'canceled'
else:
status = 'closed'
symbol = None
if not market:
exchange = order['symbol'].upper()
if exchange in self.markets_by_id:
market = self.markets_by_id[exchange]
if market:
symbol = market['symbol']
orderType = order['type']
exchange = orderType.find('exchange ') >= 0
if exchange:
prefix, orderType = order['type'].split(' ')
timestamp = int(float(order['timestamp']) * 1000)
result = {
'info': order,
'id': str(order['id']),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': orderType,
'side': side,
'price': float(order['price']),
'average': float(order['avg_execution_price']),
'amount': float(order['original_amount']),
'remaining': float(order['remaining_amount']),
'filled': float(order['executed_amount']),
'status': status,
'fee': None,
}
return result
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
response = await self.privatePostOrders(params)
orders = self.parse_orders(response, None, since, limit)
if symbol:
orders = self.filter_by(orders, 'symbol', symbol)
return orders
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if limit:
request['limit'] = limit
response = await self.privatePostOrdersHist(self.extend(request, params))
orders = self.parse_orders(response, None, since, limit)
if symbol:
orders = self.filter_by(orders, 'symbol', symbol)
orders = self.filter_by(orders, 'status', 'closed')
return orders
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
response = await self.privatePostOrderStatus(self.extend({
'order_id': int(id),
}, params))
return self.parse_order(response)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
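        # Bitfinex v2 candles arrive as [MTS, OPEN, CLOSE, HIGH, LOW, VOLUME];
        # reorder here to the OHLCV convention [timestamp, open, high, low, close, volume].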
return [
ohlcv[0],
ohlcv[1],
ohlcv[3],
ohlcv[4],
ohlcv[2],
ohlcv[5],
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
v2id = 't' + market['id']
request = {
'symbol': v2id,
'timeframe': self.timeframes[timeframe],
'sort': 1,
}
if limit:
request['limit'] = limit
if since:
request['start'] = since
request = self.extend(request, params)
response = await self.v2GetCandlesTradeTimeframeSymbolHist(request)
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def get_currency_name(self, currency):
if currency == 'BTC':
return 'bitcoin'
elif currency == 'LTC':
return 'litecoin'
elif currency == 'ETH':
return 'ethereum'
elif currency == 'ETC':
return 'ethereumc'
elif currency == 'OMNI':
return 'mastercoin' # ???
elif currency == 'ZEC':
return 'zcash'
elif currency == 'XMR':
return 'monero'
elif currency == 'USD':
return 'wire'
elif currency == 'DASH':
return 'dash'
elif currency == 'XRP':
return 'ripple'
elif currency == 'EOS':
return 'eos'
elif currency == 'BCH':
return 'bcash'
elif currency == 'USDT':
return 'tetheruso'
raise NotSupported(self.id + ' ' + currency + ' not supported for withdrawal')
async def create_deposit_address(self, currency, params={}):
response = await self.fetch_deposit_address(currency, self.extend({
'renew': 1,
}, params))
return {
'currency': currency,
'address': response['address'],
'status': 'ok',
'info': response['info'],
}
async def fetch_deposit_address(self, currency, params={}):
name = self.get_currency_name(currency)
request = {
'method': name,
'wallet_name': 'exchange',
'renew': 0, # a value of 1 will generate a new address
}
response = await self.privatePostDepositNew(self.extend(request, params))
return {
'currency': currency,
'address': response['address'],
'status': 'ok',
'info': response,
}
async def withdraw(self, currency, amount, address, params={}):
name = self.get_currency_name(currency)
request = {
'withdraw_type': name,
'walletselected': 'exchange',
'amount': str(amount),
'address': address,
}
responses = await self.privatePostWithdraw(self.extend(request, params))
response = responses[0]
return {
'info': response,
'id': response['withdrawal_id'],
}
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = '/' + self.implode_params(path, params)
if api == 'v2':
request = '/' + api + request
else:
request = '/' + self.version + request
query = self.omit(params, self.extract_params(path))
url = self.urls['api'] + request
if (api == 'public') or (path.find('/hist') >= 0):
if query:
suffix = '?' + self.urlencode(query)
url += suffix
request += suffix
if api == 'private':
self.check_required_credentials()
nonce = self.nonce()
query = self.extend({
'nonce': str(nonce),
'request': request,
}, query)
query = self.json(query)
query = self.encode(query)
payload = base64.b64encode(query)
secret = self.encode(self.secret)
signature = self.hmac(payload, secret, hashlib.sha384)
headers = {
'X-BFX-APIKEY': self.apiKey,
'X-BFX-PAYLOAD': self.decode(payload),
'X-BFX-SIGNATURE': signature,
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
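    # Example (illustrative): for a private endpoint such as privatePostBalances,
    # the payload is the base64-encoded JSON body and the signature is its
    # HMAC-SHA384 digest under the API secret.  Sketch using the standard-library
    # hmac module (the key and secret below are placeholders):
    #
    #     body = json.dumps({'nonce': '1514764800000', 'request': '/v1/balances'})
    #     payload = base64.b64encode(body.encode())
    #     signature = hmac.new(b'YOUR_SECRET', payload, hashlib.sha384).hexdigest()
    #     headers = {'X-BFX-APIKEY': 'YOUR_KEY',
    #                'X-BFX-PAYLOAD': payload,
    #                'X-BFX-SIGNATURE': signature}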
def handle_errors(self, code, reason, url, method, headers, body):
if code == 400:
if body[0] == "{":
response = json.loads(body)
message = response['message']
if message.find('Key price should be a decimal number') >= 0:
raise InvalidOrder(self.id + ' ' + message)
elif message.find('Invalid order: not enough exchange balance') >= 0:
raise InsufficientFunds(self.id + ' ' + message)
elif message.find('Invalid order') >= 0:
raise InvalidOrder(self.id + ' ' + message)
elif message.find('Order could not be cancelled.') >= 0:
raise OrderNotFound(self.id + ' ' + message)
raise ExchangeError(self.id + ' ' + body)
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'message' in response:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
| 37.281734
| 127
| 0.443365
|
c450ff4a7ec6c7014b3eaa26b957c9b1fa68b591
| 11,028
|
py
|
Python
|
grama/dfply/base.py
|
OscarDeGar/py_grama
|
0a02c291326b394a8d0c127dad4c58121e568777
|
[
"MIT"
] | 13
|
2020-02-24T16:51:51.000Z
|
2022-03-30T18:56:55.000Z
|
grama/dfply/base.py
|
OscarDeGar/py_grama
|
0a02c291326b394a8d0c127dad4c58121e568777
|
[
"MIT"
] | 78
|
2019-12-30T19:13:21.000Z
|
2022-02-23T18:17:54.000Z
|
grama/dfply/base.py
|
OscarDeGar/py_grama
|
0a02c291326b394a8d0c127dad4c58121e568777
|
[
"MIT"
] | 7
|
2020-10-19T17:49:25.000Z
|
2021-08-15T20:46:52.000Z
|
__all__ = [
"Intention",
"dfdelegate",
"make_symbolic",
"symbolic_evaluation",
"group_delegation",
"flatten"
]
import warnings
from .. import pipe, add_pipe # Use grama pipe to preserve metadata
from functools import partial, wraps
from numpy import zeros, array
from pandas import Series, Index, DataFrame
def _recursive_apply(f, l):
if isinstance(l, (list, tuple)):
out = [_recursive_apply(f, l_) for l_ in l]
if isinstance(l, tuple):
out = tuple(out)
return out
return f(l)
def contextualize(arg, context):
if isinstance(arg, Intention):
arg = arg.evaluate(context)
return arg
def flatten(l):
for el in l:
if isinstance(el, (tuple, list)):
yield from flatten(el)
else:
yield el
def _check_delayed_eval(args, kwargs):
check = lambda x: isinstance(x, Intention)
delay = any([a for a in flatten(_recursive_apply(check, args))])
delay = delay or any(
[v for v in flatten(_recursive_apply(check, list(kwargs.values())))]
)
return delay
def _context_args(args):
return lambda x: _recursive_apply(partial(contextualize, context=x), args)
def _context_kwargs(kwargs):
values_ = lambda x: _recursive_apply(
partial(contextualize, context=x), list(kwargs.values())
)
return lambda x: {k: v for k, v in zip(kwargs.keys(), values_(x))}
def _delayed_function(function, args, kwargs):
return lambda x: function(*_context_args(args)(x), **_context_kwargs(kwargs)(x))
def make_symbolic(f):
def wrapper(*args, **kwargs):
delay = _check_delayed_eval(args, kwargs)
if delay:
delayed = _delayed_function(f, args, kwargs)
return Intention(delayed)
return f(*args, **kwargs)
## Preserve documentation
wrapper.__doc__ = f.__doc__
wrapper.__name__ = f.__name__
return wrapper
class Intention(object):
def __init__(self, function=lambda x: x, invert=False):
self.function = function
self.inverted = invert
def evaluate(self, context):
return self.function(context)
def __getattr__(self, attribute):
return Intention(
lambda x: getattr(self.function(x), attribute), invert=self.inverted
)
def __invert__(self):
return Intention(self.function, invert=not self.inverted)
def __call__(self, *args, **kwargs):
return Intention(
lambda x: self.function(x)(
*_context_args(args)(x), **_context_kwargs(kwargs)(x)
),
invert=self.inverted,
)
_magic_method_names = [
"__abs__",
"__add__",
"__and__",
"__cmp__",
"__complex__",
"__contains__",
"__delattr__",
"__delete__",
"__delitem__",
"__delslice__",
"__div__",
"__divmod__",
"__enter__",
"__eq__",
"__exit__",
"__float__",
"__floordiv__",
"__ge__",
"__get__",
"__getitem__",
"__getslice__",
"__gt__",
"__hash__",
"__hex__",
"__iadd__",
"__iand__",
"__idiv__",
"__ifloordiv__",
"__ilshift__",
"__imod__",
"__imul__",
"__index__",
"__int__",
"__ior__",
"__ipow__",
"__irshift__",
"__isub__",
"__iter__",
"__itruediv__",
"__ixor__",
"__le__",
"__len__",
"__long__",
"__lshift__",
"__lt__",
"__mod__",
"__mul__",
"__ne__",
"__neg__",
"__nonzero__",
"__oct__",
"__or__",
"__pos__",
"__pow__",
"__radd__",
"__rand__",
"__rcmp__",
"__rdiv__",
"__rdivmod__", # '__repr__',
"__reversed__",
"__rfloordiv__",
"__rlshift__",
"__rmod__",
"__rmul__",
"__ror__",
"__rpow__",
"__rrshift__",
"__rshift__",
"__rsub__",
"__rtruediv__",
"__rxor__",
"__set__",
"__setitem__",
"__setslice__",
"__sub__",
"__truediv__",
"__unicode__",
"__xor__",
"__str__",
]
def _set_magic_method(name):
def magic_method(self, *args, **kwargs):
return Intention(
lambda x: getattr(self.function(x), name)(
*_context_args(args)(x), **_context_kwargs(kwargs)(x)
),
invert=self.inverted,
)
return magic_method
for name in _magic_method_names:
setattr(Intention, name, _set_magic_method(name))
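# Example (illustrative): a bare Intention is a deferred reference to whatever
# object later comes down the pipe, so expressions can be built now and
# evaluated against a DataFrame later:
#
#     X = Intention()
#     expr = X.height * 2 + 1          # nothing is evaluated yet
#     expr.evaluate(DataFrame({"height": [1, 2, 3]}))   # -> Series [3, 5, 7]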
class IntentionEvaluator(object):
"""
Parent class for symbolic argument decorators.
Default behavior is to recursively turn the arguments and keyword
arguments of a decorated function into `symbolic.Call` objects that
can be evaluated against a pandas DataFrame as it comes down a pipe.
"""
__name__ = "IntentionEvaluator"
def __init__(
self, function, eval_symbols=True, eval_as_label=[], eval_as_selector=[]
):
super(IntentionEvaluator, self).__init__()
self.function = function
self.__doc__ = function.__doc__
self.eval_symbols = eval_symbols
self.eval_as_label = eval_as_label
self.eval_as_selector = eval_as_selector
def _evaluate(self, df, arg):
if isinstance(arg, Intention):
negate = arg.inverted
arg = arg.evaluate(df)
if negate:
arg = ~arg
return arg
def _evaluate_label(self, df, arg):
arg = self._evaluate(df, arg)
cols = list(df.columns)
if isinstance(arg, Series):
arg = arg.name
if isinstance(arg, Index):
arg = list(arg)
if isinstance(arg, int):
arg = cols[arg]
return arg
def _evaluate_selector(self, df, arg):
negate = False
if isinstance(arg, Intention):
negate = arg.inverted
arg = arg.evaluate(df)
cols = list(df.columns)
if isinstance(arg, Series):
arg = [cols.index(arg.name)]
if isinstance(arg, Index):
arg = [cols.index(i) for i in list(arg)]
if isinstance(arg, DataFrame):
arg = [cols.index(i) for i in arg.columns]
if isinstance(arg, int):
arg = [arg]
if isinstance(arg, str):
arg = [cols.index(arg)]
if isinstance(arg, (list, tuple)):
arg = [cols.index(i) if isinstance(i, str) else i for i in arg]
selection_vector = zeros(df.shape[1])
col_idx = array(arg)
if negate and len(col_idx) > 0:
selection_vector[col_idx] = -1
elif len(col_idx) > 0:
selection_vector[col_idx] = 1
return selection_vector
def _evaluator_loop(self, df, arg, eval_func):
if isinstance(arg, (list, tuple)):
return [self._evaluator_loop(df, a_, eval_func) for a_ in arg]
return eval_func(df, arg)
def _symbolic_eval(self, df, arg):
return self._evaluator_loop(df, arg, self._evaluate)
def _symbolic_to_label(self, df, arg):
return self._evaluator_loop(df, arg, self._evaluate_label)
def _symbolic_to_selector(self, df, arg):
return self._evaluator_loop(df, arg, self._evaluate_selector)
def _recursive_arg_eval(self, df, args):
eval_symbols = self._find_eval_args(self.eval_symbols, args)
eval_as_label = self._find_eval_args(self.eval_as_label, args)
eval_as_selector = self._find_eval_args(self.eval_as_selector, args)
return [
self._symbolic_to_label(df, a)
if i in eval_as_label
else self._symbolic_to_selector(df, a)
if i in eval_as_selector
else self._symbolic_eval(df, a)
if i in eval_symbols
else a
for i, a in enumerate(args)
]
def _recursive_kwarg_eval(self, df, kwargs):
eval_symbols = self._find_eval_kwargs(self.eval_symbols, kwargs)
eval_as_label = self._find_eval_kwargs(self.eval_as_label, kwargs)
eval_as_selector = self._find_eval_kwargs(self.eval_as_selector, kwargs)
return {
k: (
self._symbolic_to_label(df, v)
if k in eval_as_label
else self._symbolic_to_selector(df, v)
if k in eval_as_selector
else self._symbolic_eval(df, v)
if k in eval_symbols
else v
)
for k, v in kwargs.items()
}
def _find_eval_args(self, request, args):
if (request is True) or ("*" in request):
return [i for i in range(len(args))]
if request in [None, False]:
return []
return request
def _find_eval_kwargs(self, request, kwargs):
if (request is True) or ("**" in request):
return [k for k in kwargs.keys()]
if request in [None, False]:
return []
return request
def __call__(self, *args, **kwargs):
df = args[0]
args = self._recursive_arg_eval(df, args[1:])
kwargs = self._recursive_kwarg_eval(df, kwargs)
return self.function(df, *args, **kwargs)
def symbolic_evaluation(
function=None, eval_symbols=True, eval_as_label=[], eval_as_selector=[]
):
if function:
return IntentionEvaluator(function)
@wraps(function)
def wrapper(function):
return IntentionEvaluator(
function,
eval_symbols=eval_symbols,
eval_as_label=eval_as_label,
eval_as_selector=eval_as_selector,
)
return wrapper
class group_delegation(object):
__name__ = "group_delegation"
def __init__(self, function):
self.function = function
self.__doc__ = function.__doc__
def _apply(self, df, *args, **kwargs):
grouped = df.groupby(df._grouped_by)
dff = grouped.apply(self.function, *args, **kwargs)
# Save all the metadata attributes back into the new data frame
for field in df._metadata:
setattr(dff, field, getattr(df, field))
df = dff
for name in df.index.names[:-1]:
if name in df:
df.reset_index(level=0, drop=True, inplace=True)
else:
df.reset_index(level=0, inplace=True)
if (df.index == 0).all():
df.reset_index(drop=True, inplace=True)
return df
def __call__(self, *args, **kwargs):
grouped_by = getattr(args[0], "_grouped_by", None)
if (grouped_by is None) or not all([g in args[0].columns for g in grouped_by]):
return self.function(*args, **kwargs)
applied = self._apply(args[0], *args[1:], **kwargs)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
applied._grouped_by = grouped_by
return applied
def dfpipe(f):
return add_pipe(group_delegation(symbolic_evaluation(f)))
def dfdelegate(f):
class addName(group_delegation):
__name__ = f.__name__
return addName(group_delegation(symbolic_evaluation(f)))
| 26.76699
| 87
| 0.599837
|
696ebbc1f7fd623bd5664e38210fe206c81536e6
| 747
|
py
|
Python
|
mbpo/models/perturbed_env.py
|
anyboby/ConstrainedMBPO
|
036f4ffefc464e676a287c35c92cc5c0b8925fcf
|
[
"MIT"
] | 5
|
2020-02-12T17:09:09.000Z
|
2021-09-29T16:06:40.000Z
|
mbpo/models/perturbed_env.py
|
anyboby/ConstrainedMBPO
|
036f4ffefc464e676a287c35c92cc5c0b8925fcf
|
[
"MIT"
] | 10
|
2020-08-31T02:50:02.000Z
|
2022-02-09T23:36:43.000Z
|
mbpo/models/perturbed_env.py
|
anyboby/ConstrainedMBPO
|
036f4ffefc464e676a287c35c92cc5c0b8925fcf
|
[
"MIT"
] | 2
|
2022-03-15T01:45:26.000Z
|
2022-03-15T06:46:47.000Z
|
import safety_gym
import gym
import numpy as np
class PerturbedEnv:
def __init__(self, env, std_inc=0.02):
self.std_inc = std_inc
self.env = env
self.rollouts = 1
def step(self, act):
next_obs, rewards, terminals, info = self.env.step(act)
next_obs = next_obs + np.random.normal(size=next_obs.shape)*(self.std_inc*self.rollouts)
self.rollouts += 1
return next_obs, rewards, terminals, info
def reset(self, sim_state=None):
obs = self.env.reset(state_config=sim_state)
self.rollouts = 1
return obs
def get_sim_state(self, *args, **kwargs):
assert hasattr(self.env, 'get_sim_state')
return self.env.get_sim_state(*args, **kwargs)
| 31.125
| 96
| 0.643909
|
964123fd644bc3abdadfc3853dae47d1b50729ae
| 2,691
|
py
|
Python
|
chatbot/util/utils.py
|
mphe/pychatbot
|
66d994e8e96d699b19083b25ee0fe80b8874c42b
|
[
"MIT"
] | 1
|
2020-05-28T18:26:11.000Z
|
2020-05-28T18:26:11.000Z
|
chatbot/util/utils.py
|
mphe/pychatbot
|
66d994e8e96d699b19083b25ee0fe80b8874c42b
|
[
"MIT"
] | null | null | null |
chatbot/util/utils.py
|
mphe/pychatbot
|
66d994e8e96d699b19083b25ee0fe80b8874c42b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
import asyncio
from concurrent.futures import ThreadPoolExecutor
from chatbot import api
from typing import Callable, List
def merge_dicts(srcdict: dict, mergedict: dict, overwrite=False):
"""Recursively merges `mergedict` into `srcdict` and returns `srcdict`.
Makes shallow copies of `dict` and `list` values.
"""
for k, v in mergedict.items():
srcvalue = srcdict.get(k, None)
if isinstance(v, dict) and isinstance(srcvalue, dict):
merge_dicts(srcvalue, v, overwrite)
continue
if overwrite or srcvalue is None:
if isinstance(v, dict):
v = dict(v)
elif isinstance(v, list):
v = list(v)
srcdict[k] = v
return srcdict
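# Example (illustrative):
#     merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3})
#     -> {"a": {"x": 1, "y": 2}, "b": 3}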
def merge_dicts_copy(srcdict, mergedict, overwrite=False):
"""Same as _merge_dicts but returns a shallow copy instead of merging directly into srcdict."""
return merge_dicts(dict(srcdict), mergedict, overwrite)
def string_prepend(prefix: str, string: str):
"""Prepends each line in `string` with `prefix`."""
sub = "\n" + prefix
return prefix + string.replace("\n", sub)
def list_shifted(l: List, num: int = 1) -> List: # noqa
"""Shift all elements by a given amount (default: shift one to left)."""
if num > 0:
return l[num:]
if num < 0:
return l[:num]
return l
def iter_chunks(c: List, chunk_size: int):
for i in range(0, len(c), chunk_size):
yield c[i:i + chunk_size] # Slicing clamps indices to container length
async def edit_or_reply(msg: api.ChatMessage, text: str):
if msg.is_editable:
await msg.edit(text)
else:
await msg.reply(text)
async def wait_until_api_ready(apiobj: api.APIBase):
if not apiobj.is_ready:
logging.debug("Waiting for API to become ready...")
await wait_until_true(lambda: apiobj.is_ready)
async def run_in_thread(callback, *args):
"""Spawn a new thread, run a callback in it, and wait until it returns.
Returns what the callback returns.
"""
loop = asyncio.get_running_loop()
with ThreadPoolExecutor(max_workers=1) as pool:
return await loop.run_in_executor(pool, callback, *args)
async def wait_until_true(callback: Callable, *args, **kwargs):
"""Wait (in 1s intervals) until `callback` returns true.
    `callback` can be a normal function or a coroutine function.
"""
while True:
        if asyncio.iscoroutinefunction(callback):
if await callback(*args, **kwargs):
break
else:
if callback(*args, **kwargs):
break
await asyncio.sleep(1)
| 28.935484
| 99
| 0.638424
|
bdbba0fd8c040ced67990520ee3f025e627f4232
| 248
|
py
|
Python
|
reskit/solarpower/__init__.py
|
r-beer/RESKit
|
1e8fe6c8176f995a53ca584450beb566fd40b4cb
|
[
"MIT"
] | null | null | null |
reskit/solarpower/__init__.py
|
r-beer/RESKit
|
1e8fe6c8176f995a53ca584450beb566fd40b4cb
|
[
"MIT"
] | null | null | null |
reskit/solarpower/__init__.py
|
r-beer/RESKit
|
1e8fe6c8176f995a53ca584450beb566fd40b4cb
|
[
"MIT"
] | null | null | null |
from ._pv import SolarLibrary, simulatePVModule, locToTilt, frankCorrectionFactors, simulatePVModuleDistribution
from ._score import scoreOpenfieldPVLocation
from ._workflow import workflowOpenFieldFixed, workflowOpenFieldTracking, workflowRooftop
| 62
| 112
| 0.891129
|
2e6be13f738986f60604f8d96ca64f89cccc6756
| 5,476
|
py
|
Python
|
app/api/v1/schema/devicedetailsupdate.py
|
Fozia-Zafar/Device-Registration-Subsystem
|
7854dd314c2f5cb09d722d16ca0114c4cd9907b6
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
app/api/v1/schema/devicedetailsupdate.py
|
Fozia-Zafar/Device-Registration-Subsystem
|
7854dd314c2f5cb09d722d16ca0114c4cd9907b6
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
app/api/v1/schema/devicedetailsupdate.py
|
Fozia-Zafar/Device-Registration-Subsystem
|
7854dd314c2f5cb09d722d16ca0114c4cd9907b6
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
"""
DRS Registration device update schema package.
Copyright (c) 2018 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from marshmallow import Schema, fields, validates, ValidationError, pre_load, pre_dump
import re
from app.api.v1.helpers.validators import *
from app.api.v1.models.status import Status
from app.api.v1.models.regdetails import RegDetails
from app.api.v1.models.devicetechnology import DeviceTechnology
from app.api.v1.models.technologies import Technologies
from app.api.v1.models.devicetype import DeviceType
class DeviceDetailsUpdateSchema(Schema):
"""Schema for Device Update routes."""
update_restricted = ['In Review', 'Approved', 'Rejected', 'Closed', 'New Request', 'Awaiting Documents']
reg_details_id = fields.Int(required=True, error_messages={'required': 'Request id is required'})
brand = fields.Str()
model_name = fields.Str()
model_num = fields.Str()
operating_system = fields.Str()
device_type = fields.Str()
technologies = fields.List(fields.Str())
user_id = fields.Str(required=True, error_messages={'required': 'user_id is required'})
@pre_load()
def check_reg_id(self, data):
"""Validates request id."""
reg_details_id = data['reg_details_id']
reg_details = RegDetails.get_by_id(reg_details_id)
        if not reg_details:
            raise ValidationError('The request id provided is invalid', field_names=['reg_id'])
        if 'user_id' in data and reg_details.user_id != data['user_id']:
            raise ValidationError('Permission denied for this request', field_names=['user_id'])
@pre_load()
def update_allow(self, data):
"""Check if update id alowed."""
status = Status.get_status_type(data['status'])
if status in self.update_restricted:
raise ValidationError('The request status is {0}, which cannot be updated'.format(status),
field_names=['status'])
@validates('technologies')
def validate_technologies(self, values):
"""Validate technologies."""
allowed_tech = Technologies.get_technologies_names()
for value in values:
if value not in allowed_tech:
raise ValidationError("Radio Access Technology can be {0} only".format(','.join(allowed_tech)),
fields=['technologies'])
@pre_load()
def pre_process_technologies(self, data):
"""Map technologies."""
if 'technologies' in data:
validate_input('technologies', data['technologies'])
data['technologies'] = data['technologies'].split(',')
@pre_dump()
def serialize_data(self, data):
"""Transform data."""
technologies_list = []
if data.device_technologies:
technologies = DeviceTechnology.get_device_technologies(data.id)
for tech in technologies:
tech_type = Technologies.get_technology_by_id(tech.technology_id)
technologies_list.append(tech_type)
data.technologies = technologies_list
if data.device_types_id:
device_type = DeviceType.get_device_type_by_id(data.device_types_id)
data.device_type = device_type
@validates('brand')
def validate_brand(self, value):
"""Validates device brand."""
validate_input('brand', value)
@validates('model_name')
def validate_model_name(self, value):
"""Validate device model name."""
validate_input('model name', value)
@validates('model_num')
def validate_model_num(self, value):
"""Validates device model number."""
validate_input('model number', value)
@validates('operating_system')
def validate_operating_system(self, value):
"""Vaidates device operating system."""
validate_input('operating system', value)
| 47.617391
| 118
| 0.708912
|
fb142d7bee45fe9be7f7b52cd2e51fbdf86124a4
| 354
|
py
|
Python
|
server.py
|
IllDepence/sirtetris.com_rebuild
|
68f7645ec69e6dd826a9414aad0238c14335f4f3
|
[
"WTFPL"
] | null | null | null |
server.py
|
IllDepence/sirtetris.com_rebuild
|
68f7645ec69e6dd826a9414aad0238c14335f4f3
|
[
"WTFPL"
] | null | null | null |
server.py
|
IllDepence/sirtetris.com_rebuild
|
68f7645ec69e6dd826a9414aad0238c14335f4f3
|
[
"WTFPL"
] | 1
|
2015-01-11T18:36:16.000Z
|
2015-01-11T18:36:16.000Z
|
#!/usr/bin/env python
import CGIHTTPServer
import BaseHTTPServer
# - - - for local testing - - -
if __name__ == "__main__":
server = BaseHTTPServer.HTTPServer
handler = CGIHTTPServer.CGIHTTPRequestHandler
server_address = ("", 8001)
handler.cgi_directories = ["/"]
httpd = server(server_address, handler)
httpd.serve_forever()
| 23.6
| 49
| 0.700565
|
9701066fe21722108896c779261004e70661cd83
| 75,888
|
py
|
Python
|
dist/weewx-3.9.2/bin/weewx/restx.py
|
v0rts/docker-weewx
|
70b2f252051dfead4fcb74e74662b297831e6342
|
[
"Apache-2.0"
] | 10
|
2017-01-05T17:30:48.000Z
|
2021-09-18T15:04:20.000Z
|
dist/weewx-3.9.2/bin/weewx/restx.py
|
v0rts/docker-weewx
|
70b2f252051dfead4fcb74e74662b297831e6342
|
[
"Apache-2.0"
] | 2
|
2019-07-21T10:48:42.000Z
|
2022-02-16T20:36:45.000Z
|
dist/weewx-3.9.2/bin/weewx/restx.py
|
v0rts/docker-weewx
|
70b2f252051dfead4fcb74e74662b297831e6342
|
[
"Apache-2.0"
] | 12
|
2017-01-05T18:50:30.000Z
|
2021-10-05T07:35:45.000Z
|
#
# Copyright (c) 2009-2015 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
"""Publish weather data to RESTful sites such as the Weather Underground.
GENERAL ARCHITECTURE
Each protocol uses two classes:
o A weewx service, that runs in the main thread. Call this the
"controlling object"
o A separate "threading" class that runs in its own thread. Call this the
"posting object".
Communication between the two is via an instance of Queue.Queue. New loop
packets or archive records are put into the queue by the controlling object
and received by the posting object. Details below.
The controlling object should inherit from StdRESTful. The controlling object
is responsible for unpacking any configuration information from weewx.conf, and
supplying any defaults. It sets up the queue. It arranges for any new LOOP or
archive records to be put in the queue. It then launches the thread for the
posting object.
When a new LOOP or record arrives, the controlling object puts it in the queue,
to be received by the posting object. The controlling object can tell the
posting object to terminate by putting a 'None' in the queue.
The posting object should inherit from class RESTThread. It monitors the queue
and blocks until a new record arrives.
The base class RESTThread has a lot of functionality, so specializing classes
should only have to implement a few functions. In particular,
- format_url(self, record). This function takes a record dictionary as an
argument. It is responsible for formatting it as an appropriate URL.
For example, the station registry's version emits strings such as
http://weewx.com/register/register.cgi?weewx_info=2.6.0a5&python_info= ...
- skip_this_post(self, time_ts). If this function returns True, then the
post will be skipped. Otherwise, it is done. The default version does two
checks. First, it sees how old the record is. If it is older than the value
'stale', then the post is skipped. Second, it will not allow posts more
often than 'post_interval'. Both of these can be set in the constructor of
RESTThread.
- post_request(self, request). This function takes a urllib2.Request object
and is responsible for performing the HTTP GET or POST. The default version
simply uses urllib2.urlopen(request) and returns the result. If the post
could raise an unusual exception, override this function and catch the
exception. See the WOWThread implementation for an example.
- check_response(). After an HTTP request gets posted, the webserver sends
back a "response." This response may contain clues as to whether the post
worked. By overriding check_response() you can look for these clues. For
example, the station registry checks all lines in the response, looking for
any that start with the string "FAIL". If it finds one, it raises a
FailedPost exception, signaling that the post did not work.
In unusual cases, you might also have to implement the following:
- process_record(). The default version is for HTTP GET posts, but if you wish
to do a POST or use a socket, you may need to provide a specialized version.
See the CWOP version, CWOPThread.process_record(), for an example that
uses sockets.
"""
from __future__ import with_statement
import Queue
import datetime
import hashlib
import httplib
import platform
import re
import socket
import sys
import syslog
import threading
import time
import urllib
import urllib2
import weedb
import weeutil.weeutil
import weewx.engine
from weeutil.weeutil import to_int, to_float, to_bool, timestamp_to_string, search_up, \
accumulateLeaves, to_sorted_string
import weewx.manager
import weewx.units
class FailedPost(IOError):
"""Raised when a post fails after trying the max number of allowed times"""
class AbortedPost(StandardError):
"""Raised when a post is aborted by the client."""
class BadLogin(StandardError):
"""Raised when login information is bad or missing."""
class ConnectError(IOError):
"""Raised when unable to get a socket connection."""
class SendError(IOError):
"""Raised when unable to send through a socket."""
# ==============================================================================
# Abstract base classes
# ==============================================================================
class StdRESTful(weewx.engine.StdService):
"""Abstract base class for RESTful weewx services.
Offers a few common bits of functionality."""
def shutDown(self):
"""Shut down any threads"""
if hasattr(self, 'loop_queue') and hasattr(self, 'loop_thread'):
StdRESTful.shutDown_thread(self.loop_queue, self.loop_thread)
if hasattr(self, 'archive_queue') and hasattr(self, 'archive_thread'):
StdRESTful.shutDown_thread(self.archive_queue, self.archive_thread)
@staticmethod
def shutDown_thread(q, t):
"""Function to shut down a thread."""
if q and t.isAlive():
# Put a None in the queue to signal the thread to shutdown
q.put(None)
# Wait up to 20 seconds for the thread to exit:
t.join(20.0)
if t.isAlive():
syslog.syslog(syslog.LOG_ERR,
"restx: Unable to shut down %s thread" % t.name)
else:
syslog.syslog(syslog.LOG_DEBUG,
"restx: Shut down %s thread." % t.name)
# For backwards compatibility with early v2.6 alphas. In particular, the WeatherCloud uploader depends on it.
StdRESTbase = StdRESTful
class RESTThread(threading.Thread):
"""Abstract base class for RESTful protocol threads.
Offers a few bits of common functionality."""
def __init__(self, queue, protocol_name,
essentials={},
manager_dict=None,
post_interval=None, max_backlog=sys.maxint, stale=None,
log_success=True, log_failure=True,
timeout=10, max_tries=3, retry_wait=5, retry_login=3600,
softwaretype="weewx-%s" % weewx.__version__,
skip_upload=False):
"""Initializer for the class RESTThread
Required parameters:
queue: An instance of Queue.Queue where the records will appear.
protocol_name: A string holding the name of the protocol.
Optional parameters:
essentials: A dictionary that holds observation types that must
not be None for the post to go ahead.
manager_dict: A manager dictionary, to be used to open up a
database manager. Default is None.
post_interval: How long to wait between posts.
Default is None (post every record).
max_backlog: How many records are allowed to accumulate in the queue
before the queue is trimmed.
Default is sys.maxint (essentially, allow any number).
        stale: How old a record can be and still be considered useful.
Default is None (never becomes too old).
log_success: If True, log a successful post in the system log.
Default is True.
log_failure: If True, log an unsuccessful post in the system log.
Default is True.
timeout: How long to wait for the server to respond before giving up.
Default is 10 seconds.
max_tries: How many times to try the post before giving up.
Default is 3
        retry_wait: How long to wait between retries after a failure.
Default is 5 seconds.
retry_login: How long to wait before retrying a login. Default
is 3600 seconds (one hour).
        softwaretype: Sent as field "softwaretype" in the Ambient post.
                Default is "weewx-x.y.z", where x.y.z is the weewx version.
skip_upload: Do all record processing, but do not upload the result.
Useful for diagnostic purposes when local debugging should not
interfere with the downstream data service. Default is False.
"""
# Initialize my superclass:
threading.Thread.__init__(self, name=protocol_name)
self.setDaemon(True)
self.queue = queue
self.protocol_name = protocol_name
self.essentials = essentials
self.manager_dict = manager_dict
self.log_success = to_bool(log_success)
self.log_failure = to_bool(log_failure)
self.max_backlog = to_int(max_backlog)
self.max_tries = to_int(max_tries)
self.stale = to_int(stale)
self.post_interval = to_int(post_interval)
self.timeout = to_int(timeout)
self.retry_wait = to_int(retry_wait)
self.retry_login = to_int(retry_login)
self.softwaretype = softwaretype
self.lastpost = 0
self.skip_upload = to_bool(skip_upload)
def get_record(self, record, dbmanager):
"""Augment record data with additional data from the archive.
Should return results in the same units as the record and the database.
This is a general version that works for:
- WeatherUnderground
- PWSweather
- WOW
- CWOP
It can be overridden and specialized for additional protocols.
returns: A dictionary of weather values"""
# this will not work without a dbmanager
if dbmanager is None:
return record
_time_ts = record['dateTime']
_sod_ts = weeutil.weeutil.startOfDay(_time_ts)
# Make a copy of the record, then start adding to it:
_datadict = dict(record)
# If the type 'rain' does not appear in the archive schema,
# or the database is locked, an exception will be raised. Be prepared
# to catch it.
try:
if 'hourRain' not in _datadict:
# CWOP says rain should be "rain that fell in the past hour".
# WU says it should be "the accumulated rainfall in the past
# 60 min". Presumably, this is exclusive of the archive record
# 60 minutes before, so the SQL statement is exclusive on the
# left, inclusive on the right.
_result = dbmanager.getSql(
"SELECT SUM(rain), MIN(usUnits), MAX(usUnits) FROM %s "
"WHERE dateTime>? AND dateTime<=?" %
dbmanager.table_name, (_time_ts - 3600.0, _time_ts))
if _result is not None and _result[0] is not None:
if not _result[1] == _result[2] == record['usUnits']:
raise ValueError("Inconsistent units (%s vs %s vs %s) when querying for hourRain" %
(_result[1], _result[2], record['usUnits']))
_datadict['hourRain'] = _result[0]
else:
_datadict['hourRain'] = None
if 'rain24' not in _datadict:
# Similar issue, except for last 24 hours:
_result = dbmanager.getSql(
"SELECT SUM(rain), MIN(usUnits), MAX(usUnits) FROM %s "
"WHERE dateTime>? AND dateTime<=?" %
dbmanager.table_name, (_time_ts - 24 * 3600.0, _time_ts))
if _result is not None and _result[0] is not None:
if not _result[1] == _result[2] == record['usUnits']:
raise ValueError("Inconsistent units (%s vs %s vs %s) when querying for rain24" %
(_result[1], _result[2], record['usUnits']))
_datadict['rain24'] = _result[0]
else:
_datadict['rain24'] = None
if 'dayRain' not in _datadict:
            # NB: The WU considers the archive record with time stamp 00:00
# (midnight) as (wrongly) belonging to the current day
# (instead of the previous day). But, it's their site,
# so we'll do it their way. That means the SELECT statement
# is inclusive on both time ends:
_result = dbmanager.getSql(
"SELECT SUM(rain), MIN(usUnits), MAX(usUnits) FROM %s "
"WHERE dateTime>=? AND dateTime<=?" %
dbmanager.table_name, (_sod_ts, _time_ts))
if _result is not None and _result[0] is not None:
if not _result[1] == _result[2] == record['usUnits']:
raise ValueError("Inconsistent units (%s vs %s vs %s) when querying for dayRain" %
(_result[1], _result[2], record['usUnits']))
_datadict['dayRain'] = _result[0]
else:
_datadict['dayRain'] = None
except weedb.OperationalError as e:
syslog.syslog(syslog.LOG_DEBUG,
"restx: %s: Database OperationalError '%s'" %
(self.protocol_name, e))
return _datadict
def run(self):
"""If there is a database specified, open the database, then call
run_loop() with the database. If no database is specified, simply
call run_loop()."""
# Open up the archive. Use a 'with' statement. This will automatically
# close the archive in the case of an exception:
if self.manager_dict is not None:
with weewx.manager.open_manager(self.manager_dict) as _manager:
self.run_loop(_manager)
else:
self.run_loop()
def run_loop(self, dbmanager=None):
"""Runs a continuous loop, waiting for records to appear in the queue,
then processing them.
"""
while True:
while True:
# This will block until something appears in the queue:
_record = self.queue.get()
# A None record is our signal to exit:
if _record is None:
return
# If packets have backed up in the queue, trim it until it's
# no bigger than the max allowed backlog:
if self.queue.qsize() <= self.max_backlog:
break
if self.skip_this_post(_record['dateTime']):
continue
try:
# Process the record, using whatever method the specializing
# class provides
self.process_record(_record, dbmanager)
except AbortedPost as e:
if self.log_success:
_time_str = timestamp_to_string(_record['dateTime'])
syslog.syslog(syslog.LOG_INFO,
"restx: %s: Skipped record %s: %s" %
(self.protocol_name, _time_str, e))
except BadLogin:
syslog.syslog(syslog.LOG_ERR, "restx: %s: Bad login; "
"waiting %s minutes then retrying" %
(self.protocol_name, self.retry_login / 60.0))
time.sleep(self.retry_login)
except FailedPost as e:
if self.log_failure:
_time_str = timestamp_to_string(_record['dateTime'])
syslog.syslog(syslog.LOG_ERR,
"restx: %s: Failed to publish record %s: %s"
% (self.protocol_name, _time_str, e))
except Exception as e:
# Some unknown exception occurred. This is probably a serious
# problem. Exit.
syslog.syslog(syslog.LOG_CRIT,
"restx: %s: Unexpected exception of type %s" %
(self.protocol_name, type(e)))
weeutil.weeutil.log_traceback('*** ', syslog.LOG_DEBUG)
syslog.syslog(syslog.LOG_CRIT,
"restx: %s: Thread exiting. Reason: %s" %
(self.protocol_name, e))
return
else:
if self.log_success:
_time_str = timestamp_to_string(_record['dateTime'])
syslog.syslog(syslog.LOG_INFO,
"restx: %s: Published record %s" %
(self.protocol_name, _time_str))
def process_record(self, record, dbmanager):
"""Default version of process_record.
This version uses HTTP GETs to do the post, which should work for many
protocols, but it can always be replaced by a specializing class."""
# Get the full record by querying the database ...
_full_record = self.get_record(record, dbmanager)
# ... check it ...
self.check_this_record(_full_record)
# ... format the URL, using the relevant protocol ...
_url = self.format_url(_full_record)
# ... get the Request to go with it...
_request = self.get_request(_url)
# ... get any POST payload...
_payload = self.get_post_body(_full_record)
# ... add a proper Content-Type if needed...
if _payload:
_request.add_header('Content-Type', _payload[1])
data = _payload[0]
else:
data = None
# ... check to see if this is just a drill...
if self.skip_upload:
raise AbortedPost("Skip post")
# ... then, finally, post it
self.post_with_retries(_request, data)
def get_request(self, url):
"""Get a request object. This can be overridden to add any special headers."""
_request = urllib2.Request(url)
_request.add_header("User-Agent", "weewx/%s" % weewx.__version__)
return _request
def post_with_retries(self, request, data=None):
"""Post a request, retrying if necessary
Attempts to post the request object up to max_tries times.
Catches a set of generic exceptions.
request: An instance of urllib2.Request
data: The body of the POST. If not given, the request will be done as a GET.
"""
# Retry up to max_tries times:
for _count in range(self.max_tries):
try:
# Do a single post. The function post_request() can be
# specialized by a RESTful service to catch any unusual
# exceptions.
_response = self.post_request(request, data)
if 200 <= _response.code <= 299:
# No exception thrown and we got a good response code, but
# we're still not done. Some protocols encode a bad
# station ID or password in the return message.
# Give any interested protocols a chance to examine it.
# This must also be inside the try block because some
# implementations defer hitting the socket until the
# response is used.
self.check_response(_response)
# Does not seem to be an error. We're done.
return
# We got a bad response code. By default, log it and try again.
# Provide method for derived classes to behave otherwise if
# necessary.
self.handle_code(_response.code, _count + 1)
except (urllib2.URLError, socket.error, httplib.HTTPException) as e:
# An exception was thrown. By default, log it and try again.
# Provide method for derived classes to behave otherwise if
# necessary.
self.handle_exception(e, _count + 1)
time.sleep(self.retry_wait)
else:
# This is executed only if the loop terminates normally, meaning
# the upload failed max_tries times. Raise an exception. Caller
# can decide what to do with it.
raise FailedPost("Failed upload after %d tries" % (self.max_tries,))
def check_this_record(self, record):
"""Raises exception AbortedPost if the record should not be posted.
Otherwise, does nothing"""
for obs_type in self.essentials:
if self.essentials[obs_type] and record.get(obs_type) is None:
raise AbortedPost("Observation type %s missing" % obs_type)
def check_response(self, response):
"""Check the response from a HTTP post. This version does nothing."""
pass
def handle_code(self, code, count):
"""Check code from HTTP post. This simply logs the response."""
syslog.syslog(syslog.LOG_DEBUG,
"restx: %s: Failed upload attempt %d: Code %s" %
(self.protocol_name, count, code))
def handle_exception(self, e, count):
"""Check exception from HTTP post. This simply logs the exception."""
syslog.syslog(syslog.LOG_DEBUG,
"restx: %s: Failed upload attempt %d: %s" %
(self.protocol_name, count, e))
def post_request(self, request, data=None):
"""Post a request object. This version does not catch any HTTP
exceptions.
        Specializing versions can catch any unusual exceptions that might
get raised by their protocol.
request: An instance of urllib2.Request
data: If given, the request will be done as a POST. Otherwise,
as a GET. [optional]
"""
try:
# Python 2.5 and earlier do not have a "timeout" parameter.
# Including one could cause a TypeError exception. Be prepared
# to catch it.
_response = urllib2.urlopen(request, data=data, timeout=self.timeout)
except TypeError:
            # Must be Python 2.5 or earlier. Use a simple, unadorned request
_response = urllib2.urlopen(request, data=data)
return _response
def skip_this_post(self, time_ts):
"""Check whether the post is current"""
# Don't post if this record is too old
if self.stale is not None:
_how_old = time.time() - time_ts
if _how_old > self.stale:
syslog.syslog(
syslog.LOG_DEBUG,
"restx: %s: record %s is stale (%d > %d)." %
(self.protocol_name, timestamp_to_string(time_ts),
_how_old, self.stale))
return True
if self.post_interval is not None:
# We don't want to post more often than the post interval
_how_long = time_ts - self.lastpost
if _how_long < self.post_interval:
syslog.syslog(
syslog.LOG_DEBUG,
"restx: %s: wait interval (%d < %d) has not passed for record %s" %
(self.protocol_name, _how_long, self.post_interval,
timestamp_to_string(time_ts)))
return True
self.lastpost = time_ts
return False
def get_post_body(self, record): # @UnusedVariable
"""Return any POST payload.
The returned value should be a 2-way tuple. First element is the Python
object to be included as the payload. Second element is the MIME type it
is in (such as "application/json").
Return a simple 'None' if there is no POST payload. This is the default.
"""
# Maintain backwards compatibility with the old format_data() function.
body = self.format_data(record)
if body:
return (body, 'application/x-www-form-urlencoded')
return None
def format_data(self, record): # @UnusedVariable
"""Return a POST payload as an urlencoded object.
DEPRECATED. Use get_post_body() instead.
"""
return None
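# A minimal sketch of how a specializing class might use the hooks above; the
# class name, endpoint URL, and 'ERROR' marker are illustrative only, not a
# real weewx service. It posts each record as a JSON body via get_post_body()
# and scans the reply with check_response().
import json
class ExampleJSONThread(RESTThread):
    """Illustrative thread that does an HTTP POST with a JSON payload."""
    def format_url(self, record):  # @UnusedVariable
        # A fixed, hypothetical endpoint:
        return "http://www.example.com/weather/upload"
    def get_post_body(self, record):
        # Return the payload and its MIME type as a 2-way tuple. The default
        # process_record() will then do a POST instead of a GET.
        return json.dumps(record), 'application/json'
    def check_response(self, response):
        # Look for a failure marker in the response body:
        for line in response:
            if line.startswith('ERROR'):
                raise FailedPost(line)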
# ==============================================================================
# Ambient protocols
# ==============================================================================
class StdWunderground(StdRESTful):
"""Specialized version of the Ambient protocol for the Weather Underground.
"""
# the rapidfire URL:
rf_url = "https://rtupdate.wunderground.com/weatherstation/updateweatherstation.php"
# the personal weather station URL:
pws_url = "https://weatherstation.wunderground.com/weatherstation/updateweatherstation.php"
def __init__(self, engine, config_dict):
super(StdWunderground, self).__init__(engine, config_dict)
_ambient_dict = get_site_dict(
config_dict, 'Wunderground', 'station', 'password')
if _ambient_dict is None:
return
_essentials_dict = search_up(config_dict['StdRESTful']['Wunderground'], 'Essentials', {})
syslog.syslog(syslog.LOG_DEBUG, "restx: WU essentials: %s" % _essentials_dict)
# Get the manager dictionary:
_manager_dict = weewx.manager.get_manager_dict_from_config(
config_dict, 'wx_binding')
# The default is to not do an archive post if a rapidfire post
# has been specified, but this can be overridden
do_rapidfire_post = to_bool(_ambient_dict.pop('rapidfire', False))
do_archive_post = to_bool(_ambient_dict.pop('archive_post',
not do_rapidfire_post))
if do_archive_post:
_ambient_dict.setdefault('server_url', StdWunderground.pws_url)
self.archive_queue = Queue.Queue()
self.archive_thread = AmbientThread(
self.archive_queue,
_manager_dict,
protocol_name="Wunderground-PWS",
essentials=_essentials_dict,
**_ambient_dict)
self.archive_thread.start()
self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record)
syslog.syslog(syslog.LOG_INFO, "restx: Wunderground-PWS: "
"Data for station %s will be posted" %
_ambient_dict['station'])
if do_rapidfire_post:
_ambient_dict.setdefault('server_url', StdWunderground.rf_url)
_ambient_dict.setdefault('log_success', False)
_ambient_dict.setdefault('log_failure', False)
_ambient_dict.setdefault('max_backlog', 0)
_ambient_dict.setdefault('max_tries', 1)
_ambient_dict.setdefault('rtfreq', 2.5)
self.cached_values = CachedValues()
self.loop_queue = Queue.Queue()
self.loop_thread = AmbientLoopThread(
self.loop_queue,
_manager_dict,
protocol_name="Wunderground-RF",
essentials=_essentials_dict,
**_ambient_dict)
self.loop_thread.start()
self.bind(weewx.NEW_LOOP_PACKET, self.new_loop_packet)
syslog.syslog(syslog.LOG_INFO, "restx: Wunderground-RF: "
"Data for station %s will be posted" %
_ambient_dict['station'])
def new_loop_packet(self, event):
"""Puts new LOOP packets in the loop queue"""
if weewx.debug >= 3:
syslog.syslog(syslog.LOG_DEBUG, "restx: raw packet: %s" % to_sorted_string(event.packet))
self.cached_values.update(event.packet, event.packet['dateTime'])
if weewx.debug >= 3:
syslog.syslog(syslog.LOG_DEBUG, "restx: cached packet: %s" %
to_sorted_string(self.cached_values.get_packet(event.packet['dateTime'])))
self.loop_queue.put(
self.cached_values.get_packet(event.packet['dateTime']))
def new_archive_record(self, event):
"""Puts new archive records in the archive queue"""
self.archive_queue.put(event.record)
class CachedValues(object):
"""Dictionary of value-timestamp pairs. Each timestamp indicates when the
corresponding value was last updated."""
def __init__(self):
self.unit_system = None
self.values = dict()
def update(self, packet, ts):
# update the cache with values from the specified packet, using the
# specified timestamp.
for k in packet:
if k is None:
# well-formed packets do not have None as key, but just in case
continue
elif k == 'dateTime':
# do not cache the timestamp
continue
elif k == 'usUnits':
# assume unit system of first packet, then enforce consistency
if self.unit_system is None:
self.unit_system = packet['usUnits']
elif packet['usUnits'] != self.unit_system:
raise ValueError("Mixed units encountered in cache. %s vs %s" % \
(self.unit_system, packet['usUnits']))
else:
                # cache each value, associating it with the timestamp at which it was cached
self.values[k] = {'value': packet[k], 'ts': ts}
def get_value(self, k, ts, stale_age):
# get the value for the specified key. if the value is older than
# stale_age (seconds) then return None.
if k in self.values and ts - self.values[k]['ts'] < stale_age:
return self.values[k]['value']
return None
def get_packet(self, ts=None, stale_age=960):
if ts is None:
ts = int(time.time() + 0.5)
pkt = {'dateTime': ts, 'usUnits': self.unit_system}
for k in self.values:
pkt[k] = self.get_value(k, ts, stale_age)
return pkt
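# A minimal sketch of how CachedValues merges partial LOOP packets into one
# composite packet; the helper name and the values are illustrative only.
def _cached_values_example():
    cache = CachedValues()
    cache.update({'dateTime': 1000, 'usUnits': 1, 'outTemp': 20.5}, 1000)
    cache.update({'dateTime': 1002, 'usUnits': 1, 'windSpeed': 3.0}, 1002)
    # The composite packet carries both outTemp and windSpeed, stamped with
    # the requested time:
    return cache.get_packet(ts=1003)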
class StdPWSWeather(StdRESTful):
"""Specialized version of the Ambient protocol for PWSWeather"""
# The URL used by PWSWeather:
archive_url = "http://www.pwsweather.com/pwsupdate/pwsupdate.php"
def __init__(self, engine, config_dict):
super(StdPWSWeather, self).__init__(engine, config_dict)
_ambient_dict = get_site_dict(
config_dict, 'PWSweather', 'station', 'password')
if _ambient_dict is None:
return
# Get the manager dictionary:
_manager_dict = weewx.manager.get_manager_dict_from_config(
config_dict, 'wx_binding')
_ambient_dict.setdefault('server_url', StdPWSWeather.archive_url)
self.archive_queue = Queue.Queue()
self.archive_thread = AmbientThread(self.archive_queue, _manager_dict,
protocol_name="PWSWeather",
**_ambient_dict)
self.archive_thread.start()
self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record)
syslog.syslog(syslog.LOG_INFO, "restx: PWSWeather: "
"Data for station %s will be posted" %
_ambient_dict['station'])
def new_archive_record(self, event):
self.archive_queue.put(event.record)
# For backwards compatibility with early alpha versions:
StdPWSweather = StdPWSWeather
class StdWOW(StdRESTful):
"""Upload using the UK Met Office's WOW protocol.
For details of the WOW upload protocol, see
http://wow.metoffice.gov.uk/support/dataformats#dataFileUpload
"""
# The URL used by WOW:
archive_url = "http://wow.metoffice.gov.uk/automaticreading"
def __init__(self, engine, config_dict):
super(StdWOW, self).__init__(engine, config_dict)
_ambient_dict = get_site_dict(
config_dict, 'WOW', 'station', 'password')
if _ambient_dict is None:
return
# Get the manager dictionary:
_manager_dict = weewx.manager.get_manager_dict_from_config(
config_dict, 'wx_binding')
_ambient_dict.setdefault('server_url', StdWOW.archive_url)
_ambient_dict.setdefault('post_interval', 900)
self.archive_queue = Queue.Queue()
self.archive_thread = WOWThread(self.archive_queue, _manager_dict,
protocol_name="WOW",
**_ambient_dict)
self.archive_thread.start()
self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record)
syslog.syslog(syslog.LOG_INFO, "restx: WOW: "
"Data for station %s will be posted" %
_ambient_dict['station'])
def new_archive_record(self, event):
self.archive_queue.put(event.record)
class AmbientThread(RESTThread):
"""Concrete class for threads posting from the archive queue,
using the Ambient PWS protocol."""
def __init__(self,
queue,
manager_dict,
station, password, server_url,
post_indoor_observations=False,
protocol_name="Unknown-Ambient",
essentials={},
post_interval=None, max_backlog=sys.maxint, stale=None,
log_success=True, log_failure=True,
timeout=10, max_tries=3, retry_wait=5, retry_login=3600,
softwaretype="weewx-%s" % weewx.__version__,
skip_upload=False):
"""
Initializer for the AmbientThread class.
Parameters specific to this class:
station: The name of the station. For example, for the WU, this
would be something like "KORHOODR3".
password: Password used for the station.
server_url: An url where the server for this protocol can be found.
"""
super(AmbientThread, self).__init__(queue,
protocol_name=protocol_name,
essentials=essentials,
manager_dict=manager_dict,
post_interval=post_interval,
max_backlog=max_backlog,
stale=stale,
log_success=log_success,
log_failure=log_failure,
timeout=timeout,
max_tries=max_tries,
retry_wait=retry_wait,
retry_login=retry_login,
softwaretype=softwaretype,
skip_upload=skip_upload)
self.station = station
self.password = password
self.server_url = server_url
self.formats = dict(AmbientThread._FORMATS)
if to_bool(post_indoor_observations):
self.formats.update(AmbientThread._INDOOR_FORMATS)
# Types and formats of the data to be published:
_FORMATS = {'dateTime' : 'dateutc=%s',
'barometer' : 'baromin=%.3f',
'outTemp' : 'tempf=%.1f',
'outHumidity': 'humidity=%03.0f',
'windSpeed' : 'windspeedmph=%03.1f',
'windDir' : 'winddir=%03.0f',
'windGust' : 'windgustmph=%03.1f',
'dewpoint' : 'dewptf=%.1f',
'hourRain' : 'rainin=%.2f',
'dayRain' : 'dailyrainin=%.2f',
'radiation' : 'solarradiation=%.2f',
'UV' : 'UV=%.2f',
# The following four formats have been commented out until the WU
# fixes the bug that causes them to be displayed as soil moisture.
# 'extraTemp1' : "temp2f=%.1f",
# 'extraTemp2' : "temp3f=%.1f",
# 'extraTemp3' : "temp4f=%.1f",
# 'extraTemp4' : "temp5f=%.1f",
'soilTemp1' : "soiltempf=%.1f",
'soilTemp2' : "soiltemp2f=%.1f",
'soilTemp3' : "soiltemp3f=%.1f",
'soilTemp4' : "soiltemp4f=%.1f",
'soilMoist1' : "soilmoisture=%03.0f",
'soilMoist2' : "soilmoisture2=%03.0f",
'soilMoist3' : "soilmoisture3=%03.0f",
'soilMoist4' : "soilmoisture4=%03.0f",
'leafWet1' : "leafwetness=%03.0f",
'leafWet2' : "leafwetness2=%03.0f",
'realtime' : 'realtime=%d',
'rtfreq' : 'rtfreq=%.1f'}
_INDOOR_FORMATS = {
'inTemp' : 'indoortempf=%.1f',
'inHumidity': 'indoorhumidity=%.0f'}
def format_url(self, incoming_record):
"""Return an URL for posting using the Ambient protocol."""
record = weewx.units.to_US(incoming_record)
_liststr = ["action=updateraw",
"ID=%s" % self.station,
"PASSWORD=%s" % urllib.quote(self.password),
"softwaretype=%s" % self.softwaretype]
# Go through each of the supported types, formatting it, then adding
# to _liststr:
for _key in self.formats:
_v = record.get(_key)
# Check to make sure the type is not null
if _v is not None:
if _key == 'dateTime':
# For dates, convert from time stamp to a string, using
# what the Weather Underground calls "MySQL format." I've
# fiddled with formatting, and it seems that escaping the
# colons helps its reliability. But, I could be imagining
# things.
_v = urllib.quote(str(datetime.datetime.utcfromtimestamp(_v)))
# Format the value, and accumulate in _liststr:
_liststr.append(self.formats[_key] % _v)
# Now stick all the pieces together with an ampersand between them:
_urlquery = '&'.join(_liststr)
# This will be the complete URL for the HTTP GET:
_url = "%s?%s" % (self.server_url, _urlquery)
# show the url in the logs for debug, but mask any password
if weewx.debug >= 2:
syslog.syslog(syslog.LOG_DEBUG, "restx: Ambient: url: %s" %
re.sub(r"PASSWORD=[^\&]*", "PASSWORD=XXX", _url))
return _url
def check_response(self, response):
"""Check the HTTP response code for an Ambient related error."""
for line in response:
# PWSweather signals with 'ERROR', WU with 'INVALID':
if line.startswith('ERROR') or line.startswith('INVALID'):
# Bad login. No reason to retry. Raise an exception.
raise BadLogin(line)
class AmbientLoopThread(AmbientThread):
"""Version used for the Rapidfire protocol."""
def __init__(self, queue, manager_dict,
station, password, server_url,
protocol_name="Unknown-Ambient",
essentials={},
post_interval=None, max_backlog=sys.maxint, stale=None,
log_success=True, log_failure=True,
timeout=10, max_tries=3, retry_wait=5, rtfreq=2.5):
"""
Initializer for the AmbientLoopThread class.
Parameters specific to this class:
rtfreq: Frequency of update in seconds for RapidFire
"""
super(AmbientLoopThread, self).__init__(queue,
station=station,
password=password,
server_url=server_url,
protocol_name=protocol_name,
essentials=essentials,
manager_dict=manager_dict,
post_interval=post_interval,
max_backlog=max_backlog,
stale=stale,
log_success=log_success,
log_failure=log_failure,
timeout=timeout,
max_tries=max_tries,
retry_wait=retry_wait)
self.rtfreq = float(rtfreq)
self.formats.update(AmbientLoopThread.WUONLY_FORMATS)
    # windGustDir may also be supported outside of Rapidfire, but adding it here
    # is the least invasive way to fix Rapidfire (which is known to accept
    # windGustDir) without touching the Ambient class, which is used elsewhere.
WUONLY_FORMATS = {
'windGustDir' : 'windgustdir=%03.0f'}
def get_record(self, record, dbmanager):
"""Prepare a record for the Rapidfire protocol."""
# Call the regular Ambient PWS version
_record = AmbientThread.get_record(self, record, dbmanager)
# Add the Rapidfire-specific keywords:
_record['realtime'] = 1
_record['rtfreq'] = self.rtfreq
return _record
class WOWThread(AmbientThread):
"""Class for posting to the WOW variant of the Ambient protocol."""
# Types and formats of the data to be published:
_FORMATS = {'dateTime' : 'dateutc=%s',
'barometer' : 'baromin=%.3f',
'outTemp' : 'tempf=%.1f',
'outHumidity': 'humidity=%.0f',
'windSpeed' : 'windspeedmph=%.0f',
'windDir' : 'winddir=%.0f',
'windGust' : 'windgustmph=%.0f',
'windGustDir': 'windgustdir=%.0f',
'dewpoint' : 'dewptf=%.1f',
'hourRain' : 'rainin=%.2f',
'dayRain' : 'dailyrainin=%.2f'}
def format_url(self, incoming_record):
"""Return an URL for posting using WOW's version of the Ambient
protocol."""
record = weewx.units.to_US(incoming_record)
_liststr = ["action=updateraw",
"siteid=%s" % self.station,
"siteAuthenticationKey=%s" % self.password,
"softwaretype=weewx-%s" % weewx.__version__]
# Go through each of the supported types, formatting it, then adding
# to _liststr:
for _key in WOWThread._FORMATS:
_v = record.get(_key)
# Check to make sure the type is not null
if _v is not None:
if _key == 'dateTime':
_v = urllib.quote_plus(datetime.datetime.utcfromtimestamp(_v).isoformat(' '))
# Format the value, and accumulate in _liststr:
_liststr.append(WOWThread._FORMATS[_key] % _v)
# Now stick all the pieces together with an ampersand between them:
_urlquery = '&'.join(_liststr)
# This will be the complete URL for the HTTP GET:
_url = "%s?%s" % (self.server_url, _urlquery)
# show the url in the logs for debug, but mask any password
if weewx.debug >= 2:
syslog.syslog(syslog.LOG_DEBUG, "restx: WOW: url: %s" %
re.sub(r"siteAuthenticationKey=[^\&]*",
"siteAuthenticationKey=XXX", _url))
return _url
def post_request(self, request, data=None): # @UnusedVariable
"""Version of post_request() for the WOW protocol, which
uses a response error code to signal a bad login."""
try:
try:
_response = urllib2.urlopen(request, timeout=self.timeout)
except TypeError:
_response = urllib2.urlopen(request)
except urllib2.HTTPError as e:
# WOW signals a bad login with a HTML Error 400 or 403 code:
if e.code == 400 or e.code == 403:
raise BadLogin(e)
else:
raise
else:
return _response
# ==============================================================================
# CWOP
# ==============================================================================
class StdCWOP(StdRESTful):
"""Weewx service for posting using the CWOP protocol.
Manages a separate thread CWOPThread"""
# A regular expression that matches CWOP stations that
# don't need a passcode. This will match CW1234, etc.
valid_prefix_re = re.compile('[C-Z]W+[0-9]+')
# Default list of CWOP servers to try:
default_servers = ['cwop.aprs.net:14580', 'cwop.aprs.net:23']
def __init__(self, engine, config_dict):
super(StdCWOP, self).__init__(engine, config_dict)
_cwop_dict = get_site_dict(config_dict, 'CWOP', 'station')
if _cwop_dict is None:
return
_cwop_dict['station'] = _cwop_dict['station'].upper()
# See if this station requires a passcode:
if re.match(StdCWOP.valid_prefix_re, _cwop_dict['station']):
# It does not.
_cwop_dict.setdefault('passcode', '-1')
elif 'passcode' not in _cwop_dict:
syslog.syslog(syslog.LOG_NOTICE,
"APRS station %s requires passcode" %
_cwop_dict['station'])
return
# Get the database manager dictionary:
_manager_dict = weewx.manager.get_manager_dict_from_config(
config_dict, 'wx_binding')
_cwop_dict.setdefault('latitude', self.engine.stn_info.latitude_f)
_cwop_dict.setdefault('longitude', self.engine.stn_info.longitude_f)
_cwop_dict.setdefault('station_type', config_dict['Station'].get(
'station_type', 'Unknown'))
self.archive_queue = Queue.Queue()
self.archive_thread = CWOPThread(self.archive_queue, _manager_dict,
**_cwop_dict)
self.archive_thread.start()
self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record)
syslog.syslog(syslog.LOG_INFO, "restx: CWOP: "
"Data for station %s will be posted" %
_cwop_dict['station'])
def new_archive_record(self, event):
self.archive_queue.put(event.record)
class CWOPThread(RESTThread):
"""Concrete class for threads posting from the archive queue,
using the CWOP protocol."""
def __init__(self, queue, manager_dict,
station, passcode, latitude, longitude, station_type,
server_list=StdCWOP.default_servers,
post_interval=600, max_backlog=sys.maxint, stale=600,
log_success=True, log_failure=True,
timeout=10, max_tries=3, retry_wait=5, skip_upload=False):
"""
Initializer for the CWOPThread class.
Parameters specific to this class:
station: The name of the station. Something like "DW1234".
passcode: Some stations require a passcode.
latitude: Latitude of the station in decimal degrees.
longitude: Longitude of the station in decimal degrees.
station_type: The type of station. Generally, this is the driver
symbolic name, such as "Vantage".
server_list: A list of strings holding the CWOP server name and
port. Default is ['cwop.aprs.net:14580', 'cwop.aprs.net:23']
Parameters customized for this class:
post_interval: How long to wait between posts.
Default is 600 (every 10 minutes).
        stale: How old a record can be and still be considered useful.
                Default is 600 (ten minutes).
"""
# Initialize my superclass
super(CWOPThread, self).__init__(queue,
protocol_name="CWOP",
manager_dict=manager_dict,
post_interval=post_interval,
max_backlog=max_backlog,
stale=stale,
log_success=log_success,
log_failure=log_failure,
timeout=timeout,
max_tries=max_tries,
retry_wait=retry_wait,
skip_upload=skip_upload)
self.station = station
self.passcode = passcode
self.server_list = server_list
self.latitude = to_float(latitude)
self.longitude = to_float(longitude)
self.station_type = station_type
def process_record(self, record, dbmanager):
"""Process a record in accordance with the CWOP protocol."""
# Get the full record by querying the database ...
_full_record = self.get_record(record, dbmanager)
# ... convert to US if necessary ...
_us_record = weewx.units.to_US(_full_record)
# ... get the login and packet strings...
_login = self.get_login_string()
_tnc_packet = self.get_tnc_packet(_us_record)
if self.skip_upload:
raise AbortedPost("Skip post")
# ... then post them:
self.send_packet(_login, _tnc_packet)
def get_login_string(self):
_login = "user %s pass %s vers weewx %s\r\n" % (
self.station, self.passcode, weewx.__version__)
return _login
def get_tnc_packet(self, record):
"""Form the TNC2 packet used by CWOP."""
# Preamble to the TNC packet:
_prefix = "%s>APRS,TCPIP*:" % (self.station,)
# Time:
_time_tt = time.gmtime(record['dateTime'])
_time_str = time.strftime("@%d%H%Mz", _time_tt)
# Position:
_lat_str = weeutil.weeutil.latlon_string(self.latitude,
('N', 'S'), 'lat')
_lon_str = weeutil.weeutil.latlon_string(self.longitude,
('E', 'W'), 'lon')
_latlon_str = '%s%s%s/%s%s%s' % (_lat_str + _lon_str)
# Wind and temperature
_wt_list = []
for _obs_type in ['windDir', 'windSpeed', 'windGust', 'outTemp']:
_v = record.get(_obs_type)
_wt_list.append("%03d" % int(_v + 0.5) if _v is not None else '...')
_wt_str = "_%s/%sg%st%s" % tuple(_wt_list)
# Rain
_rain_list = []
for _obs_type in ['hourRain', 'rain24', 'dayRain']:
_v = record.get(_obs_type)
_rain_list.append("%03d" % int(_v * 100.0 + 0.5) if _v is not None else '...')
_rain_str = "r%sp%sP%s" % tuple(_rain_list)
# Barometer:
_baro = record.get('altimeter')
if _baro is None:
_baro_str = "b....."
else:
# While everything else in the CWOP protocol is in US Customary,
# they want the barometer in millibars.
_baro_vt = weewx.units.convert((_baro, 'inHg', 'group_pressure'),
'mbar')
_baro_str = "b%05d" % int(_baro_vt[0] * 10.0 + 0.5)
# Humidity:
_humidity = record.get('outHumidity')
if _humidity is None:
_humid_str = "h.."
else:
_humid_str = ("h%02d" % int(_humidity + 0.5) ) if _humidity < 99.5 else "h00"
# Radiation:
_radiation = record.get('radiation')
if _radiation is None:
_radiation_str = ""
elif _radiation < 999.5:
_radiation_str = "L%03d" % int(_radiation + 0.5)
elif _radiation < 1999.5:
_radiation_str = "l%03d" % int(_radiation - 1000 + 0.5)
else:
_radiation_str = ""
# Station equipment
_equipment_str = ".weewx-%s-%s" % (weewx.__version__, self.station_type)
_tnc_packet = ''.join([_prefix, _time_str, _latlon_str, _wt_str,
_rain_str, _baro_str, _humid_str,
_radiation_str, _equipment_str, "\r\n"])
# show the packet in the logs for debug
if weewx.debug >= 2:
syslog.syslog(syslog.LOG_DEBUG, 'restx: CWOP: packet: %s' %
_tnc_packet)
return _tnc_packet
def send_packet(self, login, tnc_packet):
# Go through the list of known server:ports, looking for
# a connection that works:
for _serv_addr_str in self.server_list:
try:
_server, _port_str = _serv_addr_str.split(":")
_port = int(_port_str)
except ValueError:
syslog.syslog(syslog.LOG_ALERT,
"restx: %s: Bad server address: '%s'; ignored" %
(self.protocol_name, _serv_addr_str))
continue
# Try each combination up to max_tries times:
for _count in range(self.max_tries):
try:
# Get a socket connection:
_sock = self._get_connect(_server, _port)
syslog.syslog(syslog.LOG_DEBUG,
"restx: %s: Connected to server %s:%d" %
(self.protocol_name, _server, _port))
try:
# Send the login ...
self._send(_sock, login, 'login')
# ... and then the packet
self._send(_sock, tnc_packet, 'packet')
return
finally:
_sock.close()
except ConnectError as e:
syslog.syslog(
syslog.LOG_DEBUG,
"restx: %s: Attempt %d to %s:%d. Connection error: %s"
% (self.protocol_name, _count + 1, _server, _port, e))
except SendError as e:
syslog.syslog(
syslog.LOG_DEBUG,
"restx: %s: Attempt %d to %s:%d. Socket send error: %s"
% (self.protocol_name, _count + 1, _server, _port, e))
# If we get here, the loop terminated normally, meaning we failed
# all tries
raise FailedPost("Tried %d servers %d times each" %
(len(self.server_list), self.max_tries))
def _get_connect(self, server, port):
"""Get a socket connection to a specific server and port."""
_sock = None
try:
_sock = socket.socket()
_sock.connect((server, port))
except IOError as e:
# Unsuccessful. Close it in case it was open:
try:
_sock.close()
except (AttributeError, socket.error):
pass
raise ConnectError(e)
return _sock
def _send(self, sock, msg, dbg_msg):
"""Send a message to a specific socket."""
try:
sock.send(msg)
except IOError as e:
# Unsuccessful. Log it and go around again for another try
raise SendError("Packet %s; Error %s" % (dbg_msg, e))
else:
# Success. Look for response from the server.
try:
_resp = sock.recv(1024)
return _resp
except IOError as e:
syslog.syslog(
syslog.LOG_DEBUG,
"restx: %s: Exception %s (%s) when looking for response to %s packet" %
(self.protocol_name, type(e), e, dbg_msg))
return
# ==============================================================================
# Station Registry
# ==============================================================================
class StdStationRegistry(StdRESTful):
"""Class for phoning home to register a weewx station.
To enable this module, add the following to weewx.conf:
[StdRESTful]
[[StationRegistry]]
register_this_station = True
    This will periodically do an HTTP GET with the following information:
station_url Should be world-accessible. Used as key.
description Brief synopsis of the station
latitude Station latitude in decimal
longitude Station longitude in decimal
station_type The driver name, for example Vantage, FineOffsetUSB
station_model The hardware_name property from the driver
weewx_info weewx version
python_info
platform_info
The station_url is the unique key by which a station is identified.
"""
archive_url = 'http://weewx.com/register/register.cgi'
def __init__(self, engine, config_dict):
super(StdStationRegistry, self).__init__(engine, config_dict)
# Extract a copy of the dictionary with the registry options:
_registry_dict = accumulateLeaves(config_dict['StdRESTful']['StationRegistry'], max_level=1)
# Should the service be run?
if not to_bool(_registry_dict.pop('register_this_station', False)):
syslog.syslog(syslog.LOG_INFO, "restx: StationRegistry: "
"Registration not requested.")
return
# Registry requires a valid station url
_registry_dict.setdefault('station_url',
self.engine.stn_info.station_url)
if _registry_dict['station_url'] is None:
syslog.syslog(syslog.LOG_INFO, "restx: StationRegistry: "
"Station will not be registered: no station_url specified.")
return
_registry_dict.setdefault('station_type', config_dict['Station'].get('station_type', 'Unknown'))
_registry_dict.setdefault('description', self.engine.stn_info.location)
_registry_dict.setdefault('latitude', self.engine.stn_info.latitude_f)
_registry_dict.setdefault('longitude', self.engine.stn_info.longitude_f)
_registry_dict.setdefault('station_model', self.engine.stn_info.hardware)
self.archive_queue = Queue.Queue()
self.archive_thread = StationRegistryThread(self.archive_queue,
**_registry_dict)
self.archive_thread.start()
self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record)
syslog.syslog(syslog.LOG_INFO, "restx: StationRegistry: "
"Station will be registered.")
def new_archive_record(self, event):
self.archive_queue.put(event.record)
class StationRegistryThread(RESTThread):
"""Concrete threaded class for posting to the weewx station registry."""
def __init__(self, queue, station_url, latitude, longitude,
server_url=StdStationRegistry.archive_url,
description="Unknown",
station_type="Unknown", station_model="Unknown",
post_interval=604800, max_backlog=0, stale=None,
log_success=True, log_failure=True,
timeout=60, max_tries=3, retry_wait=5):
"""Initialize an instance of StationRegistryThread.
Parameters specific to this class:
station_url: An URL used to identify the station. This will be
used as the unique key in the registry to identify each station.
        latitude: Latitude of the station
longitude: Longitude of the station
server_url: The URL of the registry server.
Default is 'http://weewx.com/register/register.cgi'
description: A brief description of the station.
Default is 'Unknown'
station_type: The type of station. Generally, this is the name of
the driver used by the station.
Default is 'Unknown'
station_model: The hardware model, typically the hardware_name
property provided by the driver.
Default is 'Unknown'.
Parameters customized for this class:
post_interval: How long to wait between posts.
Default is 604800 seconds (1 week).
"""
super(StationRegistryThread, self).__init__(
queue,
protocol_name='StationRegistry',
post_interval=post_interval,
max_backlog=max_backlog,
stale=stale,
log_success=log_success,
log_failure=log_failure,
timeout=timeout,
max_tries=max_tries,
retry_wait=retry_wait)
self.station_url = station_url
self.latitude = to_float(latitude)
self.longitude = to_float(longitude)
self.server_url = server_url
self.description = weeutil.weeutil.list_as_string(description)
self.station_type = station_type
self.station_model = station_model
def get_record(self, dummy_record, dummy_archive):
_record = dict()
_record['station_url'] = self.station_url
_record['description'] = self.description
_record['latitude'] = self.latitude
_record['longitude'] = self.longitude
_record['station_type'] = self.station_type
_record['station_model'] = self.station_model
_record['python_info'] = platform.python_version()
_record['platform_info'] = platform.platform()
_record['weewx_info'] = weewx.__version__
_record['usUnits'] = weewx.US
return _record
_FORMATS = {'station_url' : 'station_url=%s',
'description' : 'description=%s',
'latitude' : 'latitude=%.4f',
'longitude' : 'longitude=%.4f',
'station_type' : 'station_type=%s',
'station_model': 'station_model=%s',
'python_info' : 'python_info=%s',
'platform_info': 'platform_info=%s',
'weewx_info' : 'weewx_info=%s'}
def format_url(self, record):
"""Return an URL for posting using the StationRegistry protocol."""
_liststr = []
for _key in StationRegistryThread._FORMATS:
v = record[_key]
if v is not None:
_liststr.append(urllib.quote_plus(
StationRegistryThread._FORMATS[_key] % v, '='))
_urlquery = '&'.join(_liststr)
_url = "%s?%s" % (self.server_url, _urlquery)
return _url
def check_response(self, response):
"""Check the response from a Station Registry post."""
for line in response:
# the server replies to a bad post with a line starting with "FAIL"
if line.startswith('FAIL'):
raise FailedPost(line)
# ==============================================================================
# AWEKAS
# ==============================================================================
class StdAWEKAS(StdRESTful):
"""Upload data to AWEKAS - Automatisches WEtterKArten System
http://www.awekas.at
To enable this module, add the following to weewx.conf:
[StdRESTful]
[[AWEKAS]]
enable = True
username = AWEKAS_USERNAME
password = AWEKAS_PASSWORD
The AWEKAS server expects a single string of values delimited by
semicolons. The position of each value matters, for example position 1
is the awekas username and position 2 is the awekas password.
Positions 1-25 are defined for the basic API:
Pos1: user (awekas username)
Pos2: password (awekas password MD5 Hash)
Pos3: date (dd.mm.yyyy) (varchar)
Pos4: time (hh:mm) (varchar)
Pos5: temperature (C) (float)
Pos6: humidity (%) (int)
Pos7: air pressure (hPa) (float) [22dec15. This should be SLP. -tk personal communications]
Pos8: precipitation (rain at this day) (float)
    Pos9: wind speed (km/h) (float)
Pos10: wind direction (degree) (int)
Pos11: weather condition (int)
0=clear warning
1=clear
2=sunny sky
3=partly cloudy
4=cloudy
        5=heavy cloudy
6=overcast sky
7=fog
8=rain showers
9=heavy rain showers
10=light rain
11=rain
12=heavy rain
13=light snow
14=snow
15=light snow showers
16=snow showers
17=sleet
18=hail
19=thunderstorm
20=storm
21=freezing rain
22=warning
23=drizzle
24=heavy snow
25=heavy snow showers
Pos12: warning text (varchar)
Pos13: snow high (cm) (int) if no snow leave blank
Pos14: language (varchar)
de=german; en=english; it=italian; fr=french; nl=dutch
Pos15: tendency (int)
-2 = high falling
-1 = falling
0 = steady
1 = rising
2 = high rising
    Pos16: wind gust (km/h) (float)
Pos17: solar radiation (W/m^2) (float)
Pos18: UV Index (float)
Pos19: brightness (LUX) (int)
Pos20: sunshine hours today (float)
Pos21: soil temperature (degree C) (float)
Pos22: rain rate (mm/h) (float)
Pos23: software flag NNNN_X.Y, for example, WLIP_2.15
Pos24: longitude (float)
Pos25: latitude (float)
positions 26-111 are defined for API2
"""
def __init__(self, engine, config_dict):
super(StdAWEKAS, self).__init__(engine, config_dict)
site_dict = get_site_dict(
config_dict, 'AWEKAS', 'username', 'password')
if site_dict is None:
return
site_dict.setdefault('latitude', engine.stn_info.latitude_f)
site_dict.setdefault('longitude', engine.stn_info.longitude_f)
site_dict.setdefault('language', 'de')
site_dict['manager_dict'] = weewx.manager.get_manager_dict_from_config(
config_dict, 'wx_binding')
self.archive_queue = Queue.Queue()
self.archive_thread = AWEKASThread(self.archive_queue, **site_dict)
self.archive_thread.start()
self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record)
syslog.syslog(syslog.LOG_INFO, "restx: AWEKAS: "
"Data will be uploaded for user %s" %
site_dict['username'])
def new_archive_record(self, event):
self.archive_queue.put(event.record)
# For compatibility with some early alpha versions:
AWEKAS = StdAWEKAS
class AWEKASThread(RESTThread):
_SERVER_URL = 'http://data.awekas.at/eingabe_pruefung.php'
_FORMATS = {'barometer' : '%.3f',
'outTemp' : '%.1f',
'outHumidity': '%.0f',
'windSpeed' : '%.1f',
'windDir' : '%.0f',
'windGust' : '%.1f',
'dewpoint' : '%.1f',
'hourRain' : '%.2f',
'dayRain' : '%.2f',
'radiation' : '%.2f',
'UV' : '%.2f',
'rainRate' : '%.2f'}
def __init__(self, queue, username, password, latitude, longitude,
manager_dict,
language='de', server_url=_SERVER_URL,
post_interval=300, max_backlog=sys.maxint, stale=None,
log_success=True, log_failure=True,
timeout=60, max_tries=3, retry_wait=5, retry_login=3600, skip_upload=False):
"""Initialize an instances of AWEKASThread.
Parameters specific to this class:
username: AWEKAS user name
password: AWEKAS password
language: Possible values include de, en, it, fr, nl
Default is de
latitude: Station latitude in decimal degrees
Default is station latitude
longitude: Station longitude in decimal degrees
Default is station longitude
manager_dict: A dictionary holding the database manager
information. It will be used to open a connection to the archive
database.
server_url: URL of the server
Default is the AWEKAS site
Parameters customized for this class:
post_interval: The interval in seconds between posts. AWEKAS requests
that uploads happen no more often than 5 minutes, so this should be
set to no less than 300. Default is 300
"""
super(AWEKASThread, self).__init__(queue,
protocol_name='AWEKAS',
manager_dict=manager_dict,
post_interval=post_interval,
max_backlog=max_backlog,
stale=stale,
log_success=log_success,
log_failure=log_failure,
timeout=timeout,
max_tries=max_tries,
retry_wait=retry_wait,
retry_login=retry_login,
skip_upload=skip_upload)
self.username = username
self.password = password
self.latitude = float(latitude)
self.longitude = float(longitude)
self.language = language
self.server_url = server_url
def get_record(self, record, dbmanager):
"""Ensure that rainRate is in the record."""
# Get the record from my superclass
r = super(AWEKASThread, self).get_record(record, dbmanager)
# If rain rate is already available, return the record
if 'rainRate' in r:
return r
# Otherwise, augment with rainRate, which AWEKAS expects. If the
# archive does not have rainRate, an exception will be raised.
        # Be prepared to catch it.
try:
rr = dbmanager.getSql('select rainRate from %s where dateTime=?' %
dbmanager.table_name, (r['dateTime'],))
except weedb.OperationalError:
pass
else:
# There should be a record in the database with timestamp r['dateTime'], but check
# just in case:
if rr:
r['rainRate'] = rr[0]
return r
def process_record(self, record, dbmanager):
r = self.get_record(record, dbmanager)
url = self.get_url(r)
if self.skip_upload:
raise AbortedPost("Skip post")
req = urllib2.Request(url)
req.add_header("User-Agent", "weewx/%s" % weewx.__version__)
self.post_with_retries(req)
def check_response(self, response):
for line in response:
if line.startswith("Benutzer/Passwort Fehler"):
raise BadLogin(line)
elif not line.startswith('OK'):
raise FailedPost("server returned '%s'" % line)
def get_url(self, in_record):
# Convert to units required by awekas
record = weewx.units.to_METRIC(in_record)
if 'dayRain' in record and record['dayRain'] is not None:
record['dayRain'] *= 10
if 'rainRate' in record and record['rainRate'] is not None:
record['rainRate'] *= 10
# assemble an array of values in the proper order
values = [self.username]
m = hashlib.md5()
m.update(self.password)
values.append(m.hexdigest())
time_tt = time.gmtime(record['dateTime'])
values.append(time.strftime("%d.%m.%Y", time_tt))
values.append(time.strftime("%H:%M", time_tt))
values.append(self._format(record, 'outTemp')) # C
values.append(self._format(record, 'outHumidity')) # %
values.append(self._format(record, 'barometer')) # mbar
values.append(self._format(record, 'dayRain')) # mm
values.append(self._format(record, 'windSpeed')) # km/h
values.append(self._format(record, 'windDir'))
values.append('') # weather condition
values.append('') # warning text
values.append('') # snow high
values.append(self.language)
values.append('') # tendency
values.append(self._format(record, 'windGust')) # km/h
values.append(self._format(record, 'radiation')) # W/m^2
values.append(self._format(record, 'UV')) # uv index
values.append('') # brightness in lux
values.append('') # sunshine hours
values.append('') # soil temperature
values.append(self._format(record, 'rainRate')) # mm/h
values.append('weewx_%s' % weewx.__version__)
values.append(str(self.longitude))
values.append(str(self.latitude))
valstr = ';'.join(values)
url = self.server_url + '?val=' + valstr
# show the url in the logs for debug, but mask any credentials
if weewx.debug >= 2:
syslog.syslog(syslog.LOG_DEBUG, 'restx: AWEKAS: url: %s' %
re.sub(m.hexdigest(), "XXX", url))
return url
def _format(self, record, label):
if label in record and record[label] is not None:
if label in self._FORMATS:
return self._FORMATS[label] % record[label]
return str(record[label])
return ''
###############################################################################
def get_site_dict(config_dict, service, *args):
"""Obtain the site options, with defaults from the StdRESTful section.
If the service is not enabled, or if one or more required parameters is
not specified, then return None."""
try:
site_dict = accumulateLeaves(config_dict['StdRESTful'][service],
max_level=1)
except KeyError:
syslog.syslog(syslog.LOG_INFO, "restx: %s: "
"No config info. Skipped." % service)
return None
# If site_dict has the key 'enable' and it is False, then
# the service is not enabled.
try:
if not to_bool(site_dict['enable']):
syslog.syslog(syslog.LOG_INFO, "restx: %s: "
"Posting not enabled." % service)
return None
except KeyError:
pass
# At this point, either the key 'enable' does not exist, or
# it is set to True. Check to see whether all the needed
# options exist, and none of them have been set to 'replace_me':
try:
for option in args:
if site_dict[option] == 'replace_me':
raise KeyError(option)
except KeyError as e:
syslog.syslog(syslog.LOG_DEBUG, "restx: %s: "
"Data will not be posted: Missing option %s" %
(service, e))
return None
# If the site dictionary does not have a log_success or log_failure, get
# them from the root dictionary
site_dict.setdefault('log_success', to_bool(config_dict.get('log_success', True)))
site_dict.setdefault('log_failure', to_bool(config_dict.get('log_failure', True)))
# Get rid of the no longer needed key 'enable':
site_dict.pop('enable', None)
return site_dict
# For backward compatibility pre 3.6.0
check_enable = get_site_dict
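# A minimal sketch of a weewx.conf fragment that get_site_dict() would accept;
# the station id and password are placeholders. Posting is skipped when
# 'enable' is false, or while a required option is still set to 'replace_me'.
#
#   [StdRESTful]
#       [[Wunderground]]
#           enable = true
#           station = KEXAMPLE1
#           password = secret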
| 41.696703 | 171 | 0.565017 |
62abc6e722ab3048b766b8a62e31c6a3ff721910 | 30,481 | py | Python
| datalad/interface/base.py | andycon/datalad | 47614e0b75cefc0ba59b2efe579c5ce540ebdcd9 | ["MIT"] | null | null | null
| datalad/interface/base.py | andycon/datalad | 47614e0b75cefc0ba59b2efe579c5ce540ebdcd9 | ["MIT"] | 21 | 2015-03-12T11:48:18.000Z | 2020-08-31T09:28:05.000Z
| datalad/interface/base.py | bpoldrack/datalad | 6d871f8e7ce033f5d79a1aee63ab4e2761028ff6 | ["MIT"] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""High-level interface generation
"""
__docformat__ = 'restructuredtext'
import logging
lgr = logging.getLogger('datalad.interface.base')
import os
import sys
import re
import textwrap
from importlib import import_module
import inspect
import string
from collections import (
defaultdict,
OrderedDict,
)
from ..ui import ui
from ..dochelpers import exc_str
from datalad.interface.common_opts import eval_params
from datalad.interface.common_opts import eval_defaults
from datalad.support.constraints import (
EnsureKeyChoice,
EnsureChoice,
)
from datalad.distribution.dataset import Dataset
from datalad.distribution.dataset import resolve_path
from datalad.plugin import _get_plugins
from datalad.plugin import _load_plugin
default_logchannels = {
'': 'debug',
'ok': 'debug',
'notneeded': 'debug',
'impossible': 'warning',
'error': 'error',
}
def get_api_name(intfspec):
"""Given an interface specification return an API name for it"""
if len(intfspec) > 3:
name = intfspec[3]
else:
name = intfspec[0].split('.')[-1]
return name
def get_cmdline_command_name(intfspec):
"""Given an interface specification return a cmdline command name"""
if len(intfspec) > 2:
name = intfspec[2]
else:
name = intfspec[0].split('.')[-1].replace('_', '-')
return name
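# A minimal sketch (with a hypothetical specification, not a real datalad
# interface) of how the two helpers above derive names:
def _intfspec_name_example():
    spec = ('datalad.foo.bar_cmd', 'BarCmd')
    assert get_api_name(spec) == 'bar_cmd'
    assert get_cmdline_command_name(spec) == 'bar-cmd'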
def get_interface_groups(include_plugins=False):
"""Return a list of command groups.
Parameters
----------
include_plugins : bool, optional
Whether to include a group named 'plugins' that has a list of
discovered plugin commands.
Returns
-------
A list of tuples with the form (GROUP_NAME, GROUP_DESCRIPTION, COMMANDS).
"""
from .. import interface as _interfaces
grps = []
# auto detect all available interfaces and generate a function-based
# API from them
for _item in _interfaces.__dict__:
if not _item.startswith('_group_'):
continue
grp_name = _item[7:]
grp = getattr(_interfaces, _item)
grps.append((grp_name,) + grp)
# TODO(yoh): see if we could retain "generator" for plugins
# ATM we need to make it explicit so we could check the command(s) below
# It could at least follow the same destiny as extensions so we would
# just do more iterative "load ups"
if include_plugins:
grps.append(('plugins', 'Plugins', list(_get_plugins())))
return grps
def get_cmd_summaries(descriptions, groups, width=79):
"""Return summaries for the commands in `groups`.
Parameters
----------
descriptions : dict
A map of group names to summaries.
groups : list of tuples
A list of groups and commands in the form described by
`get_interface_groups`.
width : int, optional
The maximum width of each line in the summary text.
Returns
-------
A list with a formatted entry for each command. The first command of each
group is preceded by an entry describing the group.
"""
cmd_summary = []
for grp in sorted(groups, key=lambda x: x[1]):
grp_descr = grp[1]
grp_cmds = descriptions[grp[0]]
cmd_summary.append('\n*%s*\n' % (grp_descr,))
for cd in grp_cmds:
cmd_summary.append(' %s\n%s'
% ((cd[0],
textwrap.fill(
cd[1].rstrip(' .'),
width - 5,
initial_indent=' ' * 6,
subsequent_indent=' ' * 6))))
return cmd_summary
def load_interface(spec):
"""Load and return the class for `spec`.
Parameters
----------
spec : tuple
        For a standard interface, the first item is the datalad source module
        and the second is the object name of the interface. For a plugin, the
        second item should be a dictionary that maps 'file' to the path of the
        module.
Returns
-------
The interface class or, if importing the module fails, None.
"""
if isinstance(spec[1], dict):
intf = _load_plugin(spec[1]['file'], fail=False)
else:
lgr.log(5, "Importing module %s " % spec[0])
try:
mod = import_module(spec[0], package='datalad')
except Exception as e:
lgr.error("Internal error, cannot import interface '%s': %s",
spec[0], exc_str(e))
intf = None
else:
intf = getattr(mod, spec[1])
return intf
def get_cmd_doc(interface):
"""Return the documentation for the command defined by `interface`.
Parameters
----------
interface : subclass of Interface
"""
intf_doc = '' if interface.__doc__ is None else interface.__doc__.strip()
if hasattr(interface, '_docs_'):
# expand docs
intf_doc = intf_doc.format(**interface._docs_)
return intf_doc
def get_cmd_ex(interface):
"""Return the examples for the command defined by 'interface'.
Parameters
----------
interface : subclass of Interface
"""
intf_ex = "\n\n*Examples*\n\n"
for example in interface._examples_:
intf_ex += build_example(example, api='cmdline')
return intf_ex
def dedent_docstring(text):
"""Remove uniform indentation from a multiline docstring"""
    # The problem is that the first line often has no offset, so it may
    # need to be excluded from the dedent call
if text is None:
return None
if not text.startswith(' '):
lines = text.split('\n')
if len(lines) == 1:
# single line, no indentation, nothing to do
return text
text2 = '\n'.join(lines[1:])
return lines[0] + "\n" + textwrap.dedent(text2)
else:
return textwrap.dedent(text)
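# A minimal sketch (with a made-up docstring) of the first-line handling above:
# the summary line is kept as-is while only the body is dedented.
def _dedent_docstring_example():
    doc = "Summary line\n    Indented body\n    More body"
    return dedent_docstring(doc)  # "Summary line\nIndented body\nMore body"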
def alter_interface_docs_for_api(docs):
"""Apply modifications to interface docstrings for Python API use."""
# central place to alter the impression of docstrings,
# like removing cmdline specific sections
if not docs:
return docs
docs = dedent_docstring(docs)
# clean cmdline sections
docs = re.sub(
r'\|\| CMDLINE \>\>.*?\<\< CMDLINE \|\|',
'',
docs,
flags=re.MULTILINE | re.DOTALL)
# clean cmdline in-line bits
docs = re.sub(
r'\[CMD:\s[^\[\]]*\sCMD\]',
'',
docs,
flags=re.MULTILINE | re.DOTALL)
docs = re.sub(
r'\[PY:\s([^\[\]]*)\sPY\]',
lambda match: match.group(1),
docs,
flags=re.MULTILINE)
docs = re.sub(
r'\|\| PYTHON \>\>(.*?)\<\< PYTHON \|\|',
lambda match: match.group(1),
docs,
flags=re.MULTILINE | re.DOTALL)
if 'DATALAD_SPHINX_RUN' not in os.environ:
# remove :role:`...` RST markup for cmdline docs
docs = re.sub(
r':\S+:`[^`]*`[\\]*',
lambda match: ':'.join(match.group(0).split(':')[2:]).strip('`\\'),
docs,
flags=re.MULTILINE | re.DOTALL)
# make the handbook doc references more accessible
# the URL is a redirect configured at readthedocs
docs = re.sub(
r'(handbook:[0-9]-[0-9]*)',
'\\1 (http://handbook.datalad.org/symbols)',
docs)
docs = re.sub(
r'\|\| REFLOW \>\>\n(.*?)\<\< REFLOW \|\|',
lambda match: textwrap.fill(match.group(1)),
docs,
flags=re.MULTILINE | re.DOTALL)
return docs
def alter_interface_docs_for_cmdline(docs):
"""Apply modifications to interface docstrings for cmdline doc use."""
# central place to alter the impression of docstrings,
# like removing Python API specific sections, and argument markup
if not docs:
return docs
docs = dedent_docstring(docs)
    # clean Python-only sections
docs = re.sub(
r'\|\| PYTHON \>\>.*?\<\< PYTHON \|\|',
'',
docs,
flags=re.MULTILINE | re.DOTALL)
    # clean Python-only in-line bits
docs = re.sub(
r'\[PY:\s[^\[\]]*\sPY\]',
'',
docs,
flags=re.MULTILINE | re.DOTALL)
docs = re.sub(
r'\[CMD:\s([^\[\]]*)\sCMD\]',
lambda match: match.group(1),
docs,
flags=re.MULTILINE)
docs = re.sub(
r'\|\| CMDLINE \>\>(.*?)\<\< CMDLINE \|\|',
lambda match: match.group(1),
docs,
flags=re.MULTILINE | re.DOTALL)
# remove :role:`...` RST markup for cmdline docs
docs = re.sub(
r':\S+:`[^`]*`[\\]*',
lambda match: ':'.join(match.group(0).split(':')[2:]).strip('`\\'),
docs,
flags=re.MULTILINE | re.DOTALL)
# make the handbook doc references more accessible
# the URL is a redirect configured at readthedocs
docs = re.sub(
r'(handbook:[0-9]-[0-9]*)',
'\\1 (http://handbook.datalad.org/symbols)',
docs)
# remove None constraint. In general, `None` on the cmdline means don't
# give option at all, but specifying `None` explicitly is practically
# impossible
docs = re.sub(
r',\sor\svalue\smust\sbe\s`None`',
'',
docs,
flags=re.MULTILINE | re.DOTALL)
# capitalize variables and remove backticks to uniformize with
# argparse output
docs = re.sub(
r'`\S*`',
lambda match: match.group(0).strip('`').upper(),
docs)
# clean up sphinx API refs
docs = re.sub(
r'\~datalad\.api\.\S*',
lambda match: "`{0}`".format(match.group(0)[13:]),
docs)
# Remove RST paragraph markup
docs = re.sub(
r'^.. \S+::',
lambda match: match.group(0)[3:-2].upper(),
docs,
flags=re.MULTILINE)
docs = re.sub(
r'\|\| REFLOW \>\>\n(.*?)\<\< REFLOW \|\|',
lambda match: textwrap.fill(match.group(1)),
docs,
flags=re.MULTILINE | re.DOTALL)
return docs
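# Editor's illustrative sketch (not part of datalad): the [PY: ... PY] and
# [CMD: ... CMD] markers select per-frontend wording; the sentence is made up.
def _demo_alter_interface_docs():
    raw = "Use [PY: `get()` PY][CMD: the get command CMD] to fetch content."
    py_doc = alter_interface_docs_for_api(raw)       # "Use `get()` to fetch content."
    cmd_doc = alter_interface_docs_for_cmdline(raw)  # "Use the get command to fetch content."
    return py_doc, cmd_doc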
def is_api_arg(arg):
"""Return True if argument is our API argument or self or used for internal
purposes
"""
return arg != 'self' and not arg.startswith('_')
def update_docstring_with_parameters(func, params, prefix=None, suffix=None,
add_args=None):
"""Generate a useful docstring from a parameter spec
Amends any existing docstring of a callable with a textual
description of its parameters. The Parameter spec needs to match
the number and names of the callables arguments.
"""
from datalad.utils import getargspec
# get the signature
ndefaults = 0
args, varargs, varkw, defaults = getargspec(func)
if add_args:
add_argnames = sorted(add_args.keys())
args.extend(add_argnames)
defaults = defaults + tuple(add_args[k] for k in add_argnames)
if defaults is not None:
ndefaults = len(defaults)
# start documentation with what the callable brings with it
doc = prefix if prefix else u''
if len(args) > 1:
if len(doc):
doc += '\n'
doc += "Parameters\n----------\n"
for i, arg in enumerate(args):
if not is_api_arg(arg):
continue
# we need a parameter spec for each argument
        if arg not in params:
raise ValueError("function has argument '%s' not described as a parameter" % arg)
param = params[arg]
# validate the default -- to make sure that the parameter description is
# somewhat OK
defaults_idx = ndefaults - len(args) + i
if defaults_idx >= 0:
if param.constraints is not None:
param.constraints(defaults[defaults_idx])
orig_docs = param._doc
param._doc = alter_interface_docs_for_api(param._doc)
doc += param.get_autodoc(
arg,
default=defaults[defaults_idx] if defaults_idx >= 0 else None,
has_default=defaults_idx >= 0)
param._doc = orig_docs
doc += '\n'
doc += suffix if suffix else u""
# assign the amended docs
func.__doc__ = doc
return func
def build_example(example, api='python'):
"""Build a code example.
Take a dict from a classes _example_ specification (list of dicts) and
build a string with an api or cmd example (for use in cmd help or
docstring).
Parameters
----------
api : {'python', 'cmdline'}
If 'python', build Python example for docstring. If 'cmdline', build
cmd example.
Returns
-------
ex : str
Concatenated examples for the given class.
"""
    if api == 'python':
        code_field = 'code_py'
        indicator = '>'
    elif api == 'cmdline':
        code_field = 'code_cmd'
        indicator = '%'
else:
raise ValueError("unknown API selection: {}".format(api))
if code_field not in example:
        # only show an example if it exists for the API
return ''
description = textwrap.fill(example.get('text'))
    # this indents the code snippet to get it properly rendered as code
# we are not using textwrap.fill(), because it would not acknowledge
# any meaningful structure/formatting of code snippets. Instead, we
# maintain line content as is.
code = dedent_docstring(example.get(code_field))
needs_indicator = not code.startswith(indicator)
code = textwrap.indent(code, ' ' * (5 if needs_indicator else 3)).lstrip()
ex = """{}::\n\n {}{}\n\n""".format(
description,
# disable automatic prefixing, if the example already has one
# this enables providing more complex examples without having
# to infer its inner structure
'{} '.format(indicator)
if needs_indicator
# maintain spacing to avoid undesired relative indentation
else '',
code)
return ex
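# Editor's illustrative sketch (not part of datalad): a minimal _examples_
# entry; the text, code, and URL below are made up.
def _demo_build_example():
    example = dict(text="Install a dataset",
                   code_py="install(source='https://example.com/ds')",
                   code_cmd="datalad install https://example.com/ds")
    # renders the description, then '::', then the indented '> '-prefixed code
    return build_example(example, api='python')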
def update_docstring_with_examples(cls_doc, ex):
"""Update a commands docstring with examples.
Take _examples_ of a command, build the Python examples, and append
them to the docstring.
cls_doc: docstring
ex: list
list of dicts with examples
"""
from textwrap import indent
if len(cls_doc):
cls_doc += "\n"
cls_doc += " Examples\n --------\n"
# loop though provided examples
for example in ex:
cls_doc += indent(build_example(example, api='python'), ' '*4)
return cls_doc
def build_doc(cls, **kwargs):
"""Decorator to build docstrings for datalad commands
It's intended to decorate the class, the __call__-method of which is the
actual command. It expects that __call__-method to be decorated by
eval_results.
Note that values for any `eval_params` keys in `cls._params_` are
ignored. This means one class may extend another's `_params_`
without worrying about filtering out `eval_params`.
Parameters
----------
cls: Interface
class defining a datalad command
"""
# Note, that this is a class decorator, which is executed only once when the
# class is imported. It builds the docstring for the class' __call__ method
# and returns the original class.
#
# This is because a decorator for the actual function would not be able to
# behave like this. To build the docstring we need to access the attribute
# _params of the class. From within a function decorator we cannot do this
# during import time, since the class is being built in this very moment and
# is not yet available in the module. And if we do it from within the part
# of a function decorator, that is executed when the function is called, we
# would need to actually call the command once in order to build this
# docstring.
lgr.debug("Building doc for {}".format(cls))
cls_doc = cls.__doc__
if hasattr(cls, '_docs_'):
# expand docs
cls_doc = cls_doc.format(**cls._docs_)
# get examples
ex = getattr(cls, '_examples_', [])
if ex:
cls_doc = update_docstring_with_examples(cls_doc, ex)
call_doc = None
# suffix for update_docstring_with_parameters:
if cls.__call__.__doc__:
call_doc = cls.__call__.__doc__
# build standard doc and insert eval_doc
spec = getattr(cls, '_params_', dict())
# update class attributes that may override defaults
if hasattr(cls, '_no_eval_results'):
add_args = None
else:
add_args = {k: getattr(cls, k, v) for k, v in eval_defaults.items()}
# ATTN: An important consequence of this update() call is that it
# fulfills the docstring's promise of overriding any existing
# values for eval_params keys in _params_.
#
# get docs for eval_results parameters:
spec.update(eval_params)
update_docstring_with_parameters(
cls.__call__, spec,
prefix=alter_interface_docs_for_api(cls_doc),
suffix=alter_interface_docs_for_api(call_doc),
add_args=add_args
)
# return original
return cls
NA_STRING = 'N/A' # we might want to make it configurable via config
class nagen(object):
"""A helper to provide a desired missing value if no value is known
Usecases
- could be used as a generator for `defaultdict`
- since it returns itself upon getitem, should work even for complex
nested dictionaries/lists .format templates
"""
def __init__(self, missing=NA_STRING):
self.missing = missing
def __repr__(self):
cls = self.__class__.__name__
args = str(self.missing) if self.missing != NA_STRING else ''
return '%s(%s)' % (cls, args)
def __str__(self):
return self.missing
def __getitem__(self, *args):
return self
def __getattr__(self, item):
return self
def nadict(*items):
"""A generator of default dictionary with the default nagen"""
dd = defaultdict(nagen)
dd.update(*items)
return dd
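# Editor's illustrative sketch (not part of datalad): keys missing from a
# result record render as the N/A placeholder instead of raising KeyError
# (the field names and values below are made up).
def _demo_nadict():
    record = nadict({'action': 'get', 'path': '/tmp/ds'})
    return '{action} {path} ({size})'.format_map(record)   # -> 'get /tmp/ds (N/A)'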
class DefaultOutputFormatter(string.Formatter):
"""A custom formatter for default output rendering using .format
"""
# TODO: make missing configurable?
def __init__(self, missing=nagen()):
"""
Parameters
----------
missing: string, optional
What to output for the missing values
"""
super(DefaultOutputFormatter, self).__init__()
self.missing = missing
def _d(self, msg, *args):
# print(" HERE %s" % (msg % args))
pass
def get_value(self, key, args, kwds):
assert not args
self._d("get_value: %r %r %r", key, args, kwds)
return kwds.get(key, self.missing)
# def get_field(self, field_name, args, kwds):
# assert not args
# self._d("get_field: %r args=%r kwds=%r" % (field_name, args, kwds))
# try:
# out = string.Formatter.get_field(self, field_name, args, kwds)
# except Exception as exc:
# # TODO needs more than just a value
# return "!ERR %s" % exc
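# Editor's illustrative sketch (not part of datalad): the formatter falls back
# to the nagen() placeholder for keys absent from the keyword arguments
# (the keys used here are made up).
def _demo_default_output_formatter():
    fmt = DefaultOutputFormatter()
    return fmt.format('{status}: {message}', status='ok')   # -> 'ok: N/A'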
class DefaultOutputRenderer(object):
"""A default renderer for .format'ed output line
"""
def __init__(self, format):
self.format = format
        # We still need a custom output formatter since, at the "first level"
        # of the .format template, items are plain values rather than `nadict`s
self.formatter = DefaultOutputFormatter()
@classmethod
def _dict_to_nadict(cls, v):
"""Traverse datastructure and replace any regular dict with nadict"""
if isinstance(v, list):
return [cls._dict_to_nadict(x) for x in v]
elif isinstance(v, dict):
return nadict((k, cls._dict_to_nadict(x)) for k, x in v.items())
else:
return v
def __call__(self, x, **kwargs):
dd = nadict(
(k, nadict({k_.replace(':', '#'): self._dict_to_nadict(v_)
for k_, v_ in v.items()})
if isinstance(v, dict) else v)
for k, v in x.items()
)
msg = self.formatter.format(self.format, **dd)
return ui.message(msg)
class Interface(object):
"""Base class for interface implementations"""
# exit code to return if user-interrupted
# if None, would just reraise the Exception, so if in --dbg
# mode would fall into the debugger
_interrupted_exit_code = 1
_OLDSTYLE_COMMANDS = (
'AddArchiveContent', 'CrawlInit', 'Crawl', 'CreateSiblingGithub',
'CreateTestDataset', 'Export', 'Ls', 'SSHRun', 'Test')
@classmethod
def setup_parser(cls, parser):
# XXX needs safety check for name collisions
# XXX allow for parser kwargs customization
parser_kwargs = {}
from datalad.utils import getargspec
# get the signature
ndefaults = 0
args, varargs, varkw, defaults = getargspec(cls.__call__)
        if defaults is not None:
ndefaults = len(defaults)
for i, arg in enumerate(args):
if not is_api_arg(arg):
continue
param = cls._params_[arg]
defaults_idx = ndefaults - len(args) + i
cmd_args = param.cmd_args
if cmd_args == tuple():
# explicitly provided an empty sequence of argument names
# this shall not appear in the parser
continue
elif cmd_args is None:
cmd_args = []
if not len(cmd_args):
if defaults_idx >= 0:
# dealing with a kwarg
template = '--%s'
else:
# positional arg
template = '%s'
# use parameter name as default argument name
parser_args = (template % arg.replace('_', '-'),)
else:
parser_args = [c.replace('_', '-') for c in cmd_args]
parser_kwargs = param.cmd_kwargs
if defaults_idx >= 0:
parser_kwargs['default'] = defaults[defaults_idx]
help = alter_interface_docs_for_cmdline(param._doc)
if help and help.rstrip()[-1] != '.':
help = help.rstrip() + '.'
if param.constraints is not None:
parser_kwargs['type'] = param.constraints
# include value constraint description and default
# into the help string
cdoc = alter_interface_docs_for_cmdline(
param.constraints.long_description())
if cdoc[0] == '(' and cdoc[-1] == ')':
cdoc = cdoc[1:-1]
help += ' Constraints: %s' % cdoc
if 'metavar' not in parser_kwargs and \
isinstance(param.constraints, EnsureChoice):
parser_kwargs['metavar'] = \
'{%s}' % '|'.join(
# don't use short_description(), because
# it also needs to give valid output for
# Python syntax (quotes...), but here we
# can simplify to shell syntax where everything
# is a string
p for p in param.constraints._allowed
# in the cmdline None pretty much means
# don't give the options, so listing it
# doesn't make sense. Moreover, any non-string
# value cannot be given and very likely only
# serves a special purpose in the Python API
# or implementation details
if isinstance(p, str))
if defaults_idx >= 0:
                # if it is a flag, it makes little sense to show the
                # "[Default: ...]" (likely a boolean) in the cmdline help.
# See https://github.com/datalad/datalad/issues/3203
if not parser_kwargs.get('action', '').startswith('store_'):
# [Default: None] also makes little sense for cmdline
if defaults[defaults_idx] is not None:
help += " [Default: %r]" % (defaults[defaults_idx],)
# create the parameter, using the constraint instance for type
# conversion
parser.add_argument(*parser_args, help=help,
**parser_kwargs)
@classmethod
def call_from_parser(cls, args):
# XXX needs safety check for name collisions
from datalad.utils import getargspec
argspec = getargspec(cls.__call__)
if argspec[2] is None:
# no **kwargs in the call receiver, pull argnames from signature
argnames = getargspec(cls.__call__)[0]
else:
# common options
# XXX define or better get from elsewhere
common_opts = ('change_path', 'common_debug', 'common_idebug', 'func',
'help', 'log_level', 'logger', 'pbs_runner',
'result_renderer', 'subparser')
argnames = [name for name in dir(args)
if not (name.startswith('_') or name in common_opts)]
kwargs = {k: getattr(args, k)
for k in argnames
# some arguments might be Python-only and do not appear in the
# parser Namespace
if hasattr(args, k) and is_api_arg(k)}
# we are coming from the entry point, this is the toplevel command,
# let it run like generator so we can act on partial results quicker
# TODO remove following condition test when transition is complete and
# run indented code unconditionally
if cls.__name__ not in Interface._OLDSTYLE_COMMANDS:
# set all common args explicitly to override class defaults
            # that are tailored towards the Python API
kwargs['return_type'] = 'generator'
kwargs['result_xfm'] = None
# allow commands to override the default, unless something other than
# default is requested
kwargs['result_renderer'] = \
args.common_output_format if args.common_output_format != 'default' \
else getattr(cls, 'result_renderer', args.common_output_format)
if '{' in args.common_output_format:
# stupid hack, could and should become more powerful
kwargs['result_renderer'] = DefaultOutputRenderer(args.common_output_format)
if args.common_on_failure:
kwargs['on_failure'] = args.common_on_failure
# compose filter function from to be invented cmdline options
res_filter = cls._get_result_filter(args)
if res_filter is not None:
# Don't add result_filter if it's None because then
# eval_results can't distinguish between --report-{status,type}
# not specified via the CLI and None passed via the Python API.
kwargs['result_filter'] = res_filter
try:
ret = cls.__call__(**kwargs)
if inspect.isgenerator(ret):
ret = list(ret)
return ret
except KeyboardInterrupt as exc:
ui.error("\nInterrupted by user while doing magic: %s" % exc_str(exc))
if cls._interrupted_exit_code is not None:
sys.exit(cls._interrupted_exit_code)
else:
raise
@classmethod
def _get_result_filter(cls, args):
from datalad import cfg
result_filter = None
if args.common_report_status or 'datalad.runtime.report-status' in cfg:
report_status = args.common_report_status or \
cfg.obtain('datalad.runtime.report-status')
if report_status == "all":
pass # no filter
elif report_status == 'success':
result_filter = EnsureKeyChoice('status', ('ok', 'notneeded'))
elif report_status == 'failure':
result_filter = EnsureKeyChoice('status',
('impossible', 'error'))
else:
result_filter = EnsureKeyChoice('status', (report_status,))
if args.common_report_type:
tfilt = EnsureKeyChoice('type', tuple(args.common_report_type))
result_filter = result_filter & tfilt if result_filter else tfilt
return result_filter
@classmethod
def get_refds_path(cls, dataset):
"""Return a resolved reference dataset path from a `dataset` argument"""
# theoretically a dataset could come in as a relative path -> resolve
if dataset is None:
return dataset
refds_path = dataset.path if isinstance(dataset, Dataset) \
else Dataset(dataset).path
if refds_path:
refds_path = str(resolve_path(refds_path))
return refds_path
def get_allargs_as_kwargs(call, args, kwargs):
"""Generate a kwargs dict from a call signature and *args, **kwargs
Basically resolving the argnames for all positional arguments, and
    resolving the defaults for all kwargs that are not given in a kwargs
dict
"""
from datalad.utils import getargspec
argspec = getargspec(call)
defaults = argspec.defaults
nargs = len(argspec.args)
assert (nargs >= len(defaults))
# map any args to their name
argmap = list(zip(argspec.args[:len(args)], args))
kwargs_ = OrderedDict(argmap)
# map defaults of kwargs to their names (update below)
for k, v in zip(argspec.args[-len(defaults):], defaults):
if k not in kwargs_:
kwargs_[k] = v
# update with provided kwarg args
kwargs_.update(kwargs)
# XXX we cannot assert the following, because our own highlevel
# API commands support more kwargs than what is discoverable
# from their signature...
#assert (nargs == len(kwargs_))
return kwargs_
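# Editor's illustrative sketch (not part of datalad): positional arguments are
# mapped to their names and unspecified keyword defaults are filled in
# (the callable and values below are made up).
def _demo_get_allargs_as_kwargs():
    def fake_cmd(dataset, path=None, recursive=False):
        return dataset, path, recursive
    # -> {'dataset': 'mydataset', 'path': None, 'recursive': True}
    return get_allargs_as_kwargs(fake_cmd, ('mydataset',), {'recursive': True})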
| 35.650292 | 97 | 0.590007 |
6fe7e78c4aa5cbccb77d8ae2905f4cd7bb1205ae | 876 | py | Python | commandHandler.py | Dadaskis/DadaskisBot | a09f3651fd5c33423ed21ab17e18419a2e4b9e42 | ["MIT"] | 9 | 2019-08-21T04:52:26.000Z | 2020-07-20T08:09:38.000Z | commandHandler.py | Dadaskis/DadaskisBot | a09f3651fd5c33423ed21ab17e18419a2e4b9e42 | ["MIT"] | null | null | null | commandHandler.py | Dadaskis/DadaskisBot | a09f3651fd5c33423ed21ab17e18419a2e4b9e42 | ["MIT"] | null | null | null |
from chainCommandHandler import ChainCommandHandler
from lvlUpHandler import LvlUpHandler
from questsHandler import QuestsHandler
from rpCommands import RPCommands
class ChatUserInfo:
def __init__(self):
self.chatName = ""
self.chatID = 0
self.userName = ""
self.userID = 0
self.platform = ""
self.reply = False
self.botReply = False
self.userIDReply = 0
self.userNameReply = ""
self.telegramUpdate = None
self.telegramBot = None
class CommandHandler:
def __init__(self):
self.Handlers = [ChainCommandHandler(), LvlUpHandler(), QuestsHandler(self), RPCommands()]
def handle(self, stringMessage, info):
for handler in self.Handlers:
try:
skip, string = handler.handle(stringMessage, info)
                if skip:
if len(string) == 0:
string = " "
return string
except Exception as ex:
                continue  # print(ex)
return " "
| 25.028571 | 92 | 0.712329 |
115b114b089fce5ef9e590856a223217d3c5e807 | 2,082 | py | Python | diofant/matrices/__init__.py | rajkk1/diofant | 6b361334569e4ec2e8c7d30dc324387a4ad417c2 | ["BSD-3-Clause"] | 57 | 2016-09-13T23:16:26.000Z | 2022-03-29T06:45:51.000Z | diofant/matrices/__init__.py | rajkk1/diofant | 6b361334569e4ec2e8c7d30dc324387a4ad417c2 | ["BSD-3-Clause"] | 402 | 2016-05-11T11:11:47.000Z | 2022-03-31T14:27:02.000Z | diofant/matrices/__init__.py | rajkk1/diofant | 6b361334569e4ec2e8c7d30dc324387a4ad417c2 | ["BSD-3-Clause"] | 20 | 2016-05-11T08:17:37.000Z | 2021-09-10T09:15:51.000Z |
"""A module that handles matrices.
Includes functions for fast creating matrices like zero, one/eye, random
matrix, etc.
"""
from .dense import (GramSchmidt, MutableDenseMatrix, MutableMatrix, casoratian,
diag, eye, hessian, jordan_cell, list2numpy, matrix2numpy,
matrix_multiply_elementwise, ones, randMatrix, rot_axis1,
rot_axis2, rot_axis3, symarray, vandermonde, wronskian,
zeros)
from .expressions import (Adjoint, BlockDiagMatrix, BlockMatrix, Determinant,
DiagonalMatrix, DiagonalOf, FunctionMatrix,
HadamardProduct, Identity, Inverse, MatAdd, MatMul,
MatPow, MatrixExpr, MatrixSlice, MatrixSymbol, Trace,
Transpose, ZeroMatrix, block_collapse, blockcut, det,
hadamard_product, trace)
from .immutable import (ImmutableDenseMatrix, ImmutableMatrix,
ImmutableSparseMatrix)
from .matrices import MatrixBase, NonSquareMatrixError, ShapeError
from .sparse import MutableSparseMatrix, SparseMatrix
Matrix = MutableMatrix
__all__ = ('GramSchmidt', 'MutableDenseMatrix', 'MutableMatrix', 'casoratian',
'diag', 'eye', 'hessian', 'jordan_cell', 'list2numpy',
'matrix2numpy', 'matrix_multiply_elementwise', 'ones', 'randMatrix',
'rot_axis1', 'rot_axis2', 'rot_axis3', 'symarray', 'vandermonde',
'wronskian', 'zeros', 'Adjoint', 'BlockDiagMatrix', 'BlockMatrix',
'Determinant', 'DiagonalMatrix', 'DiagonalOf', 'FunctionMatrix',
'HadamardProduct', 'Identity', 'Inverse', 'MatAdd', 'MatMul',
'MatPow', 'MatrixExpr', 'MatrixSlice', 'MatrixSymbol', 'Trace',
'Transpose', 'ZeroMatrix', 'block_collapse', 'blockcut', 'det',
'hadamard_product', 'trace', 'ImmutableDenseMatrix',
'ImmutableMatrix', 'ImmutableSparseMatrix', 'MatrixBase',
'NonSquareMatrixError', 'ShapeError', 'MutableSparseMatrix',
'SparseMatrix', 'Matrix')
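# Editor's illustrative sketch (not part of diofant): a few of the quick
# constructors re-exported above; the sizes are arbitrary.
def _demo_quick_constructors():
    return eye(3), zeros(2, 3), diag(1, 2, 3)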
| 53.384615 | 79 | 0.641691 |
a1602e8e7f2214abb01a66c35cae005238b0333e | 5,706 | py | Python | cirq-core/cirq/protocols/json_test_data/spec.py | kevinsung/Cirq | 5ee7ff62428d7242ff041d00305116fc78d9f784 | ["Apache-2.0"] | 1 | 2021-12-18T00:12:12.000Z | 2021-12-18T00:12:12.000Z | cirq-core/cirq/protocols/json_test_data/spec.py | kevinsung/Cirq | 5ee7ff62428d7242ff041d00305116fc78d9f784 | ["Apache-2.0"] | 1 | 2021-03-11T21:05:42.000Z | 2021-03-17T19:44:34.000Z | cirq-core/cirq/protocols/json_test_data/spec.py | kevinsung/Cirq | 5ee7ff62428d7242ff041d00305116fc78d9f784 | ["Apache-2.0"] | null | null | null |
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import cirq
from cirq.json_resolver_cache import _class_resolver_dictionary
from cirq.testing.json import ModuleJsonTestSpec
TestSpec = ModuleJsonTestSpec(
name="cirq",
packages=[cirq, cirq.work],
test_data_path=pathlib.Path(__file__).parent,
resolver_cache=_class_resolver_dictionary(),
not_yet_serializable=[
'Alignment',
'AxisAngleDecomposition',
'CircuitDag',
'CircuitDiagramInfo',
'CircuitDiagramInfoArgs',
'CircuitSampleJob',
'CliffordSimulatorStepResult',
'CliffordTrialResult',
'DensityMatrixSimulator',
'DensityMatrixSimulatorState',
'DensityMatrixStepResult',
'DensityMatrixTrialResult',
'ExpressionMap',
'InsertStrategy',
'IonDevice',
'KakDecomposition',
'LinearCombinationOfGates',
'LinearCombinationOfOperations',
'Linspace',
'ListSweep',
'DiagonalGate',
'NeutralAtomDevice',
'PauliInteractionGate',
'PauliStringPhasor',
'PauliSum',
'PauliSumCollector',
'PauliSumExponential',
'PauliTransform',
'PeriodicValue',
'PointOptimizationSummary',
'Points',
'Product',
'QasmArgs',
'QasmOutput',
'QuantumState',
'QubitOrder',
'QubitPermutationGate',
'QuilFormatter',
'QuilOutput',
'SimulationTrialResult',
'SparseSimulatorStep',
'StateVectorMixin',
'TextDiagramDrawer',
'ThreeQubitDiagonalGate',
'Timestamp',
'TwoQubitDiagonalGate',
'UnitSweep',
'StateVectorSimulatorState',
'StateVectorTrialResult',
'ZerosSampler',
'Zip',
],
should_not_be_serialized=[
# Heatmaps
'Heatmap',
'TwoQubitInteractionHeatmap',
# Intermediate states with work buffers and unknown external prng guts.
'ActOnArgs',
'ActOnArgsContainer',
'ActOnCliffordTableauArgs',
'ActOnDensityMatrixArgs',
'ActOnStabilizerCHFormArgs',
'ActOnStateVectorArgs',
'ApplyChannelArgs',
'ApplyMixtureArgs',
'ApplyUnitaryArgs',
'OperationTarget',
# Circuit optimizers are function-like. Only attributes
# are ignore_failures, tolerance, and other feature flags
'AlignLeft',
'AlignRight',
'ConvertToCzAndSingleGates',
'ConvertToIonGates',
'ConvertToNeutralAtomGates',
'DropEmptyMoments',
'DropNegligible',
'EjectPhasedPaulis',
'EjectZ',
'ExpandComposite',
'MEASUREMENT_KEY_SEPARATOR',
'MergeInteractions',
'MergeInteractionsToSqrtIswap',
'MergeSingleQubitGates',
'PointOptimizer',
'SynchronizeTerminalMeasurements',
# global objects
'CONTROL_TAG',
'PAULI_BASIS',
'PAULI_STATES',
# abstract, but not inspect.isabstract():
'Device',
'InterchangeableQubitsGate',
'Pauli',
'SingleQubitGate',
'ABCMetaImplementAnyOneOf',
'GenericMetaImplementAnyOneOf',
'SimulatesAmplitudes',
'SimulatesExpectationValues',
'SimulatesFinalState',
'NamedTopology',
# protocols:
'HasJSONNamespace',
'SupportsActOn',
'SupportsActOnQubits',
'SupportsApplyChannel',
'SupportsApplyMixture',
'SupportsApproximateEquality',
'SupportsCircuitDiagramInfo',
'SupportsCommutes',
'SupportsConsistentApplyUnitary',
'SupportsControlKey',
'SupportsDecompose',
'SupportsDecomposeWithQubits',
'SupportsEqualUpToGlobalPhase',
'SupportsExplicitHasUnitary',
'SupportsExplicitNumQubits',
'SupportsExplicitQidShape',
'SupportsJSON',
'SupportsKraus',
'SupportsMeasurementKey',
'SupportsMixture',
'SupportsParameterization',
'SupportsPauliExpansion',
'SupportsPhase',
'SupportsQasm',
'SupportsQasmWithArgs',
'SupportsQasmWithArgsAndQubits',
'SupportsTraceDistanceBound',
'SupportsUnitary',
# mypy types:
'CIRCUIT_LIKE',
'DURATION_LIKE',
'JsonResolver',
'LabelEntity',
'NOISE_MODEL_LIKE',
'OP_TREE',
'PAULI_GATE_LIKE',
'PAULI_STRING_LIKE',
'ParamResolverOrSimilarType',
'PauliSumLike',
'QUANTUM_STATE_LIKE',
'QubitOrderOrList',
'RANDOM_STATE_OR_SEED_LIKE',
'STATE_VECTOR_LIKE',
'Sweepable',
'TParamKey',
'TParamVal',
'ParamDictType',
# utility:
'CliffordSimulator',
'Simulator',
'StabilizerSampler',
'Unique',
'DEFAULT_RESOLVERS',
],
deprecated={},
tested_elsewhere=[
# SerializableByKey does not follow common serialization rules.
# It is tested separately in test_context_serialization.
'SerializableByKey',
],
)
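# Editor's illustrative sketch (not part of the spec): the behaviour this spec
# exercises is the cirq.to_json / cirq.read_json round trip.
def _demo_json_roundtrip():
    qubit = cirq.LineQubit(0)
    return cirq.read_json(json_text=cirq.to_json(qubit)) == qubit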
| 30.190476 | 79 | 0.622503 |
c4b5b2936e878a9744c193e6212a0e8184b67908 | 3,228 | py | Python | qtopic/pyqtopics.py | Quora-Users/pyQTopic | 7285eda5db218a629eaea58382536a2d6a786ff2 | ["MIT"] | 9 | 2015-10-27T05:47:08.000Z | 2022-01-04T22:58:04.000Z | qtopic/pyqtopics.py | Quora-Users/pyQTopic | 7285eda5db218a629eaea58382536a2d6a786ff2 | ["MIT"] | 2 | 2015-11-02T01:31:19.000Z | 2015-11-05T01:46:21.000Z | qtopic/pyqtopics.py | Quora-Users/pyQTopic | 7285eda5db218a629eaea58382536a2d6a786ff2 | ["MIT"] | 2 | 2015-10-30T16:11:07.000Z | 2020-06-14T19:29:12.000Z |
import urllib2
from bs4 import BeautifulSoup
import feedparser
####################################################################
# API
####################################################################
class QTopic:
@staticmethod
def get_follower_count(topic):
url = "https://www.quora.com/" + topic
html_doc = urllib2.urlopen(url)
soup = BeautifulSoup(html_doc.read())
raw_data = str(soup.find_all('span', class_="count"))
soup = BeautifulSoup(raw_data)
followers = soup.find_all('span')[0].get_text()
return {
'topic': topic,
'followers': followers,
}
@staticmethod
def get_some_followers(topic):
url = "https://www.quora.com/" + topic + "/followers"
html_doc = urllib2.urlopen(url)
soup = BeautifulSoup(html_doc.read())
raw_data = str(soup.find_all('a', class_='user'))
soup = BeautifulSoup(raw_data)
name = soup.get_text()
return {
'name': name,
'topic': topic,
}
@staticmethod
def get_related_topics(topic):
url = "https://www.quora.com/" + topic
html_doc = urllib2.urlopen(url)
soup = BeautifulSoup(html_doc.read())
raw_data = str(soup.find_all(
'div', class_='RelatedTopicFaqsSection RelatedTopicsSection'))
soup = BeautifulSoup(raw_data)
raw_data = str(soup.find_all('span', class_='TopicName'))
soup = BeautifulSoup(raw_data)
related_topics = soup.get_text()
return {
'topic': topic,
'related_topics': related_topics,
}
@staticmethod
def get_best_questions(topic):
url = "https://www.quora.com/" + topic + "/best_questions/rss"
f = feedparser.parse(url)
feed_len = len(f.entries)
links = []
title = []
published = []
for i in range(feed_len):
links.append(f['entries'][i]['links'][0]['href'])
title.append(f['entries'][i]['title'])
published.append(f['entries'][i]['published'])
return {
'links': links,
'title': title,
'published': published
}
@staticmethod
def get_top_stories(topic):
url = "https://www.quora.com/" + topic + "/rss"
f = feedparser.parse(url)
feed_len = len(f.entries)
links = []
title = []
published = []
for i in range(feed_len):
links.append(f['entries'][i]['links'][0]['href'])
title.append(f['entries'][i]['title'])
published.append(f['entries'][i]['published'])
return {
'links': links,
'title': title,
'published': published
}
@staticmethod
def get_open_questions(topic):
url = "https://www.quora.com/" + topic + "/questions"
html_doc = urllib2.urlopen(url)
soup = BeautifulSoup(html_doc.read())
raw_data = str(soup.find_all('div', class_='QuestionText'))
soup = BeautifulSoup(raw_data)
title = soup.get_text()
return {
'question_titles': title,
'topic': topic,
}
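# Editor's illustrative usage sketch (not part of pyQTopic): the topic name is
# made up and a live network connection to quora.com is required.
if __name__ == '__main__':
    print(QTopic.get_follower_count('Computer-Science'))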
| 31.647059 | 74 | 0.525713 |
1591b5056facef54ddbe9145e2c54fe7f646aca1 | 86,298 | py | Python | sphinx/writers/latex.py | mseng10/sphinx | d635d94eebbca0ebb1a5402aa07ed58c0464c6d3 | ["BSD-2-Clause"] | null | null | null | sphinx/writers/latex.py | mseng10/sphinx | d635d94eebbca0ebb1a5402aa07ed58c0464c6d3 | ["BSD-2-Clause"] | null | null | null | sphinx/writers/latex.py | mseng10/sphinx | d635d94eebbca0ebb1a5402aa07ed58c0464c6d3 | ["BSD-2-Clause"] | null | null | null |
"""
sphinx.writers.latex
~~~~~~~~~~~~~~~~~~~~
Custom docutils writer for LaTeX.
Much of this code is adapted from Dave Kuhlman's "docpy" writer from his
docutils sandbox.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import warnings
from collections import defaultdict
from os import path
from typing import Any, Dict, Iterable, Iterator, List, Set, Tuple, Union, cast
from docutils import nodes, writers
from docutils.nodes import Element, Node, Text
from sphinx import addnodes, highlighting
from sphinx.deprecation import (RemovedInSphinx40Warning, RemovedInSphinx50Warning,
deprecated_alias)
from sphinx.domains import IndexEntry
from sphinx.domains.std import StandardDomain
from sphinx.errors import SphinxError
from sphinx.locale import _, __, admonitionlabels
from sphinx.util import logging, split_into, texescape
from sphinx.util.docutils import SphinxTranslator
from sphinx.util.nodes import clean_astext, get_prev_node
from sphinx.util.template import LaTeXRenderer
from sphinx.util.texescape import tex_replace_map
try:
from docutils.utils.roman import toRoman
except ImportError:
    # In Debian/Ubuntu, roman package is provided as roman, not as docutils.utils.roman
from roman import toRoman # type: ignore
if False:
# For type annotation
from sphinx.builders.latex import LaTeXBuilder
from sphinx.builders.latex.theming import Theme
logger = logging.getLogger(__name__)
MAX_CITATION_LABEL_LENGTH = 8
LATEXSECTIONNAMES = ["part", "chapter", "section", "subsection",
"subsubsection", "paragraph", "subparagraph"]
ENUMERATE_LIST_STYLE = defaultdict(lambda: r'\arabic',
{
'arabic': r'\arabic',
'loweralpha': r'\alph',
'upperalpha': r'\Alph',
'lowerroman': r'\roman',
'upperroman': r'\Roman',
})
EXTRA_RE = re.compile(r'^(.*\S)\s+\(([^()]*)\)\s*$')
class collected_footnote(nodes.footnote):
"""Footnotes that are collected are assigned this class."""
class UnsupportedError(SphinxError):
category = 'Markup is unsupported in LaTeX'
class LaTeXWriter(writers.Writer):
supported = ('sphinxlatex',)
settings_spec = ('LaTeX writer options', '', (
('Document name', ['--docname'], {'default': ''}),
('Document class', ['--docclass'], {'default': 'manual'}),
('Author', ['--author'], {'default': ''}),
))
settings_defaults = {} # type: Dict
output = None
def __init__(self, builder: "LaTeXBuilder") -> None:
super().__init__()
self.builder = builder
self.theme = None # type: Theme
def translate(self) -> None:
try:
visitor = self.builder.create_translator(self.document, self.builder, self.theme)
except TypeError:
warnings.warn('LaTeXTranslator now takes 3rd argument; "theme".',
RemovedInSphinx50Warning, stacklevel=2)
visitor = self.builder.create_translator(self.document, self.builder)
self.document.walkabout(visitor)
self.output = cast(LaTeXTranslator, visitor).astext()
# Helper classes
class Table:
"""A table data"""
def __init__(self, node: Element) -> None:
self.header = [] # type: List[str]
self.body = [] # type: List[str]
self.align = node.get('align')
self.colcount = 0
self.colspec = None # type: str
self.colwidths = [] # type: List[int]
self.has_problematic = False
self.has_oldproblematic = False
self.has_verbatim = False
self.caption = None # type: List[str]
self.stubs = [] # type: List[int]
# current position
self.col = 0
self.row = 0
# for internal use
self.classes = node.get('classes', []) # type: List[str]
self.cells = defaultdict(int) # type: Dict[Tuple[int, int], int]
# it maps table location to cell_id
# (cell = rectangular area)
self.cell_id = 0 # last assigned cell_id
def is_longtable(self) -> bool:
"""True if and only if table uses longtable environment."""
return self.row > 30 or 'longtable' in self.classes
def get_table_type(self) -> str:
"""Returns the LaTeX environment name for the table.
The class currently supports:
* longtable
* tabular
* tabulary
"""
if self.is_longtable():
return 'longtable'
elif self.has_verbatim:
return 'tabular'
elif self.colspec:
return 'tabulary'
elif self.has_problematic or (self.colwidths and 'colwidths-given' in self.classes):
return 'tabular'
else:
return 'tabulary'
def get_colspec(self) -> str:
"""Returns a column spec of table.
This is what LaTeX calls the 'preamble argument' of the used table environment.
.. note:: the ``\\X`` and ``T`` column type specifiers are defined in ``sphinx.sty``.
"""
if self.colspec:
return self.colspec
elif self.colwidths and 'colwidths-given' in self.classes:
total = sum(self.colwidths)
colspecs = ['\\X{%d}{%d}' % (width, total) for width in self.colwidths]
return '{|%s|}\n' % '|'.join(colspecs)
elif self.has_problematic:
return '{|*{%d}{\\X{1}{%d}|}}\n' % (self.colcount, self.colcount)
elif self.get_table_type() == 'tabulary':
# sphinx.sty sets T to be J by default.
return '{|' + ('T|' * self.colcount) + '}\n'
elif self.has_oldproblematic:
return '{|*{%d}{\\X{1}{%d}|}}\n' % (self.colcount, self.colcount)
else:
return '{|' + ('l|' * self.colcount) + '}\n'
def add_cell(self, height: int, width: int) -> None:
"""Adds a new cell to a table.
It will be located at current position: (``self.row``, ``self.col``).
"""
self.cell_id += 1
for col in range(width):
for row in range(height):
assert self.cells[(self.row + row, self.col + col)] == 0
self.cells[(self.row + row, self.col + col)] = self.cell_id
def cell(self, row: int = None, col: int = None) -> "TableCell":
"""Returns a cell object (i.e. rectangular area) containing given position.
        If the optional arguments ``row`` and ``col`` are not given, the current
        position (``self.row``, ``self.col``) is used to look up the cell.
"""
try:
if row is None:
row = self.row
if col is None:
col = self.col
return TableCell(self, row, col)
except IndexError:
return None
class TableCell:
"""A cell data of tables."""
def __init__(self, table: Table, row: int, col: int) -> None:
if table.cells[(row, col)] == 0:
raise IndexError
self.table = table
self.cell_id = table.cells[(row, col)]
self.row = row
self.col = col
# adjust position for multirow/multicol cell
while table.cells[(self.row - 1, self.col)] == self.cell_id:
self.row -= 1
while table.cells[(self.row, self.col - 1)] == self.cell_id:
self.col -= 1
@property
def width(self) -> int:
"""Returns the cell width."""
width = 0
while self.table.cells[(self.row, self.col + width)] == self.cell_id:
width += 1
return width
@property
def height(self) -> int:
"""Returns the cell height."""
height = 0
while self.table.cells[(self.row + height, self.col)] == self.cell_id:
height += 1
return height
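# Editor's illustrative sketch (not part of Sphinx): how Table/TableCell track
# spans; a 2-row multirow cell at column 0 reports height 2 from either row.
def _demo_table_geometry():
    tbl = Table(nodes.table())
    tbl.add_cell(2, 1)            # multirow cell covering (0, 0) and (1, 0)
    tbl.col += 1
    tbl.add_cell(1, 1)            # plain cell at (0, 1)
    return tbl.cell(1, 0).height  # -> 2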
def escape_abbr(text: str) -> str:
"""Adjust spacing after abbreviations."""
return re.sub(r'\.(?=\s|$)', r'.\@', text)
def rstdim_to_latexdim(width_str: str, scale: int = 100) -> str:
"""Convert `width_str` with rst length to LaTeX length."""
match = re.match(r'^(\d*\.?\d*)\s*(\S*)$', width_str)
if not match:
raise ValueError
res = width_str
amount, unit = match.groups()[:2]
if scale == 100:
float(amount) # validate amount is float
if unit in ('', "px"):
res = "%s\\sphinxpxdimen" % amount
elif unit == 'pt':
res = '%sbp' % amount # convert to 'bp'
elif unit == "%":
res = "%.3f\\linewidth" % (float(amount) / 100.0)
else:
amount_float = float(amount) * scale / 100.0
if unit in ('', "px"):
res = "%.5f\\sphinxpxdimen" % amount_float
elif unit == 'pt':
res = '%.5fbp' % amount_float
elif unit == "%":
res = "%.5f\\linewidth" % (amount_float / 100.0)
else:
res = "%.5f%s" % (amount_float, unit)
return res
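# Editor's illustrative sketch (not part of Sphinx): percentages map to
# fractions of \linewidth, bare numbers and px map to \sphinxpxdimen, and pt
# maps to bp (the widths below are made up).
def _demo_rstdim_to_latexdim():
    assert rstdim_to_latexdim('50%') == '0.500\\linewidth'
    assert rstdim_to_latexdim('3', scale=200) == '6.00000\\sphinxpxdimen'
    return rstdim_to_latexdim('12pt')   # -> '12bp'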
class LaTeXTranslator(SphinxTranslator):
builder = None # type: LaTeXBuilder
secnumdepth = 2 # legacy sphinxhowto.cls uses this, whereas article.cls
# default is originally 3. For book/report, 2 is already LaTeX default.
ignore_missing_images = False
# sphinx specific document classes
docclasses = ('howto', 'manual')
def __init__(self, document: nodes.document, builder: "LaTeXBuilder",
theme: "Theme" = None) -> None:
super().__init__(document, builder)
self.body = [] # type: List[str]
self.theme = theme
if theme is None:
warnings.warn('LaTeXTranslator now takes 3rd argument; "theme".',
RemovedInSphinx50Warning, stacklevel=2)
# flags
self.in_title = 0
self.in_production_list = 0
self.in_footnote = 0
self.in_caption = 0
self.in_term = 0
self.needs_linetrimming = 0
self.in_minipage = 0
self.no_latex_floats = 0
self.first_document = 1
self.this_is_the_title = 1
self.literal_whitespace = 0
self.in_parsed_literal = 0
self.compact_list = 0
self.first_param = 0
sphinxpkgoptions = []
# sort out some elements
self.elements = self.builder.context.copy()
# initial section names
self.sectionnames = LATEXSECTIONNAMES[:]
if self.theme:
# new style: control sectioning via theme's setting
#
# .. note:: template variables(elements) are already assigned in builder
docclass = self.theme.docclass
if self.theme.toplevel_sectioning == 'section':
self.sectionnames.remove('chapter')
else:
# old style: sectioning control is hard-coded
# but some have other interface in config file
self.elements['wrapperclass'] = self.format_docclass(self.settings.docclass)
# we assume LaTeX class provides \chapter command except in case
# of non-Japanese 'howto' case
if document.get('docclass') == 'howto':
docclass = self.config.latex_docclass.get('howto', 'article')
if docclass[0] == 'j': # Japanese class...
pass
else:
self.sectionnames.remove('chapter')
else:
docclass = self.config.latex_docclass.get('manual', 'report')
self.elements['docclass'] = docclass
# determine top section level
self.top_sectionlevel = 1
if self.config.latex_toplevel_sectioning:
try:
self.top_sectionlevel = \
self.sectionnames.index(self.config.latex_toplevel_sectioning)
except ValueError:
logger.warning(__('unknown %r toplevel_sectioning for class %r') %
(self.config.latex_toplevel_sectioning, docclass))
if self.config.numfig:
self.numfig_secnum_depth = self.config.numfig_secnum_depth
if self.numfig_secnum_depth > 0: # default is 1
# numfig_secnum_depth as passed to sphinx.sty indices same names as in
# LATEXSECTIONNAMES but with -1 for part, 0 for chapter, 1 for section...
if len(self.sectionnames) < len(LATEXSECTIONNAMES) and \
self.top_sectionlevel > 0:
self.numfig_secnum_depth += self.top_sectionlevel
else:
self.numfig_secnum_depth += self.top_sectionlevel - 1
# this (minus one) will serve as minimum to LaTeX's secnumdepth
self.numfig_secnum_depth = min(self.numfig_secnum_depth,
len(LATEXSECTIONNAMES) - 1)
# if passed key value is < 1 LaTeX will act as if 0; see sphinx.sty
sphinxpkgoptions.append('numfigreset=%s' % self.numfig_secnum_depth)
else:
sphinxpkgoptions.append('nonumfigreset')
if self.config.numfig and self.config.math_numfig:
sphinxpkgoptions.append('mathnumfig')
if (self.config.language not in {None, 'en', 'ja'} and
'fncychap' not in self.config.latex_elements):
# use Sonny style if any language specified (except English)
self.elements['fncychap'] = ('\\usepackage[Sonny]{fncychap}\n'
'\\ChNameVar{\\Large\\normalfont'
'\\sffamily}\n\\ChTitleVar{\\Large'
'\\normalfont\\sffamily}')
self.babel = self.builder.babel
if self.config.language and not self.babel.is_supported_language():
# emit warning if specified language is invalid
# (only emitting, nothing changed to processing)
logger.warning(__('no Babel option known for language %r'),
self.config.language)
minsecnumdepth = self.secnumdepth # 2 from legacy sphinx manual/howto
if self.document.get('tocdepth'):
# reduce tocdepth if `part` or `chapter` is used for top_sectionlevel
# tocdepth = -1: show only parts
# tocdepth = 0: show parts and chapters
# tocdepth = 1: show parts, chapters and sections
# tocdepth = 2: show parts, chapters, sections and subsections
# ...
tocdepth = self.document.get('tocdepth', 999) + self.top_sectionlevel - 2
if len(self.sectionnames) < len(LATEXSECTIONNAMES) and \
self.top_sectionlevel > 0:
tocdepth += 1 # because top_sectionlevel is shifted by -1
if tocdepth > len(LATEXSECTIONNAMES) - 2: # default is 5 <-> subparagraph
logger.warning(__('too large :maxdepth:, ignored.'))
tocdepth = len(LATEXSECTIONNAMES) - 2
self.elements['tocdepth'] = '\\setcounter{tocdepth}{%d}' % tocdepth
minsecnumdepth = max(minsecnumdepth, tocdepth)
if self.config.numfig and (self.config.numfig_secnum_depth > 0):
minsecnumdepth = max(minsecnumdepth, self.numfig_secnum_depth - 1)
if minsecnumdepth > self.secnumdepth:
self.elements['secnumdepth'] = '\\setcounter{secnumdepth}{%d}' %\
minsecnumdepth
contentsname = document.get('contentsname')
if contentsname:
self.elements['contentsname'] = self.babel_renewcommand('\\contentsname',
contentsname)
if self.elements['maxlistdepth']:
sphinxpkgoptions.append('maxlistdepth=%s' % self.elements['maxlistdepth'])
if sphinxpkgoptions:
self.elements['sphinxpkgoptions'] = '[,%s]' % ','.join(sphinxpkgoptions)
if self.elements['sphinxsetup']:
self.elements['sphinxsetup'] = ('\\sphinxsetup{%s}' %
self.elements['sphinxsetup'])
if self.elements['extraclassoptions']:
self.elements['classoptions'] += ',' + \
self.elements['extraclassoptions']
self.highlighter = highlighting.PygmentsBridge('latex', self.config.pygments_style,
latex_engine=self.config.latex_engine)
self.context = [] # type: List[Any]
self.descstack = [] # type: List[str]
self.tables = [] # type: List[Table]
self.next_table_colspec = None # type: str
self.bodystack = [] # type: List[List[str]]
self.footnote_restricted = None # type: nodes.Element
self.pending_footnotes = [] # type: List[nodes.footnote_reference]
self.curfilestack = [] # type: List[str]
self.handled_abbrs = set() # type: Set[str]
def pushbody(self, newbody: List[str]) -> None:
self.bodystack.append(self.body)
self.body = newbody
def popbody(self) -> List[str]:
body = self.body
self.body = self.bodystack.pop()
return body
def format_docclass(self, docclass: str) -> str:
""" prepends prefix to sphinx document classes
"""
warnings.warn('LaTeXWriter.format_docclass() is deprecated.',
RemovedInSphinx50Warning, stacklevel=2)
if docclass in self.docclasses:
docclass = 'sphinx' + docclass
return docclass
def astext(self) -> str:
self.elements.update({
'body': ''.join(self.body),
'indices': self.generate_indices()
})
return self.render('latex.tex_t', self.elements)
def hypertarget(self, id: str, withdoc: bool = True, anchor: bool = True) -> str:
if withdoc:
id = self.curfilestack[-1] + ':' + id
return ('\\phantomsection' if anchor else '') + \
'\\label{%s}' % self.idescape(id)
def hypertarget_to(self, node: Element, anchor: bool = False) -> str:
labels = ''.join(self.hypertarget(node_id, anchor=False) for node_id in node['ids'])
if anchor:
return r'\phantomsection' + labels
else:
return labels
def hyperlink(self, id: str) -> str:
return '{\\hyperref[%s]{' % self.idescape(id)
def hyperpageref(self, id: str) -> str:
return '\\autopageref*{%s}' % self.idescape(id)
def escape(self, s: str) -> str:
return texescape.escape(s, self.config.latex_engine)
def idescape(self, id: str) -> str:
return '\\detokenize{%s}' % str(id).translate(tex_replace_map).\
encode('ascii', 'backslashreplace').decode('ascii').\
replace('\\', '_')
def babel_renewcommand(self, command: str, definition: str) -> str:
if self.elements['multilingual']:
prefix = '\\addto\\captions%s{' % self.babel.get_language()
suffix = '}'
else: # babel is disabled (mainly for Japanese environment)
prefix = ''
suffix = ''
return ('%s\\renewcommand{%s}{%s}%s\n' % (prefix, command, definition, suffix))
def generate_indices(self) -> str:
def generate(content: List[Tuple[str, List[IndexEntry]]], collapsed: bool) -> None:
ret.append('\\begin{sphinxtheindex}\n')
ret.append('\\let\\bigletter\\sphinxstyleindexlettergroup\n')
for i, (letter, entries) in enumerate(content):
if i > 0:
ret.append('\\indexspace\n')
ret.append('\\bigletter{%s}\n' % self.escape(letter))
for entry in entries:
if not entry[3]:
continue
ret.append('\\item\\relax\\sphinxstyleindexentry{%s}' %
self.encode(entry[0]))
if entry[4]:
# add "extra" info
ret.append('\\sphinxstyleindexextra{%s}' % self.encode(entry[4]))
ret.append('\\sphinxstyleindexpageref{%s:%s}\n' %
(entry[2], self.idescape(entry[3])))
ret.append('\\end{sphinxtheindex}\n')
ret = []
# latex_domain_indices can be False/True or a list of index names
indices_config = self.config.latex_domain_indices
if indices_config:
for domain in self.builder.env.domains.values():
for indexcls in domain.indices:
indexname = '%s-%s' % (domain.name, indexcls.name)
if isinstance(indices_config, list):
if indexname not in indices_config:
continue
content, collapsed = indexcls(domain).generate(
self.builder.docnames)
if not content:
continue
ret.append('\\renewcommand{\\indexname}{%s}\n' %
indexcls.localname)
generate(content, collapsed)
return ''.join(ret)
def render(self, template_name: str, variables: Dict) -> str:
renderer = LaTeXRenderer(latex_engine=self.config.latex_engine)
for template_dir in self.config.templates_path:
template = path.join(self.builder.confdir, template_dir,
template_name)
if path.exists(template):
return renderer.render(template, variables)
return renderer.render(template_name, variables)
@property
def table(self) -> Table:
"""Get current table."""
if self.tables:
return self.tables[-1]
else:
return None
def visit_document(self, node: Element) -> None:
self.curfilestack.append(node.get('docname', ''))
if self.first_document == 1:
# the first document is all the regular content ...
self.first_document = 0
elif self.first_document == 0:
# ... and all others are the appendices
self.body.append('\n\\appendix\n')
self.first_document = -1
if 'docname' in node:
self.body.append(self.hypertarget(':doc'))
# "- 1" because the level is increased before the title is visited
self.sectionlevel = self.top_sectionlevel - 1
def depart_document(self, node: Element) -> None:
pass
def visit_start_of_file(self, node: Element) -> None:
self.curfilestack.append(node['docname'])
def depart_start_of_file(self, node: Element) -> None:
self.curfilestack.pop()
def visit_section(self, node: Element) -> None:
if not self.this_is_the_title:
self.sectionlevel += 1
self.body.append('\n\n')
def depart_section(self, node: Element) -> None:
self.sectionlevel = max(self.sectionlevel - 1,
self.top_sectionlevel - 1)
def visit_problematic(self, node: Element) -> None:
self.body.append(r'{\color{red}\bfseries{}')
def depart_problematic(self, node: Element) -> None:
self.body.append('}')
def visit_topic(self, node: Element) -> None:
self.in_minipage = 1
self.body.append('\n\\begin{sphinxShadowBox}\n')
def depart_topic(self, node: Element) -> None:
self.in_minipage = 0
self.body.append('\\end{sphinxShadowBox}\n')
visit_sidebar = visit_topic
depart_sidebar = depart_topic
def visit_glossary(self, node: Element) -> None:
pass
def depart_glossary(self, node: Element) -> None:
pass
def visit_productionlist(self, node: Element) -> None:
self.body.append('\n\n\\begin{productionlist}\n')
self.in_production_list = 1
def depart_productionlist(self, node: Element) -> None:
self.body.append('\\end{productionlist}\n\n')
self.in_production_list = 0
def visit_production(self, node: Element) -> None:
if node['tokenname']:
tn = node['tokenname']
self.body.append(self.hypertarget('grammar-token-' + tn))
self.body.append('\\production{%s}{' % self.encode(tn))
else:
self.body.append('\\productioncont{')
def depart_production(self, node: Element) -> None:
self.body.append('}\n')
def visit_transition(self, node: Element) -> None:
self.body.append(self.elements['transition'])
def depart_transition(self, node: Element) -> None:
pass
def visit_title(self, node: Element) -> None:
parent = node.parent
if isinstance(parent, addnodes.seealso):
# the environment already handles this
raise nodes.SkipNode
elif isinstance(parent, nodes.section):
if self.this_is_the_title:
if len(node.children) != 1 and not isinstance(node.children[0],
nodes.Text):
logger.warning(__('document title is not a single Text node'),
location=node)
if not self.elements['title']:
# text needs to be escaped since it is inserted into
# the output literally
self.elements['title'] = self.escape(node.astext())
self.this_is_the_title = 0
raise nodes.SkipNode
else:
short = ''
if node.traverse(nodes.image):
short = ('[%s]' % self.escape(' '.join(clean_astext(node).split())))
try:
self.body.append(r'\%s%s{' % (self.sectionnames[self.sectionlevel], short))
except IndexError:
# just use "subparagraph", it's not numbered anyway
self.body.append(r'\%s%s{' % (self.sectionnames[-1], short))
self.context.append('}\n' + self.hypertarget_to(node.parent))
elif isinstance(parent, nodes.topic):
self.body.append(r'\sphinxstyletopictitle{')
self.context.append('}\n')
elif isinstance(parent, nodes.sidebar):
self.body.append(r'\sphinxstylesidebartitle{')
self.context.append('}\n')
elif isinstance(parent, nodes.Admonition):
self.body.append('{')
self.context.append('}\n')
elif isinstance(parent, nodes.table):
# Redirect body output until title is finished.
self.pushbody([])
else:
logger.warning(__('encountered title node not in section, topic, table, '
'admonition or sidebar'),
location=node)
self.body.append('\\sphinxstyleothertitle{')
self.context.append('}\n')
self.in_title = 1
def depart_title(self, node: Element) -> None:
self.in_title = 0
if isinstance(node.parent, nodes.table):
self.table.caption = self.popbody()
else:
self.body.append(self.context.pop())
def visit_subtitle(self, node: Element) -> None:
if isinstance(node.parent, nodes.sidebar):
self.body.append('\\sphinxstylesidebarsubtitle{')
self.context.append('}\n')
else:
self.context.append('')
def depart_subtitle(self, node: Element) -> None:
self.body.append(self.context.pop())
def visit_desc(self, node: Element) -> None:
self.body.append('\n\n\\begin{fulllineitems}\n')
if self.table:
self.table.has_problematic = True
def depart_desc(self, node: Element) -> None:
self.body.append('\n\\end{fulllineitems}\n\n')
def _visit_signature_line(self, node: Element) -> None:
for child in node:
if isinstance(child, addnodes.desc_parameterlist):
self.body.append(r'\pysiglinewithargsret{')
break
else:
self.body.append(r'\pysigline{')
def _depart_signature_line(self, node: Element) -> None:
self.body.append('}')
def visit_desc_signature(self, node: Element) -> None:
if node.parent['objtype'] != 'describe' and node['ids']:
hyper = self.hypertarget(node['ids'][0])
else:
hyper = ''
self.body.append(hyper)
if not node.get('is_multiline'):
self._visit_signature_line(node)
else:
self.body.append('%\n\\pysigstartmultiline\n')
def depart_desc_signature(self, node: Element) -> None:
if not node.get('is_multiline'):
self._depart_signature_line(node)
else:
self.body.append('%\n\\pysigstopmultiline')
def visit_desc_signature_line(self, node: Element) -> None:
self._visit_signature_line(node)
def depart_desc_signature_line(self, node: Element) -> None:
self._depart_signature_line(node)
def visit_desc_addname(self, node: Element) -> None:
self.body.append(r'\sphinxcode{\sphinxupquote{')
self.literal_whitespace += 1
def depart_desc_addname(self, node: Element) -> None:
self.body.append('}}')
self.literal_whitespace -= 1
def visit_desc_type(self, node: Element) -> None:
pass
def depart_desc_type(self, node: Element) -> None:
pass
def visit_desc_returns(self, node: Element) -> None:
self.body.append(r'{ $\rightarrow$ ')
def depart_desc_returns(self, node: Element) -> None:
self.body.append(r'}')
def visit_desc_name(self, node: Element) -> None:
self.body.append(r'\sphinxbfcode{\sphinxupquote{')
self.literal_whitespace += 1
def depart_desc_name(self, node: Element) -> None:
self.body.append('}}')
self.literal_whitespace -= 1
def visit_desc_parameterlist(self, node: Element) -> None:
# close name, open parameterlist
self.body.append('}{')
self.first_param = 1
def depart_desc_parameterlist(self, node: Element) -> None:
# close parameterlist, open return annotation
self.body.append('}{')
def visit_desc_parameter(self, node: Element) -> None:
if not self.first_param:
self.body.append(', ')
else:
self.first_param = 0
if not node.hasattr('noemph'):
self.body.append(r'\emph{')
def depart_desc_parameter(self, node: Element) -> None:
if not node.hasattr('noemph'):
self.body.append('}')
def visit_desc_optional(self, node: Element) -> None:
self.body.append(r'\sphinxoptional{')
def depart_desc_optional(self, node: Element) -> None:
self.body.append('}')
def visit_desc_annotation(self, node: Element) -> None:
self.body.append(r'\sphinxbfcode{\sphinxupquote{')
def depart_desc_annotation(self, node: Element) -> None:
self.body.append('}}')
def visit_desc_content(self, node: Element) -> None:
if node.children and not isinstance(node.children[0], nodes.paragraph):
# avoid empty desc environment which causes a formatting bug
self.body.append('~')
def depart_desc_content(self, node: Element) -> None:
pass
def visit_seealso(self, node: Element) -> None:
self.body.append('\n\n\\sphinxstrong{%s:}\n\\nopagebreak\n\n'
% admonitionlabels['seealso'])
def depart_seealso(self, node: Element) -> None:
self.body.append("\n\n")
def visit_rubric(self, node: Element) -> None:
if len(node) == 1 and node.astext() in ('Footnotes', _('Footnotes')):
raise nodes.SkipNode
self.body.append('\\subsubsection*{')
self.context.append('}\n')
self.in_title = 1
def depart_rubric(self, node: Element) -> None:
self.in_title = 0
self.body.append(self.context.pop())
def visit_footnote(self, node: Element) -> None:
self.in_footnote += 1
label = cast(nodes.label, node[0])
if self.in_parsed_literal:
self.body.append('\\begin{footnote}[%s]' % label.astext())
else:
self.body.append('%%\n\\begin{footnote}[%s]' % label.astext())
self.body.append('\\sphinxAtStartFootnote\n')
def depart_footnote(self, node: Element) -> None:
if self.in_parsed_literal:
self.body.append('\\end{footnote}')
else:
self.body.append('%\n\\end{footnote}')
self.in_footnote -= 1
def visit_label(self, node: Element) -> None:
raise nodes.SkipNode
def visit_tabular_col_spec(self, node: Element) -> None:
self.next_table_colspec = node['spec']
raise nodes.SkipNode
def visit_table(self, node: Element) -> None:
if len(self.tables) == 1:
if self.table.get_table_type() == 'longtable':
raise UnsupportedError(
'%s:%s: longtable does not support nesting a table.' %
(self.curfilestack[-1], node.line or ''))
else:
# change type of parent table to tabular
# see https://groups.google.com/d/msg/sphinx-users/7m3NeOBixeo/9LKP2B4WBQAJ
self.table.has_problematic = True
elif len(self.tables) > 2:
raise UnsupportedError(
'%s:%s: deeply nested tables are not implemented.' %
(self.curfilestack[-1], node.line or ''))
self.tables.append(Table(node))
if self.next_table_colspec:
self.table.colspec = '{%s}\n' % self.next_table_colspec
if 'colwidths-given' in node.get('classes', []):
logger.info(__('both tabularcolumns and :widths: option are given. '
':widths: is ignored.'), location=node)
self.next_table_colspec = None
def depart_table(self, node: Element) -> None:
labels = self.hypertarget_to(node)
table_type = self.table.get_table_type()
table = self.render(table_type + '.tex_t',
dict(table=self.table, labels=labels))
self.body.append("\n\n")
self.body.append(table)
self.body.append("\n")
self.tables.pop()
def visit_colspec(self, node: Element) -> None:
self.table.colcount += 1
if 'colwidth' in node:
self.table.colwidths.append(node['colwidth'])
if 'stub' in node:
self.table.stubs.append(self.table.colcount - 1)
def depart_colspec(self, node: Element) -> None:
pass
def visit_tgroup(self, node: Element) -> None:
pass
def depart_tgroup(self, node: Element) -> None:
pass
def visit_thead(self, node: Element) -> None:
# Redirect head output until header is finished.
self.pushbody(self.table.header)
def depart_thead(self, node: Element) -> None:
self.popbody()
def visit_tbody(self, node: Element) -> None:
# Redirect body output until table is finished.
self.pushbody(self.table.body)
def depart_tbody(self, node: Element) -> None:
self.popbody()
def visit_row(self, node: Element) -> None:
self.table.col = 0
# fill columns if the row starts with the bottom of multirow cell
while True:
cell = self.table.cell(self.table.row, self.table.col)
if cell is None: # not a bottom of multirow cell
break
else: # a bottom of multirow cell
self.table.col += cell.width
if cell.col:
self.body.append('&')
if cell.width == 1:
# insert suitable strut for equalizing row heights in given multirow
self.body.append('\\sphinxtablestrut{%d}' % cell.cell_id)
else: # use \multicolumn for wide multirow cell
self.body.append('\\multicolumn{%d}{|l|}'
'{\\sphinxtablestrut{%d}}' %
(cell.width, cell.cell_id))
def depart_row(self, node: Element) -> None:
self.body.append('\\\\\n')
cells = [self.table.cell(self.table.row, i) for i in range(self.table.colcount)]
underlined = [cell.row + cell.height == self.table.row + 1 for cell in cells]
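        # underlined[i] is True when the cell occupying column i ends on the current row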
if all(underlined):
self.body.append('\\hline')
else:
i = 0
underlined.extend([False]) # sentinel
while i < len(underlined):
if underlined[i] is True:
j = underlined[i:].index(False)
self.body.append('\\cline{%d-%d}' % (i + 1, i + j))
i += j
i += 1
self.table.row += 1
def visit_entry(self, node: Element) -> None:
if self.table.col > 0:
self.body.append('&')
self.table.add_cell(node.get('morerows', 0) + 1, node.get('morecols', 0) + 1)
cell = self.table.cell()
context = ''
if cell.width > 1:
if self.config.latex_use_latex_multicolumn:
if self.table.col == 0:
self.body.append('\\multicolumn{%d}{|l|}{%%\n' % cell.width)
else:
self.body.append('\\multicolumn{%d}{l|}{%%\n' % cell.width)
context = '}%\n'
else:
self.body.append('\\sphinxstartmulticolumn{%d}%%\n' % cell.width)
context = '\\sphinxstopmulticolumn\n'
if cell.height > 1:
# \sphinxmultirow 2nd arg "cell_id" will serve as id for LaTeX macros as well
self.body.append('\\sphinxmultirow{%d}{%d}{%%\n' % (cell.height, cell.cell_id))
context = '}%\n' + context
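            # the closing "}" for \sphinxmultirow is emitted later via the
            # context string popped in depart_entry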
if cell.width > 1 or cell.height > 1:
self.body.append('\\begin{varwidth}[t]{\\sphinxcolwidth{%d}{%d}}\n'
% (cell.width, self.table.colcount))
context = ('\\par\n\\vskip-\\baselineskip'
'\\vbox{\\hbox{\\strut}}\\end{varwidth}%\n') + context
self.needs_linetrimming = 1
if len(node.traverse(nodes.paragraph)) >= 2:
self.table.has_oldproblematic = True
if isinstance(node.parent.parent, nodes.thead) or (cell.col in self.table.stubs):
if len(node) == 1 and isinstance(node[0], nodes.paragraph) and node.astext() == '':
pass
else:
self.body.append('\\sphinxstyletheadfamily ')
if self.needs_linetrimming:
self.pushbody([])
self.context.append(context)
def depart_entry(self, node: Element) -> None:
if self.needs_linetrimming:
self.needs_linetrimming = 0
body = self.popbody()
# Remove empty lines from top of merged cell
while body and body[0] == "\n":
body.pop(0)
self.body.extend(body)
self.body.append(self.context.pop())
cell = self.table.cell()
self.table.col += cell.width
# fill columns if next ones are a bottom of wide-multirow cell
while True:
nextcell = self.table.cell()
if nextcell is None: # not a bottom of multirow cell
break
else: # a bottom part of multirow cell
self.table.col += nextcell.width
self.body.append('&')
if nextcell.width == 1:
# insert suitable strut for equalizing row heights in multirow
# they also serve to clear colour panels which would hide the text
self.body.append('\\sphinxtablestrut{%d}' % nextcell.cell_id)
else:
# use \multicolumn for wide multirow cell
self.body.append('\\multicolumn{%d}{l|}'
'{\\sphinxtablestrut{%d}}' %
(nextcell.width, nextcell.cell_id))
def visit_acks(self, node: Element) -> None:
# this is a list in the source, but should be rendered as a
# comma-separated list here
bullet_list = cast(nodes.bullet_list, node[0])
list_items = cast(Iterable[nodes.list_item], bullet_list)
self.body.append('\n\n')
self.body.append(', '.join(n.astext() for n in list_items) + '.')
self.body.append('\n\n')
raise nodes.SkipNode
def visit_bullet_list(self, node: Element) -> None:
if not self.compact_list:
self.body.append('\\begin{itemize}\n')
if self.table:
self.table.has_problematic = True
def depart_bullet_list(self, node: Element) -> None:
if not self.compact_list:
self.body.append('\\end{itemize}\n')
def visit_enumerated_list(self, node: Element) -> None:
def get_enumtype(node: Element) -> str:
enumtype = node.get('enumtype', 'arabic')
if 'alpha' in enumtype and 26 < node.get('start', 0) + len(node):
# fallback to arabic if alphabet counter overflows
enumtype = 'arabic'
return enumtype
def get_nested_level(node: Element) -> int:
if node is None:
return 0
elif isinstance(node, nodes.enumerated_list):
return get_nested_level(node.parent) + 1
else:
return get_nested_level(node.parent)
enum = "enum%s" % toRoman(get_nested_level(node)).lower()
enumnext = "enum%s" % toRoman(get_nested_level(node) + 1).lower()
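        # e.g. a top-level enumerated list uses the LaTeX counter "enumi" and
        # announces "enumii" as the counter for the next nesting level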
style = ENUMERATE_LIST_STYLE.get(get_enumtype(node))
prefix = node.get('prefix', '')
suffix = node.get('suffix', '.')
self.body.append('\\begin{enumerate}\n')
self.body.append('\\sphinxsetlistlabels{%s}{%s}{%s}{%s}{%s}%%\n' %
(style, enum, enumnext, prefix, suffix))
if 'start' in node:
self.body.append('\\setcounter{%s}{%d}\n' % (enum, node['start'] - 1))
if self.table:
self.table.has_problematic = True
def depart_enumerated_list(self, node: Element) -> None:
self.body.append('\\end{enumerate}\n')
def visit_list_item(self, node: Element) -> None:
# Append "{}" in case the next character is "[", which would break
# LaTeX's list environment (no numbering and the "[" is not printed).
self.body.append(r'\item {} ')
def depart_list_item(self, node: Element) -> None:
self.body.append('\n')
def visit_definition_list(self, node: Element) -> None:
self.body.append('\\begin{description}\n')
if self.table:
self.table.has_problematic = True
def depart_definition_list(self, node: Element) -> None:
self.body.append('\\end{description}\n')
def visit_definition_list_item(self, node: Element) -> None:
pass
def depart_definition_list_item(self, node: Element) -> None:
pass
def visit_term(self, node: Element) -> None:
self.in_term += 1
ctx = ''
if node.get('ids'):
ctx = '\\phantomsection'
for node_id in node['ids']:
ctx += self.hypertarget(node_id, anchor=False)
ctx += '}] \\leavevmode'
self.body.append('\\item[{')
self.context.append(ctx)
def depart_term(self, node: Element) -> None:
self.body.append(self.context.pop())
self.in_term -= 1
def visit_classifier(self, node: Element) -> None:
self.body.append('{[}')
def depart_classifier(self, node: Element) -> None:
self.body.append('{]}')
def visit_definition(self, node: Element) -> None:
pass
def depart_definition(self, node: Element) -> None:
self.body.append('\n')
def visit_field_list(self, node: Element) -> None:
self.body.append('\\begin{quote}\\begin{description}\n')
if self.table:
self.table.has_problematic = True
def depart_field_list(self, node: Element) -> None:
self.body.append('\\end{description}\\end{quote}\n')
def visit_field(self, node: Element) -> None:
pass
def depart_field(self, node: Element) -> None:
pass
visit_field_name = visit_term
depart_field_name = depart_term
visit_field_body = visit_definition
depart_field_body = depart_definition
def visit_paragraph(self, node: Element) -> None:
index = node.parent.index(node)
if (index > 0 and isinstance(node.parent, nodes.compound) and
not isinstance(node.parent[index - 1], nodes.paragraph) and
not isinstance(node.parent[index - 1], nodes.compound)):
# insert blank line, if the paragraph follows a non-paragraph node in a compound
self.body.append('\\noindent\n')
elif index == 1 and isinstance(node.parent, (nodes.footnote, footnotetext)):
# don't insert blank line, if the paragraph is second child of a footnote
# (first one is label node)
pass
else:
self.body.append('\n')
def depart_paragraph(self, node: Element) -> None:
self.body.append('\n')
def visit_centered(self, node: Element) -> None:
self.body.append('\n\\begin{center}')
if self.table:
self.table.has_problematic = True
def depart_centered(self, node: Element) -> None:
self.body.append('\n\\end{center}')
def visit_hlist(self, node: Element) -> None:
# for now, we don't support a more compact list format
# don't add individual itemize environments, but one for all columns
self.compact_list += 1
self.body.append('\\begin{itemize}\\setlength{\\itemsep}{0pt}'
'\\setlength{\\parskip}{0pt}\n')
if self.table:
self.table.has_problematic = True
def depart_hlist(self, node: Element) -> None:
self.compact_list -= 1
self.body.append('\\end{itemize}\n')
def visit_hlistcol(self, node: Element) -> None:
pass
def depart_hlistcol(self, node: Element) -> None:
pass
def latex_image_length(self, width_str: str, scale: int = 100) -> str:
try:
return rstdim_to_latexdim(width_str, scale)
except ValueError:
logger.warning(__('dimension unit %s is invalid. Ignored.'), width_str)
return None
def is_inline(self, node: Element) -> bool:
"""Check whether a node represents an inline element."""
return isinstance(node.parent, nodes.TextElement)
def visit_image(self, node: Element) -> None:
attrs = node.attributes
pre = [] # type: List[str]
# in reverse order
post = [] # type: List[str]
include_graphics_options = []
has_hyperlink = isinstance(node.parent, nodes.reference)
if has_hyperlink:
is_inline = self.is_inline(node.parent)
else:
is_inline = self.is_inline(node)
if 'width' in attrs:
if 'scale' in attrs:
w = self.latex_image_length(attrs['width'], attrs['scale'])
else:
w = self.latex_image_length(attrs['width'])
if w:
include_graphics_options.append('width=%s' % w)
if 'height' in attrs:
if 'scale' in attrs:
h = self.latex_image_length(attrs['height'], attrs['scale'])
else:
h = self.latex_image_length(attrs['height'])
if h:
include_graphics_options.append('height=%s' % h)
if 'scale' in attrs:
if not include_graphics_options:
# if no "width" nor "height", \sphinxincludegraphics will fit
# to the available text width if oversized after rescaling.
include_graphics_options.append('scale=%s'
% (float(attrs['scale']) / 100.0))
if 'align' in attrs:
align_prepost = {
# By default latex aligns the top of an image.
(1, 'top'): ('', ''),
(1, 'middle'): ('\\raisebox{-0.5\\height}{', '}'),
(1, 'bottom'): ('\\raisebox{-\\height}{', '}'),
(0, 'center'): ('{\\hspace*{\\fill}', '\\hspace*{\\fill}}'),
(0, 'default'): ('{\\hspace*{\\fill}', '\\hspace*{\\fill}}'),
# These 2 don't exactly do the right thing. The image should
# be floated alongside the paragraph. See
# https://www.w3.org/TR/html4/struct/objects.html#adef-align-IMG
(0, 'left'): ('{', '\\hspace*{\\fill}}'),
(0, 'right'): ('{\\hspace*{\\fill}', '}'),
}
try:
pre.append(align_prepost[is_inline, attrs['align']][0])
post.append(align_prepost[is_inline, attrs['align']][1])
except KeyError:
pass
if self.in_parsed_literal:
pre.append('{\\sphinxunactivateextrasandspace ')
post.append('}')
if not is_inline and not has_hyperlink:
pre.append('\n\\noindent')
post.append('\n')
pre.reverse()
if node['uri'] in self.builder.images:
uri = self.builder.images[node['uri']]
else:
# missing image!
if self.ignore_missing_images:
return
uri = node['uri']
if uri.find('://') != -1:
# ignore remote images
return
self.body.extend(pre)
options = ''
if include_graphics_options:
options = '[%s]' % ','.join(include_graphics_options)
base, ext = path.splitext(uri)
if self.in_title and base:
            # Lowercase tokens forcibly because some fncychap themes capitalize
            # the options of \sphinxincludegraphics unexpectedly (e.g. WIDTH=...).
self.body.append('\\lowercase{\\sphinxincludegraphics%s}{{%s}%s}' %
(options, base, ext))
else:
self.body.append('\\sphinxincludegraphics%s{{%s}%s}' %
(options, base, ext))
self.body.extend(post)
def depart_image(self, node: Element) -> None:
pass
def visit_figure(self, node: Element) -> None:
align = self.elements['figure_align']
if self.no_latex_floats:
align = "H"
if self.table:
# TODO: support align option
if 'width' in node:
length = self.latex_image_length(node['width'])
if length:
self.body.append('\\begin{sphinxfigure-in-table}[%s]\n'
'\\centering\n' % length)
else:
self.body.append('\\begin{sphinxfigure-in-table}\n\\centering\n')
if any(isinstance(child, nodes.caption) for child in node):
self.body.append('\\capstart')
self.context.append('\\end{sphinxfigure-in-table}\\relax\n')
elif node.get('align', '') in ('left', 'right'):
length = None
if 'width' in node:
length = self.latex_image_length(node['width'])
elif isinstance(node[0], nodes.image) and 'width' in node[0]:
length = self.latex_image_length(node[0]['width'])
self.body.append('\n\n') # Insert a blank line to prevent infinite loop
# https://github.com/sphinx-doc/sphinx/issues/7059
self.body.append('\\begin{wrapfigure}{%s}{%s}\n\\centering' %
('r' if node['align'] == 'right' else 'l', length or '0pt'))
self.context.append('\\end{wrapfigure}\n')
elif self.in_minipage:
self.body.append('\n\\begin{center}')
self.context.append('\\end{center}\n')
else:
self.body.append('\n\\begin{figure}[%s]\n\\centering\n' % align)
if any(isinstance(child, nodes.caption) for child in node):
self.body.append('\\capstart\n')
self.context.append('\\end{figure}\n')
def depart_figure(self, node: Element) -> None:
self.body.append(self.context.pop())
def visit_caption(self, node: Element) -> None:
self.in_caption += 1
if isinstance(node.parent, captioned_literal_block):
self.body.append('\\sphinxSetupCaptionForVerbatim{')
elif self.in_minipage and isinstance(node.parent, nodes.figure):
self.body.append('\\captionof{figure}{')
elif self.table and node.parent.tagname == 'figure':
self.body.append('\\sphinxfigcaption{')
else:
self.body.append('\\caption{')
def depart_caption(self, node: Element) -> None:
self.body.append('}')
if isinstance(node.parent, nodes.figure):
labels = self.hypertarget_to(node.parent)
self.body.append(labels)
self.in_caption -= 1
def visit_legend(self, node: Element) -> None:
self.body.append('\n\\begin{sphinxlegend}')
def depart_legend(self, node: Element) -> None:
self.body.append('\\end{sphinxlegend}\n')
def visit_admonition(self, node: Element) -> None:
self.body.append('\n\\begin{sphinxadmonition}{note}')
self.no_latex_floats += 1
def depart_admonition(self, node: Element) -> None:
self.body.append('\\end{sphinxadmonition}\n')
self.no_latex_floats -= 1
def _visit_named_admonition(self, node: Element) -> None:
label = admonitionlabels[node.tagname]
self.body.append('\n\\begin{sphinxadmonition}{%s}{%s:}' %
(node.tagname, label))
self.no_latex_floats += 1
def _depart_named_admonition(self, node: Element) -> None:
self.body.append('\\end{sphinxadmonition}\n')
self.no_latex_floats -= 1
visit_attention = _visit_named_admonition
depart_attention = _depart_named_admonition
visit_caution = _visit_named_admonition
depart_caution = _depart_named_admonition
visit_danger = _visit_named_admonition
depart_danger = _depart_named_admonition
visit_error = _visit_named_admonition
depart_error = _depart_named_admonition
visit_hint = _visit_named_admonition
depart_hint = _depart_named_admonition
visit_important = _visit_named_admonition
depart_important = _depart_named_admonition
visit_note = _visit_named_admonition
depart_note = _depart_named_admonition
visit_tip = _visit_named_admonition
depart_tip = _depart_named_admonition
visit_warning = _visit_named_admonition
depart_warning = _depart_named_admonition
def visit_versionmodified(self, node: Element) -> None:
pass
def depart_versionmodified(self, node: Element) -> None:
pass
def visit_target(self, node: Element) -> None:
def add_target(id: str) -> None:
# indexing uses standard LaTeX index markup, so the targets
# will be generated differently
if id.startswith('index-'):
return
# equations also need no extra blank line nor hypertarget
# TODO: fix this dependency on mathbase extension internals
if id.startswith('equation-'):
return
# insert blank line, if the target follows a paragraph node
index = node.parent.index(node)
if index > 0 and isinstance(node.parent[index - 1], nodes.paragraph):
self.body.append('\n')
# do not generate \phantomsection in \section{}
anchor = not self.in_title
self.body.append(self.hypertarget(id, anchor=anchor))
# skip if visitor for next node supports hyperlink
next_node = node # type: nodes.Node
while isinstance(next_node, nodes.target):
next_node = next_node.next_node(ascend=True)
domain = cast(StandardDomain, self.builder.env.get_domain('std'))
if isinstance(next_node, HYPERLINK_SUPPORT_NODES):
return
elif domain.get_enumerable_node_type(next_node) and domain.get_numfig_title(next_node):
return
if 'refuri' in node:
return
if 'anonymous' in node:
return
if node.get('refid'):
prev_node = get_prev_node(node)
if isinstance(prev_node, nodes.reference) and node['refid'] == prev_node['refid']:
# a target for a hyperlink reference having alias
pass
else:
add_target(node['refid'])
for id in node['ids']:
add_target(id)
def depart_target(self, node: Element) -> None:
pass
def visit_attribution(self, node: Element) -> None:
self.body.append('\n\\begin{flushright}\n')
self.body.append('---')
def depart_attribution(self, node: Element) -> None:
self.body.append('\n\\end{flushright}\n')
def visit_index(self, node: Element) -> None:
def escape(value: str) -> str:
value = self.encode(value)
value = value.replace(r'\{', r'\sphinxleftcurlybrace{}')
value = value.replace(r'\}', r'\sphinxrightcurlybrace{}')
value = value.replace('"', '""')
value = value.replace('@', '"@')
value = value.replace('!', '"!')
value = value.replace('|', r'\textbar{}')
return value
def style(string: str) -> str:
match = EXTRA_RE.match(string)
if match:
return match.expand(r'\\spxentry{\1}\\spxextra{\2}')
else:
return '\\spxentry{%s}' % string
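        # e.g. style('Python (language)') may yield '\spxentry{Python}\spxextra{language}',
        # assuming EXTRA_RE captures a trailing parenthesized qualifier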
if not node.get('inline', True):
self.body.append('\n')
entries = node['entries']
for type, string, tid, ismain, key_ in entries:
m = ''
if ismain:
m = '|spxpagem'
try:
if type == 'single':
try:
p1, p2 = [escape(x) for x in split_into(2, 'single', string)]
P1, P2 = style(p1), style(p2)
self.body.append(r'\index{%s@%s!%s@%s%s}' % (p1, P1, p2, P2, m))
except ValueError:
p = escape(split_into(1, 'single', string)[0])
P = style(p)
self.body.append(r'\index{%s@%s%s}' % (p, P, m))
elif type == 'pair':
p1, p2 = [escape(x) for x in split_into(2, 'pair', string)]
P1, P2 = style(p1), style(p2)
self.body.append(r'\index{%s@%s!%s@%s%s}\index{%s@%s!%s@%s%s}' %
(p1, P1, p2, P2, m, p2, P2, p1, P1, m))
elif type == 'triple':
p1, p2, p3 = [escape(x) for x in split_into(3, 'triple', string)]
P1, P2, P3 = style(p1), style(p2), style(p3)
self.body.append(
r'\index{%s@%s!%s %s@%s %s%s}'
r'\index{%s@%s!%s, %s@%s, %s%s}'
r'\index{%s@%s!%s %s@%s %s%s}' %
(p1, P1, p2, p3, P2, P3, m,
p2, P2, p3, p1, P3, P1, m,
p3, P3, p1, p2, P1, P2, m))
elif type == 'see':
p1, p2 = [escape(x) for x in split_into(2, 'see', string)]
P1 = style(p1)
self.body.append(r'\index{%s@%s|see{%s}}' % (p1, P1, p2))
elif type == 'seealso':
p1, p2 = [escape(x) for x in split_into(2, 'seealso', string)]
P1 = style(p1)
self.body.append(r'\index{%s@%s|see{%s}}' % (p1, P1, p2))
else:
logger.warning(__('unknown index entry type %s found'), type)
except ValueError as err:
logger.warning(str(err))
if not node.get('inline', True):
self.body.append('\\ignorespaces ')
raise nodes.SkipNode
def visit_raw(self, node: Element) -> None:
if not self.is_inline(node):
self.body.append('\n')
if 'latex' in node.get('format', '').split():
self.body.append(node.astext())
if not self.is_inline(node):
self.body.append('\n')
raise nodes.SkipNode
def visit_reference(self, node: Element) -> None:
if not self.in_title:
for id in node.get('ids'):
anchor = not self.in_caption
self.body += self.hypertarget(id, anchor=anchor)
if not self.is_inline(node):
self.body.append('\n')
uri = node.get('refuri', '')
if not uri and node.get('refid'):
uri = '%' + self.curfilestack[-1] + '#' + node['refid']
if self.in_title or not uri:
self.context.append('')
elif uri.startswith('#'):
# references to labels in the same document
id = self.curfilestack[-1] + ':' + uri[1:]
self.body.append(self.hyperlink(id))
self.body.append(r'\emph{')
if self.config.latex_show_pagerefs and not \
self.in_production_list:
self.context.append('}}} (%s)' % self.hyperpageref(id))
else:
self.context.append('}}}')
elif uri.startswith('%'):
# references to documents or labels inside documents
hashindex = uri.find('#')
if hashindex == -1:
# reference to the document
id = uri[1:] + '::doc'
else:
# reference to a label
id = uri[1:].replace('#', ':')
self.body.append(self.hyperlink(id))
if (len(node) and
isinstance(node[0], nodes.Element) and
'std-term' in node[0].get('classes', [])):
# don't add a pageref for glossary terms
self.context.append('}}}')
# mark up as termreference
self.body.append(r'\sphinxtermref{')
else:
self.body.append(r'\sphinxcrossref{')
if self.config.latex_show_pagerefs and not self.in_production_list:
self.context.append('}}} (%s)' % self.hyperpageref(id))
else:
self.context.append('}}}')
else:
if len(node) == 1 and uri == node[0]:
if node.get('nolinkurl'):
self.body.append('\\sphinxnolinkurl{%s}' % self.encode_uri(uri))
else:
self.body.append('\\sphinxurl{%s}' % self.encode_uri(uri))
raise nodes.SkipNode
else:
self.body.append('\\sphinxhref{%s}{' % self.encode_uri(uri))
self.context.append('}')
def depart_reference(self, node: Element) -> None:
self.body.append(self.context.pop())
if not self.is_inline(node):
self.body.append('\n')
def visit_number_reference(self, node: Element) -> None:
if node.get('refid'):
id = self.curfilestack[-1] + ':' + node['refid']
else:
id = node.get('refuri', '')[1:].replace('#', ':')
title = self.escape(node.get('title', '%s')).replace('\\%s', '%s')
if '\\{name\\}' in title or '\\{number\\}' in title:
# new style format (cf. "Fig.%{number}")
title = title.replace('\\{name\\}', '{name}').replace('\\{number\\}', '{number}')
text = escape_abbr(title).format(name='\\nameref{%s}' % self.idescape(id),
number='\\ref{%s}' % self.idescape(id))
else:
            # old style format (cf. "Fig.%s")
text = escape_abbr(title) % ('\\ref{%s}' % self.idescape(id))
hyperref = '\\hyperref[%s]{%s}' % (self.idescape(id), text)
self.body.append(hyperref)
raise nodes.SkipNode
def visit_download_reference(self, node: Element) -> None:
pass
def depart_download_reference(self, node: Element) -> None:
pass
def visit_pending_xref(self, node: Element) -> None:
pass
def depart_pending_xref(self, node: Element) -> None:
pass
def visit_emphasis(self, node: Element) -> None:
self.body.append(r'\sphinxstyleemphasis{')
def depart_emphasis(self, node: Element) -> None:
self.body.append('}')
def visit_literal_emphasis(self, node: Element) -> None:
self.body.append(r'\sphinxstyleliteralemphasis{\sphinxupquote{')
def depart_literal_emphasis(self, node: Element) -> None:
self.body.append('}}')
def visit_strong(self, node: Element) -> None:
self.body.append(r'\sphinxstylestrong{')
def depart_strong(self, node: Element) -> None:
self.body.append('}')
def visit_literal_strong(self, node: Element) -> None:
self.body.append(r'\sphinxstyleliteralstrong{\sphinxupquote{')
def depart_literal_strong(self, node: Element) -> None:
self.body.append('}}')
def visit_abbreviation(self, node: Element) -> None:
abbr = node.astext()
self.body.append(r'\sphinxstyleabbreviation{')
# spell out the explanation once
if node.hasattr('explanation') and abbr not in self.handled_abbrs:
self.context.append('} (%s)' % self.encode(node['explanation']))
self.handled_abbrs.add(abbr)
else:
self.context.append('}')
def depart_abbreviation(self, node: Element) -> None:
self.body.append(self.context.pop())
def visit_manpage(self, node: Element) -> None:
return self.visit_literal_emphasis(node)
def depart_manpage(self, node: Element) -> None:
return self.depart_literal_emphasis(node)
def visit_title_reference(self, node: Element) -> None:
self.body.append(r'\sphinxtitleref{')
def depart_title_reference(self, node: Element) -> None:
self.body.append('}')
def visit_thebibliography(self, node: Element) -> None:
citations = cast(Iterable[nodes.citation], node)
labels = (cast(nodes.label, citation[0]) for citation in citations)
longest_label = max((label.astext() for label in labels), key=len)
if len(longest_label) > MAX_CITATION_LABEL_LENGTH:
# adjust max width of citation labels not to break the layout
longest_label = longest_label[:MAX_CITATION_LABEL_LENGTH]
self.body.append('\n\\begin{sphinxthebibliography}{%s}\n' %
self.encode(longest_label))
def depart_thebibliography(self, node: Element) -> None:
self.body.append('\\end{sphinxthebibliography}\n')
def visit_citation(self, node: Element) -> None:
label = cast(nodes.label, node[0])
self.body.append('\\bibitem[%s]{%s:%s}' % (self.encode(label.astext()),
node['docname'], node['ids'][0]))
def depart_citation(self, node: Element) -> None:
pass
def visit_citation_reference(self, node: Element) -> None:
if self.in_title:
pass
else:
self.body.append('\\sphinxcite{%s:%s}' % (node['docname'], node['refname']))
raise nodes.SkipNode
def depart_citation_reference(self, node: Element) -> None:
pass
def visit_literal(self, node: Element) -> None:
if self.in_title:
self.body.append(r'\sphinxstyleliteralintitle{\sphinxupquote{')
elif 'kbd' in node['classes']:
self.body.append(r'\sphinxkeyboard{\sphinxupquote{')
else:
self.body.append(r'\sphinxcode{\sphinxupquote{')
def depart_literal(self, node: Element) -> None:
self.body.append('}}')
def visit_footnote_reference(self, node: Element) -> None:
raise nodes.SkipNode
def visit_footnotemark(self, node: Element) -> None:
self.body.append('\\sphinxfootnotemark[')
def depart_footnotemark(self, node: Element) -> None:
self.body.append(']')
def visit_footnotetext(self, node: Element) -> None:
label = cast(nodes.label, node[0])
self.body.append('%%\n\\begin{footnotetext}[%s]'
'\\sphinxAtStartFootnote\n' % label.astext())
def depart_footnotetext(self, node: Element) -> None:
# the \ignorespaces in particular for after table header use
self.body.append('%\n\\end{footnotetext}\\ignorespaces ')
def visit_captioned_literal_block(self, node: Element) -> None:
pass
def depart_captioned_literal_block(self, node: Element) -> None:
pass
def visit_literal_block(self, node: Element) -> None:
if node.rawsource != node.astext():
# most probably a parsed-literal block -- don't highlight
self.in_parsed_literal += 1
self.body.append('\\begin{sphinxalltt}\n')
else:
labels = self.hypertarget_to(node)
if isinstance(node.parent, captioned_literal_block):
labels += self.hypertarget_to(node.parent)
if labels and not self.in_footnote:
self.body.append('\n\\def\\sphinxLiteralBlockLabel{' + labels + '}')
lang = node.get('language', 'default')
linenos = node.get('linenos', False)
highlight_args = node.get('highlight_args', {})
highlight_args['force'] = node.get('force', False)
opts = self.config.highlight_options.get(lang, {})
hlcode = self.highlighter.highlight_block(
node.rawsource, lang, opts=opts, linenos=linenos,
location=node, **highlight_args
)
if self.in_footnote:
self.body.append('\n\\sphinxSetupCodeBlockInFootnote')
hlcode = hlcode.replace('\\begin{Verbatim}',
'\\begin{sphinxVerbatim}')
# if in table raise verbatim flag to avoid "tabulary" environment
# and opt for sphinxVerbatimintable to handle caption & long lines
elif self.table:
self.table.has_problematic = True
self.table.has_verbatim = True
hlcode = hlcode.replace('\\begin{Verbatim}',
'\\begin{sphinxVerbatimintable}')
else:
hlcode = hlcode.replace('\\begin{Verbatim}',
'\\begin{sphinxVerbatim}')
# get consistent trailer
hlcode = hlcode.rstrip()[:-14] # strip \end{Verbatim}
if self.table and not self.in_footnote:
hlcode += '\\end{sphinxVerbatimintable}'
else:
hlcode += '\\end{sphinxVerbatim}'
hllines = str(highlight_args.get('hl_lines', []))[1:-1]
if hllines:
self.body.append('\n\\fvset{hllines={, %s,}}%%' % hllines)
self.body.append('\n' + hlcode + '\n')
if hllines:
self.body.append('\\sphinxresetverbatimhllines\n')
raise nodes.SkipNode
def depart_literal_block(self, node: Element) -> None:
self.body.append('\n\\end{sphinxalltt}\n')
self.in_parsed_literal -= 1
visit_doctest_block = visit_literal_block
depart_doctest_block = depart_literal_block
def visit_line(self, node: Element) -> None:
self.body.append('\\item[] ')
def depart_line(self, node: Element) -> None:
self.body.append('\n')
def visit_line_block(self, node: Element) -> None:
if isinstance(node.parent, nodes.line_block):
self.body.append('\\item[]\n'
'\\begin{DUlineblock}{\\DUlineblockindent}\n')
else:
self.body.append('\n\\begin{DUlineblock}{0em}\n')
if self.table:
self.table.has_problematic = True
def depart_line_block(self, node: Element) -> None:
self.body.append('\\end{DUlineblock}\n')
def visit_block_quote(self, node: Element) -> None:
# If the block quote contains a single object and that object
# is a list, then generate a list not a block quote.
# This lets us indent lists.
done = 0
if len(node.children) == 1:
child = node.children[0]
if isinstance(child, nodes.bullet_list) or \
isinstance(child, nodes.enumerated_list):
done = 1
if not done:
self.body.append('\\begin{quote}\n')
if self.table:
self.table.has_problematic = True
def depart_block_quote(self, node: Element) -> None:
done = 0
if len(node.children) == 1:
child = node.children[0]
if isinstance(child, nodes.bullet_list) or \
isinstance(child, nodes.enumerated_list):
done = 1
if not done:
self.body.append('\\end{quote}\n')
# option node handling copied from docutils' latex writer
def visit_option(self, node: Element) -> None:
if self.context[-1]:
# this is not the first option
self.body.append(', ')
def depart_option(self, node: Element) -> None:
# flag that the first option is done.
self.context[-1] += 1
def visit_option_argument(self, node: Element) -> None:
"""The delimiter betweeen an option and its argument."""
self.body.append(node.get('delimiter', ' '))
def depart_option_argument(self, node: Element) -> None:
pass
def visit_option_group(self, node: Element) -> None:
self.body.append('\\item [')
# flag for first option
self.context.append(0)
def depart_option_group(self, node: Element) -> None:
self.context.pop() # the flag
self.body.append('] ')
def visit_option_list(self, node: Element) -> None:
self.body.append('\\begin{optionlist}{3cm}\n')
if self.table:
self.table.has_problematic = True
def depart_option_list(self, node: Element) -> None:
self.body.append('\\end{optionlist}\n')
def visit_option_list_item(self, node: Element) -> None:
pass
def depart_option_list_item(self, node: Element) -> None:
pass
def visit_option_string(self, node: Element) -> None:
ostring = node.astext()
self.body.append(self.encode(ostring))
raise nodes.SkipNode
def visit_description(self, node: Element) -> None:
self.body.append(' ')
def depart_description(self, node: Element) -> None:
pass
def visit_superscript(self, node: Element) -> None:
self.body.append('$^{\\text{')
def depart_superscript(self, node: Element) -> None:
self.body.append('}}$')
def visit_subscript(self, node: Element) -> None:
self.body.append('$_{\\text{')
def depart_subscript(self, node: Element) -> None:
self.body.append('}}$')
def visit_inline(self, node: Element) -> None:
classes = node.get('classes', [])
if classes in [['menuselection']]:
self.body.append(r'\sphinxmenuselection{')
self.context.append('}')
elif classes in [['guilabel']]:
self.body.append(r'\sphinxguilabel{')
self.context.append('}')
elif classes in [['accelerator']]:
self.body.append(r'\sphinxaccelerator{')
self.context.append('}')
elif classes and not self.in_title:
self.body.append(r'\DUrole{%s}{' % ','.join(classes))
self.context.append('}')
else:
self.context.append('')
def depart_inline(self, node: Element) -> None:
self.body.append(self.context.pop())
def visit_generated(self, node: Element) -> None:
pass
def depart_generated(self, node: Element) -> None:
pass
def visit_compound(self, node: Element) -> None:
pass
def depart_compound(self, node: Element) -> None:
pass
def visit_container(self, node: Element) -> None:
pass
def depart_container(self, node: Element) -> None:
pass
def visit_decoration(self, node: Element) -> None:
pass
def depart_decoration(self, node: Element) -> None:
pass
# docutils-generated elements that we don't support
def visit_header(self, node: Element) -> None:
raise nodes.SkipNode
def visit_footer(self, node: Element) -> None:
raise nodes.SkipNode
def visit_docinfo(self, node: Element) -> None:
raise nodes.SkipNode
# text handling
def encode(self, text: str) -> str:
text = self.escape(text)
if self.literal_whitespace:
# Insert a blank before the newline, to avoid
# ! LaTeX Error: There's no line here to end.
text = text.replace('\n', '~\\\\\n').replace(' ', '~')
return text
def encode_uri(self, text: str) -> str:
# TODO: it is probably wrong that this uses texescape.escape()
# this must be checked against hyperref package exact dealings
# mainly, %, #, {, } and \ need escaping via a \ escape
# in \href, the tilde is allowed and must be represented literally
return self.encode(text).replace('\\textasciitilde{}', '~').\
replace('\\sphinxhyphen{}', '-').\
replace('\\textquotesingle{}', "'")
def visit_Text(self, node: Text) -> None:
text = self.encode(node.astext())
self.body.append(text)
def depart_Text(self, node: Text) -> None:
pass
def visit_comment(self, node: Element) -> None:
raise nodes.SkipNode
def visit_meta(self, node: Element) -> None:
# only valid for HTML
raise nodes.SkipNode
def visit_system_message(self, node: Element) -> None:
pass
def depart_system_message(self, node: Element) -> None:
self.body.append('\n')
def visit_math(self, node: Element) -> None:
if self.in_title:
self.body.append(r'\protect\(%s\protect\)' % node.astext())
else:
self.body.append(r'\(%s\)' % node.astext())
raise nodes.SkipNode
def visit_math_block(self, node: Element) -> None:
if node.get('label'):
label = "equation:%s:%s" % (node['docname'], node['label'])
else:
label = None
if node.get('nowrap'):
if label:
self.body.append(r'\label{%s}' % label)
self.body.append(node.astext())
else:
from sphinx.util.math import wrap_displaymath
self.body.append(wrap_displaymath(node.astext(), label,
self.config.math_number_all))
raise nodes.SkipNode
def visit_math_reference(self, node: Element) -> None:
label = "equation:%s:%s" % (node['docname'], node['target'])
eqref_format = self.config.math_eqref_format
if eqref_format:
try:
ref = r'\ref{%s}' % label
self.body.append(eqref_format.format(number=ref))
except KeyError as exc:
logger.warning(__('Invalid math_eqref_format: %r'), exc,
location=node)
self.body.append(r'\eqref{%s}' % label)
else:
self.body.append(r'\eqref{%s}' % label)
def depart_math_reference(self, node: Element) -> None:
pass
def unknown_visit(self, node: Node) -> None:
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
# --------- METHODS FOR COMPATIBILITY --------------------------------------
def collect_footnotes(self, node: Element) -> Dict[str, List[Union["collected_footnote", bool]]]: # NOQA
def footnotes_under(n: Element) -> Iterator[nodes.footnote]:
if isinstance(n, nodes.footnote):
yield n
else:
for c in n.children:
if isinstance(c, addnodes.start_of_file):
continue
elif isinstance(c, nodes.Element):
yield from footnotes_under(c)
warnings.warn('LaTeXWriter.collected_footnote() is deprecated.',
RemovedInSphinx40Warning, stacklevel=2)
fnotes = {} # type: Dict[str, List[Union[collected_footnote, bool]]]
for fn in footnotes_under(node):
label = cast(nodes.label, fn[0])
num = label.astext().strip()
newnode = collected_footnote('', *fn.children, number=num)
fnotes[num] = [newnode, False]
return fnotes
@property
def no_contractions(self) -> int:
warnings.warn('LaTeXTranslator.no_contractions is deprecated.',
RemovedInSphinx40Warning, stacklevel=2)
return 0
def babel_defmacro(self, name: str, definition: str) -> str:
warnings.warn('babel_defmacro() is deprecated.',
RemovedInSphinx40Warning, stacklevel=2)
if self.elements['babel']:
prefix = '\\addto\\extras%s{' % self.babel.get_language()
suffix = '}'
else: # babel is disabled (mainly for Japanese environment)
prefix = ''
suffix = ''
return ('%s\\def%s{%s}%s\n' % (prefix, name, definition, suffix))
def generate_numfig_format(self, builder: "LaTeXBuilder") -> str:
warnings.warn('generate_numfig_format() is deprecated.',
RemovedInSphinx40Warning, stacklevel=2)
ret = [] # type: List[str]
figure = self.config.numfig_format['figure'].split('%s', 1)
if len(figure) == 1:
ret.append('\\def\\fnum@figure{%s}\n' % self.escape(figure[0]).strip())
else:
definition = escape_abbr(self.escape(figure[0]))
ret.append(self.babel_renewcommand('\\figurename', definition))
ret.append('\\makeatletter\n')
ret.append('\\def\\fnum@figure{\\figurename\\thefigure{}%s}\n' %
self.escape(figure[1]))
ret.append('\\makeatother\n')
table = self.config.numfig_format['table'].split('%s', 1)
if len(table) == 1:
ret.append('\\def\\fnum@table{%s}\n' % self.escape(table[0]).strip())
else:
definition = escape_abbr(self.escape(table[0]))
ret.append(self.babel_renewcommand('\\tablename', definition))
ret.append('\\makeatletter\n')
ret.append('\\def\\fnum@table{\\tablename\\thetable{}%s}\n' %
self.escape(table[1]))
ret.append('\\makeatother\n')
codeblock = self.config.numfig_format['code-block'].split('%s', 1)
if len(codeblock) == 1:
pass # FIXME
else:
definition = self.escape(codeblock[0]).strip()
ret.append(self.babel_renewcommand('\\literalblockname', definition))
if codeblock[1]:
pass # FIXME
return ''.join(ret)
# Import old modules here for compatibility
from sphinx.builders.latex import constants # NOQA
from sphinx.builders.latex.util import ExtBabel # NOQA
deprecated_alias('sphinx.writers.latex',
{
'ADDITIONAL_SETTINGS': constants.ADDITIONAL_SETTINGS,
'DEFAULT_SETTINGS': constants.DEFAULT_SETTINGS,
'LUALATEX_DEFAULT_FONTPKG': constants.LUALATEX_DEFAULT_FONTPKG,
'PDFLATEX_DEFAULT_FONTPKG': constants.PDFLATEX_DEFAULT_FONTPKG,
'SHORTHANDOFF': constants.SHORTHANDOFF,
'XELATEX_DEFAULT_FONTPKG': constants.XELATEX_DEFAULT_FONTPKG,
'XELATEX_GREEK_DEFAULT_FONTPKG': constants.XELATEX_GREEK_DEFAULT_FONTPKG,
'ExtBabel': ExtBabel,
},
RemovedInSphinx40Warning,
{
'ADDITIONAL_SETTINGS':
'sphinx.builders.latex.constants.ADDITIONAL_SETTINGS',
'DEFAULT_SETTINGS':
'sphinx.builders.latex.constants.DEFAULT_SETTINGS',
'LUALATEX_DEFAULT_FONTPKG':
'sphinx.builders.latex.constants.LUALATEX_DEFAULT_FONTPKG',
'PDFLATEX_DEFAULT_FONTPKG':
'sphinx.builders.latex.constants.PDFLATEX_DEFAULT_FONTPKG',
'SHORTHANDOFF':
'sphinx.builders.latex.constants.SHORTHANDOFF',
'XELATEX_DEFAULT_FONTPKG':
'sphinx.builders.latex.constants.XELATEX_DEFAULT_FONTPKG',
'XELATEX_GREEK_DEFAULT_FONTPKG':
'sphinx.builders.latex.constants.XELATEX_GREEK_DEFAULT_FONTPKG',
'ExtBabel': 'sphinx.builders.latex.util.ExtBabel',
})
# FIXME: Workaround to avoid circular import
# refs: https://github.com/sphinx-doc/sphinx/issues/5433
from sphinx.builders.latex.nodes import ( # NOQA isort:skip
HYPERLINK_SUPPORT_NODES, captioned_literal_block, footnotetext,
)
[record stats: avg_line_length 39.97 | max_line_length 109 | alphanum_fraction 0.562]

[record: sendlog.py | NavonLab/power-check @ 95299d4a937c472aa7259543232a749609e12f09 | Python | 33 bytes | MIT]
from mail import update
update()
[record stats: avg_line_length 11 | max_line_length 23 | alphanum_fraction 0.788]

[record: build_data.py | janarthanan-rajendran/l2l-from-related-tasks @ ce80e21cb21a7bb9a9b44f54d288caa2fa0453ed | Python | 1,047 bytes | MIT | 134 stars (2017-06-22 to 2022-02-07), 12 issues, 43 forks]
import os
import requests
import shutil
def download(url, path, fname):
print('downloading ' + fname)
outfile = os.path.join(path, fname)
with requests.Session() as session:
response = session.get(url, stream=True)
CHUNK_SIZE = 32768
with open(outfile, 'wb') as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk:
f.write(chunk)
response.close()
def untar(path, fname):
print('unpacking ' + fname)
fullpath = os.path.join(path, fname)
shutil.unpack_archive(fullpath, path)
os.remove(fullpath)
if __name__ == '__main__':
dpath = os.path.join('data')
print('[building data: ' + dpath + ']')
os.makedirs(dpath, exist_ok=True)
# Download the data from https://www.dropbox.com/s/4i9u4y24pt3paba/personalized-dialog-dataset.tar.gz?dl=1
fname = 'personalized-dialog-dataset.tar.gz'
url = 'https://www.dropbox.com/s/4i9u4y24pt3paba/' + fname + '?dl=1'
download(url, dpath, fname)
untar(dpath, fname)
[record stats: avg_line_length 30.79 | max_line_length 110 | alphanum_fraction 0.638]

[record: contrib/devtools/copyright_header.py | hiphopcoin24/hiphopcoin24 @ 09b780546ba9e28b452a8641863aafa90def40d1 | Python | 22,267 bytes | MIT]
#!/usr/bin/env python3
# Copyright (c) 2016-2020 The Hiphopcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import re
import fnmatch
import sys
import subprocess
import datetime
import os
################################################################################
# file filtering
################################################################################
EXCLUDE = [
# auto generated:
'src/qt/hiphopcoinstrings.cpp',
'src/chainparamsseeds.h',
# other external copyrights:
'src/reverse_iterator.h',
'src/test/fuzz/FuzzedDataProvider.h',
'src/tinyformat.h',
'src/bench/nanobench.h',
'test/functional/test_framework/bignum.py',
# python init:
'*__init__.py',
]
EXCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in EXCLUDE]))
EXCLUDE_DIRS = [
# git subtrees
"src/crypto/ctaes/",
"src/leveldb/",
"src/secp256k1/",
"src/univalue/",
"src/crc32c/",
]
INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.mm', '*.py', '*.sh', '*.bash-completion']
INCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in INCLUDE]))
def applies_to_file(filename):
for excluded_dir in EXCLUDE_DIRS:
if filename.startswith(excluded_dir):
return False
return ((EXCLUDE_COMPILED.match(filename) is None) and
(INCLUDE_COMPILED.match(filename) is not None))
################################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
################################################################################
GIT_LS_CMD = 'git ls-files --full-name'.split(' ')
GIT_TOPLEVEL_CMD = 'git rev-parse --show-toplevel'.split(' ')
def call_git_ls(base_directory):
out = subprocess.check_output([*GIT_LS_CMD, base_directory])
return [f for f in out.decode("utf-8").split('\n') if f != '']
def call_git_toplevel():
"Returns the absolute path to the project root"
return subprocess.check_output(GIT_TOPLEVEL_CMD).strip().decode("utf-8")
def get_filenames_to_examine(base_directory):
"Returns an array of absolute paths to any project files in the base_directory that pass the include/exclude filters"
root = call_git_toplevel()
filenames = call_git_ls(base_directory)
return sorted([os.path.join(root, filename) for filename in filenames if
applies_to_file(filename)])
################################################################################
# define and compile regexes for the patterns we are looking for
################################################################################
COPYRIGHT_WITH_C = r'Copyright \(c\)'
COPYRIGHT_WITHOUT_C = 'Copyright'
ANY_COPYRIGHT_STYLE = '(%s|%s)' % (COPYRIGHT_WITH_C, COPYRIGHT_WITHOUT_C)
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
YEAR_LIST = '(%s)(, %s)+' % (YEAR, YEAR)
ANY_YEAR_STYLE = '(%s|%s)' % (YEAR_RANGE, YEAR_LIST)
ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = ("%s %s" % (ANY_COPYRIGHT_STYLE,
ANY_YEAR_STYLE))
ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE)
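# examples: YEAR_RANGE matches "2016" or "2016-2020"; YEAR_LIST matches
# comma-separated years such as "2014, 2016, 2019"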
def compile_copyright_regex(copyright_style, year_style, name):
return re.compile(r'%s %s,? %s( +\*)?\n' % (copyright_style, year_style, name))
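# e.g. the dominant style regex matches lines such as
# "Copyright (c) 2009-2020 The Hiphopcoin Core developers\n"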
EXPECTED_HOLDER_NAMES = [
r"Satoshi Nakamoto",
r"The Hiphopcoin Core developers",
r"BitPay Inc\.",
r"University of Illinois at Urbana-Champaign\.",
r"Pieter Wuille",
r"Wladimir J\. van der Laan",
r"Jeff Garzik",
r"Jan-Klaas Kollhof",
r"ArtForz -- public domain half-a-node",
r"Intel Corporation ?",
r"The Zcash developers",
r"Jeremy Rubin",
]
DOMINANT_STYLE_COMPILED = {}
YEAR_LIST_STYLE_COMPILED = {}
WITHOUT_C_STYLE_COMPILED = {}
for holder_name in EXPECTED_HOLDER_NAMES:
DOMINANT_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name))
YEAR_LIST_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name))
WITHOUT_C_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
holder_name))
################################################################################
# search file contents for copyright message of particular category
################################################################################
def get_count_of_copyrights_of_any_style_any_holder(contents):
return len(ANY_COPYRIGHT_COMPILED.findall(contents))
def file_has_dominant_style_copyright_for_holder(contents, holder_name):
match = DOMINANT_STYLE_COMPILED[holder_name].search(contents)
return match is not None
def file_has_year_list_style_copyright_for_holder(contents, holder_name):
match = YEAR_LIST_STYLE_COMPILED[holder_name].search(contents)
return match is not None
def file_has_without_c_style_copyright_for_holder(contents, holder_name):
match = WITHOUT_C_STYLE_COMPILED[holder_name].search(contents)
return match is not None
################################################################################
# get file info
################################################################################
def read_file(filename):
    with open(filename, 'r', encoding="utf8") as f:
        return f.read()
def gather_file_info(filename):
info = {}
info['filename'] = filename
c = read_file(filename)
info['contents'] = c
info['all_copyrights'] = get_count_of_copyrights_of_any_style_any_holder(c)
info['classified_copyrights'] = 0
info['dominant_style'] = {}
info['year_list_style'] = {}
info['without_c_style'] = {}
for holder_name in EXPECTED_HOLDER_NAMES:
has_dominant_style = (
file_has_dominant_style_copyright_for_holder(c, holder_name))
has_year_list_style = (
file_has_year_list_style_copyright_for_holder(c, holder_name))
has_without_c_style = (
file_has_without_c_style_copyright_for_holder(c, holder_name))
info['dominant_style'][holder_name] = has_dominant_style
info['year_list_style'][holder_name] = has_year_list_style
info['without_c_style'][holder_name] = has_without_c_style
if has_dominant_style or has_year_list_style or has_without_c_style:
info['classified_copyrights'] = info['classified_copyrights'] + 1
return info
################################################################################
# report execution
################################################################################
SEPARATOR = '-'.join(['' for _ in range(80)])
def print_filenames(filenames, verbose):
if not verbose:
return
for filename in filenames:
print("\t%s" % filename)
def print_report(file_infos, verbose):
print(SEPARATOR)
examined = [i['filename'] for i in file_infos]
print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" %
len(examined))
print_filenames(examined, verbose)
print(SEPARATOR)
print('')
zero_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 0]
print("%4d with zero copyrights" % len(zero_copyrights))
print_filenames(zero_copyrights, verbose)
one_copyright = [i['filename'] for i in file_infos if
i['all_copyrights'] == 1]
print("%4d with one copyright" % len(one_copyright))
print_filenames(one_copyright, verbose)
two_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 2]
print("%4d with two copyrights" % len(two_copyrights))
print_filenames(two_copyrights, verbose)
three_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 3]
print("%4d with three copyrights" % len(three_copyrights))
print_filenames(three_copyrights, verbose)
four_or_more_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] >= 4]
print("%4d with four or more copyrights" % len(four_or_more_copyrights))
print_filenames(four_or_more_copyrights, verbose)
print('')
print(SEPARATOR)
print('Copyrights with dominant style:\ne.g. "Copyright (c)" and '
'"<year>" or "<startYear>-<endYear>":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
dominant_style = [i['filename'] for i in file_infos if
i['dominant_style'][holder_name]]
if len(dominant_style) > 0:
print("%4d with '%s'" % (len(dominant_style),
holder_name.replace('\n', '\\n')))
print_filenames(dominant_style, verbose)
print('')
print(SEPARATOR)
print('Copyrights with year list style:\ne.g. "Copyright (c)" and '
'"<year1>, <year2>, ...":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
year_list_style = [i['filename'] for i in file_infos if
i['year_list_style'][holder_name]]
if len(year_list_style) > 0:
print("%4d with '%s'" % (len(year_list_style),
holder_name.replace('\n', '\\n')))
print_filenames(year_list_style, verbose)
print('')
print(SEPARATOR)
print('Copyrights with no "(c)" style:\ne.g. "Copyright" and "<year>" or '
'"<startYear>-<endYear>":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
without_c_style = [i['filename'] for i in file_infos if
i['without_c_style'][holder_name]]
if len(without_c_style) > 0:
print("%4d with '%s'" % (len(without_c_style),
holder_name.replace('\n', '\\n')))
print_filenames(without_c_style, verbose)
print('')
print(SEPARATOR)
unclassified_copyrights = [i['filename'] for i in file_infos if
i['classified_copyrights'] < i['all_copyrights']]
print("%d with unexpected copyright holder names" %
len(unclassified_copyrights))
print_filenames(unclassified_copyrights, verbose)
print(SEPARATOR)
def exec_report(base_directory, verbose):
filenames = get_filenames_to_examine(base_directory)
file_infos = [gather_file_info(f) for f in filenames]
print_report(file_infos, verbose)
################################################################################
# report cmd
################################################################################
REPORT_USAGE = """
Produces a report of all copyright header notices found inside the source files
of a repository.
Usage:
$ ./copyright_header.py report <base_directory> [verbose]
Arguments:
<base_directory> - The base directory of a hiphopcoin source code repository.
[verbose] - Includes a list of every file of each subcategory in the report.
"""
def report_cmd(argv):
if len(argv) == 2:
sys.exit(REPORT_USAGE)
base_directory = argv[2]
if not os.path.exists(base_directory):
sys.exit("*** bad <base_directory>: %s" % base_directory)
if len(argv) == 3:
verbose = False
elif argv[3] == 'verbose':
verbose = True
else:
sys.exit("*** unknown argument: %s" % argv[2])
exec_report(base_directory, verbose)
################################################################################
# query git for year of last change
################################################################################
GIT_LOG_CMD = "git log --pretty=format:%%ai %s"
def call_git_log(filename):
out = subprocess.check_output((GIT_LOG_CMD % filename).split(' '))
return out.decode("utf-8").split('\n')
def get_git_change_years(filename):
git_log_lines = call_git_log(filename)
if len(git_log_lines) == 0:
return [datetime.date.today().year]
# timestamp is in ISO 8601 format. e.g. "2016-09-05 14:25:32 -0600"
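    # so a line like "2016-09-05 14:25:32 -0600" contributes the year string "2016"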
return [line.split(' ')[0].split('-')[0] for line in git_log_lines]
def get_most_recent_git_change_year(filename):
return max(get_git_change_years(filename))
################################################################################
# read and write to file
################################################################################
def read_file_lines(filename):
f = open(filename, 'r', encoding="utf8")
file_lines = f.readlines()
f.close()
return file_lines
def write_file_lines(filename, file_lines):
f = open(filename, 'w', encoding="utf8")
f.write(''.join(file_lines))
f.close()
################################################################################
# update header years execution
################################################################################
COPYRIGHT = r'Copyright \(c\)'
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
HOLDER = 'The Hiphopcoin Core developers'
UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER]))
def get_updatable_copyright_line(file_lines):
index = 0
for line in file_lines:
if UPDATEABLE_LINE_COMPILED.search(line) is not None:
return index, line
index = index + 1
return None, None
def parse_year_range(year_range):
year_split = year_range.split('-')
start_year = year_split[0]
if len(year_split) == 1:
return start_year, start_year
return start_year, year_split[1]
def year_range_to_str(start_year, end_year):
if start_year == end_year:
return start_year
return "%s-%s" % (start_year, end_year)
def create_updated_copyright_line(line, last_git_change_year):
copyright_splitter = 'Copyright (c) '
copyright_split = line.split(copyright_splitter)
# Preserve characters on line that are ahead of the start of the copyright
# notice - they are part of the comment block and vary from file-to-file.
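    # e.g. "// Copyright (c) 2014-2018 The Hiphopcoin Core developers\n" with a
    # most recent git change year of 2021 becomes
    # "// Copyright (c) 2014-2021 The Hiphopcoin Core developers\n"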
before_copyright = copyright_split[0]
after_copyright = copyright_split[1]
space_split = after_copyright.split(' ')
year_range = space_split[0]
start_year, end_year = parse_year_range(year_range)
if end_year == last_git_change_year:
return line
return (before_copyright + copyright_splitter +
year_range_to_str(start_year, last_git_change_year) + ' ' +
' '.join(space_split[1:]))
def update_updatable_copyright(filename):
file_lines = read_file_lines(filename)
index, line = get_updatable_copyright_line(file_lines)
if not line:
print_file_action_message(filename, "No updatable copyright.")
return
last_git_change_year = get_most_recent_git_change_year(filename)
new_line = create_updated_copyright_line(line, last_git_change_year)
if line == new_line:
print_file_action_message(filename, "Copyright up-to-date.")
return
file_lines[index] = new_line
write_file_lines(filename, file_lines)
print_file_action_message(filename,
"Copyright updated! -> %s" % last_git_change_year)
def exec_update_header_year(base_directory):
for filename in get_filenames_to_examine(base_directory):
update_updatable_copyright(filename)
################################################################################
# update cmd
################################################################################
UPDATE_USAGE = """
Updates all the copyright headers of "The Hiphopcoin Core developers" which were
changed in a year more recent than is listed. For example:
// Copyright (c) <firstYear>-<lastYear> The Hiphopcoin Core developers
will be updated to:
// Copyright (c) <firstYear>-<lastModifiedYear> The Hiphopcoin Core developers
where <lastModifiedYear> is obtained from the 'git log' history.
This subcommand also handles copyright headers that have only a single year. In those cases:
// Copyright (c) <year> The Hiphopcoin Core developers
will be updated to:
// Copyright (c) <year>-<lastModifiedYear> The Hiphopcoin Core developers
where the update is appropriate.
Usage:
$ ./copyright_header.py update <base_directory>
Arguments:
<base_directory> - The base directory of a hiphopcoin source code repository.
"""
def print_file_action_message(filename, action):
print("%-52s %s" % (filename, action))
def update_cmd(argv):
if len(argv) != 3:
sys.exit(UPDATE_USAGE)
base_directory = argv[2]
if not os.path.exists(base_directory):
sys.exit("*** bad base_directory: %s" % base_directory)
exec_update_header_year(base_directory)
################################################################################
# inserted copyright header format
################################################################################
def get_header_lines(header, start_year, end_year):
lines = header.split('\n')[1:-1]
lines[0] = lines[0] % year_range_to_str(start_year, end_year)
return [line + '\n' for line in lines]
CPP_HEADER = '''
// Copyright (c) %s The Hiphopcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_cpp_header_lines_to_insert(start_year, end_year):
return reversed(get_header_lines(CPP_HEADER, start_year, end_year))
SCRIPT_HEADER = '''
# Copyright (c) %s The Hiphopcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_script_header_lines_to_insert(start_year, end_year):
return reversed(get_header_lines(SCRIPT_HEADER, start_year, end_year))
################################################################################
# query git for year of last change
################################################################################
def get_git_change_year_range(filename):
years = get_git_change_years(filename)
return min(years), max(years)
################################################################################
# check for existing core copyright
################################################################################
def file_already_has_core_copyright(file_lines):
index, _ = get_updatable_copyright_line(file_lines)
return index is not None
################################################################################
# insert header execution
################################################################################
def file_has_hashbang(file_lines):
if len(file_lines) < 1:
return False
if len(file_lines[0]) <= 2:
return False
return file_lines[0][:2] == '#!'
def insert_script_header(filename, file_lines, start_year, end_year):
if file_has_hashbang(file_lines):
insert_idx = 1
else:
insert_idx = 0
header_lines = get_script_header_lines_to_insert(start_year, end_year)
for line in header_lines:
file_lines.insert(insert_idx, line)
write_file_lines(filename, file_lines)
def insert_cpp_header(filename, file_lines, start_year, end_year):
file_lines.insert(0, '\n')
header_lines = get_cpp_header_lines_to_insert(start_year, end_year)
for line in header_lines:
file_lines.insert(0, line)
write_file_lines(filename, file_lines)
def exec_insert_header(filename, style):
file_lines = read_file_lines(filename)
if file_already_has_core_copyright(file_lines):
sys.exit('*** %s already has a copyright by The Hiphopcoin Core developers'
% (filename))
start_year, end_year = get_git_change_year_range(filename)
if style in ['python', 'shell']:
insert_script_header(filename, file_lines, start_year, end_year)
else:
insert_cpp_header(filename, file_lines, start_year, end_year)
################################################################################
# insert cmd
################################################################################
INSERT_USAGE = """
Inserts a copyright header for "The Hiphopcoin Core developers" at the top of the
file in either Python or C++ style as determined by the file extension. If the
file is a Python file and it has a '#!' starting the first line, the header is
inserted in the line below it.
The copyright dates will be set to be:
"<year_introduced>-<current_year>"
where <year_introduced> is according to the 'git log' history. If
<year_introduced> is equal to <current_year>, the date will be set to be:
"<current_year>"
If the file already has a copyright for "The Hiphopcoin Core developers", the
script will exit.
Usage:
$ ./copyright_header.py insert <file>
Arguments:
<file> - A source file in the hiphopcoin repository.
"""
def insert_cmd(argv):
if len(argv) != 3:
sys.exit(INSERT_USAGE)
filename = argv[2]
if not os.path.isfile(filename):
sys.exit("*** bad filename: %s" % filename)
_, extension = os.path.splitext(filename)
if extension not in ['.h', '.cpp', '.cc', '.c', '.py', '.sh']:
sys.exit("*** cannot insert for file extension %s" % extension)
if extension == '.py':
style = 'python'
elif extension == '.sh':
style = 'shell'
else:
style = 'cpp'
exec_insert_header(filename, style)
################################################################################
# UI
################################################################################
USAGE = """
copyright_header.py - utilities for managing copyright headers of 'The Hiphopcoin
Core developers' in repository source files.
Usage:
$ ./copyright_header <subcommand>
Subcommands:
report
update
insert
To see subcommand usage, run them without arguments.
"""
SUBCOMMANDS = ['report', 'update', 'insert']
if __name__ == "__main__":
if len(sys.argv) == 1:
sys.exit(USAGE)
subcommand = sys.argv[1]
if subcommand not in SUBCOMMANDS:
sys.exit(USAGE)
if subcommand == 'report':
report_cmd(sys.argv)
elif subcommand == 'update':
update_cmd(sys.argv)
elif subcommand == 'insert':
insert_cmd(sys.argv)
| 36.68369
| 121
| 0.601698
|
415276611c57e53bb77f4dc661884a5223cdc6e6
| 748
|
py
|
Python
|
var/spack/repos/builtin/packages/py-vine/package.py
|
robertodr/spack
|
9b809e01b47d48f01b3d257912fe1b752943cd3d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 9
|
2018-04-18T07:51:40.000Z
|
2021-09-10T03:56:57.000Z
|
var/spack/repos/builtin/packages/py-vine/package.py
|
robertodr/spack
|
9b809e01b47d48f01b3d257912fe1b752943cd3d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 907
|
2018-04-18T11:17:57.000Z
|
2022-03-31T13:20:25.000Z
|
var/spack/repos/builtin/packages/py-vine/package.py
|
robertodr/spack
|
9b809e01b47d48f01b3d257912fe1b752943cd3d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 29
|
2018-11-05T16:14:23.000Z
|
2022-02-03T16:07:09.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyVine(PythonPackage):
"""Promises, promises, promises."""
homepage = "https://pypi.org/project/vine/"
url = "https://pypi.io/packages/source/v/vine/vine-1.2.0.tar.gz"
version('5.0.0', sha256='7d3b1624a953da82ef63462013bbd271d3eb75751489f9807598e8f340bd637e')
version('1.3.0', sha256='133ee6d7a9016f177ddeaf191c1f58421a1dcc6ee9a42c58b34bed40e1d2cd87')
version('1.2.0', sha256='ee4813e915d0e1a54e5c1963fde0855337f82655678540a6bc5996bca4165f76')
depends_on('py-setuptools', type='build')
| 37.4
| 95
| 0.754011
|
c857573acf32f712e8dc607f4650617fbacca7fa
| 925
|
py
|
Python
|
dlkit/records/assessment/fbw/assessment_taken_records.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 2
|
2018-02-23T12:16:11.000Z
|
2020-10-08T17:54:24.000Z
|
dlkit/records/assessment/fbw/assessment_taken_records.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 87
|
2017-04-21T18:57:15.000Z
|
2021-12-13T19:43:57.000Z
|
dlkit/records/assessment/fbw/assessment_taken_records.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 1
|
2018-03-01T16:44:25.000Z
|
2018-03-01T16:44:25.000Z
|
"""
records.assessment.fbw.assessment_taken_records.py
"""
from ...osid.base_records import QueryInitRecord,\
ObjectInitRecord
class AdvancedQueryAssessmentTakenRecord(ObjectInitRecord):
"""No new methods on the taken record"""
_implemented_record_type_identifiers = [
'advanced-query'
]
class AdvancedQueryAssessmentTakenFormRecord(ObjectInitRecord):
"""No new methods on the form reqcord"""
_implemented_record_type_identifiers = [
'advanced-query'
]
class AdvancedQueryAssessmentTakenQueryRecord(QueryInitRecord):
"""add some advanced query options"""
def match_start_time(self, start_time, match):
if match:
inin = '$gte'
else:
inin = '$lte'
self._my_osid_query._query_terms['actualStartTime'] = {inin: start_time}
def clear_match_start_time(self):
self._my_osid_query._clear_terms('actualStartTime')
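# Example of the advanced query record above (illustrative only):
# match_start_time(t, True) adds {'actualStartTime': {'$gte': t}} to the wrapped
# OSID query ("started at or after t"), match_start_time(t, False) uses '$lte',
# and clear_match_start_time() removes the term again.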
| 27.205882
| 80
| 0.705946
|
01d2232405aa5d4ae2d3719d48330d929246c2ac
| 23,894
|
py
|
Python
|
wavenet_skeleton/model.py
|
Luna86/tensorflow-wavenet_ssp
|
b0b9b55b16284f3810c7ac7f6d7eb0d637a3200d
|
[
"MIT"
] | null | null | null |
wavenet_skeleton/model.py
|
Luna86/tensorflow-wavenet_ssp
|
b0b9b55b16284f3810c7ac7f6d7eb0d637a3200d
|
[
"MIT"
] | null | null | null |
wavenet_skeleton/model.py
|
Luna86/tensorflow-wavenet_ssp
|
b0b9b55b16284f3810c7ac7f6d7eb0d637a3200d
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from .ops import causal_conv
def create_variable(name, shape):
'''Create a convolution filter variable with the specified name and shape,
    and initialize it using Xavier initialization.'''
initializer = tf.contrib.layers.xavier_initializer_conv2d()
variable = tf.Variable(initializer(shape=shape), name=name)
return variable
def create_bias_variable(name, shape):
'''Create a bias variable with the specified name and shape and initialize
it to zero.'''
initializer = tf.constant_initializer(value=0.0, dtype=tf.float32)
    return tf.Variable(initializer(shape=shape), name=name)
class WaveNetModel(object):
'''Implements the WaveNet network for generative audio.
Usage (with the architecture as in the DeepMind paper):
dilations = [2**i for i in range(N)] * M
filter_width = 2 # Convolutions just use 2 samples.
residual_channels = 16 # Not specified in the paper.
dilation_channels = 32 # Not specified in the paper.
skip_channels = 16 # Not specified in the paper.
net = WaveNetModel(batch_size, dilations, filter_width,
residual_channels, dilation_channels,
skip_channels)
loss = net.loss(input_batch)
'''
def __init__(self,
batch_size,
dilations,
filter_width,
residual_channels,
dilation_channels,
skip_channels,
skeleton_channels = 42,
use_biases=False,
scalar_input=False,
initial_filter_width=32,
histograms=False):
'''Initializes the WaveNet model.
Args:
batch_size: How many audio files are supplied per batch
(recommended: 1).
dilations: A list with the dilation factor for each layer.
filter_width: The samples that are included in each convolution,
after dilating.
residual_channels: How many filters to learn for the residual.
dilation_channels: How many filters to learn for the dilated
convolution.
skip_channels: How many filters to learn that contribute to the
quantized softmax output.
            skeleton_channels: How many channels each skeleton frame has; this
                is the dimensionality of the network input and output.
                Default: 42.
use_biases: Whether to add a bias layer to each convolution.
Default: False.
scalar_input: Whether to use the quantized waveform directly as
input to the network instead of one-hot encoding it.
Default: False.
initial_filter_width: The width of the initial filter of the
convolution applied to the scalar input. This is only relevant
if scalar_input=True.
histograms: Whether to store histograms in the summary.
Default: False.
'''
self.batch_size = batch_size
self.dilations = dilations
self.filter_width = filter_width
self.residual_channels = residual_channels
self.dilation_channels = dilation_channels
self.skeleton_channels = skeleton_channels
self.use_biases = use_biases
self.skip_channels = skip_channels
self.scalar_input = scalar_input
self.initial_filter_width = initial_filter_width
self.histograms = histograms
self.variables = self._create_variables()
def _create_variables(self):
'''This function creates all variables used by the network.
This allows us to share them between multiple calls to the loss
function and generation function.'''
var = dict()
        #var stores a dictionary of the major components of the network: {causal_layer, dilated_stack, postprocessing}
with tf.variable_scope('wavenet'):
with tf.variable_scope('causal_layer'):
layer = dict()
#if self.scalar_input:
# initial_channels = 1
# initial_filter_width = self.initial_filter_width
#else:
# initial_channels = self.quantization_channels
# initial_filter_width = self.filter_width
initial_channels = self.skeleton_channels
initial_filter_width = self.filter_width
layer['filter'] = create_variable(
'filter',
[initial_filter_width,
initial_channels,
self.residual_channels]) #todo: filter weight is 3D tensor? what is residual_channels for?
var['causal_layer'] = layer
var['dilated_stack'] = list()
with tf.variable_scope('dilated_stack'):
for i, dilation in enumerate(self.dilations):
with tf.variable_scope('layer{}'.format(i)):
current = dict()
current['filter'] = create_variable(
'filter',
[self.filter_width,
self.residual_channels,
self.dilation_channels])
current['gate'] = create_variable(
'gate',
[self.filter_width,
self.residual_channels,
self.dilation_channels])
current['dense'] = create_variable(
'dense',
[1,
self.dilation_channels,
self.residual_channels])
current['skip'] = create_variable(
'skip',
[1,
self.dilation_channels,
self.skip_channels])
if self.use_biases:
current['filter_bias'] = create_bias_variable(
'filter_bias',
[self.dilation_channels])
current['gate_bias'] = create_bias_variable(
'gate_bias',
[self.dilation_channels])
current['dense_bias'] = create_bias_variable(
'dense_bias',
[self.residual_channels])
current['skip_bias'] = create_bias_variable(
                                'skip_bias',
[self.skip_channels])
var['dilated_stack'].append(current)
with tf.variable_scope('postprocessing'):
current = dict()
current['postprocess1'] = create_variable(
'postprocess1',
[1, self.skip_channels, self.skip_channels])
current['postprocess2'] = create_variable(
'postprocess2',
[1, self.skip_channels, self.skeleton_channels])
if self.use_biases:
current['postprocess1_bias'] = create_bias_variable(
'postprocess1_bias',
[self.skip_channels])
current['postprocess2_bias'] = create_bias_variable(
'postprocess2_bias',
[self.skeleton_channels])
var['postprocessing'] = current
return var
def _create_causal_layer(self, input_batch):
'''Creates a single causal convolution layer.
The layer can change the number of channels.
'''
with tf.name_scope('causal_layer'):
weights_filter = self.variables['causal_layer']['filter']
return causal_conv(input_batch, weights_filter, 1)
def _create_dilation_layer(self, input_batch, layer_index, dilation):
'''Creates a single causal dilated convolution layer.
The layer contains a gated filter that connects to dense output
and to a skip connection:
|-> [gate] -| |-> 1x1 conv -> skip output
| |-> (*) -|
input -|-> [filter] -| |-> 1x1 conv -|
| |-> (+) -> dense output
|------------------------------------|
Where `[gate]` and `[filter]` are causal convolutions with a
non-linear activation at the output.
'''
variables = self.variables['dilated_stack'][layer_index]
weights_filter = variables['filter']
weights_gate = variables['gate']
conv_filter = causal_conv(input_batch, weights_filter, dilation)
conv_gate = causal_conv(input_batch, weights_gate, dilation)
if self.use_biases:
filter_bias = variables['filter_bias']
gate_bias = variables['gate_bias']
conv_filter = tf.add(conv_filter, filter_bias)
conv_gate = tf.add(conv_gate, gate_bias)
out = tf.tanh(conv_filter) * tf.sigmoid(conv_gate)
# The 1x1 conv to produce the residual output
weights_dense = variables['dense']
transformed = tf.nn.conv1d(
out, weights_dense, stride=1, padding="SAME", name="dense")
# The 1x1 conv to produce the skip output
weights_skip = variables['skip']
skip_contribution = tf.nn.conv1d(
out, weights_skip, stride=1, padding="SAME", name="skip")
if self.use_biases:
dense_bias = variables['dense_bias']
skip_bias = variables['skip_bias']
transformed = transformed + dense_bias
skip_contribution = skip_contribution + skip_bias
if self.histograms:
layer = 'layer{}'.format(layer_index)
tf.histogram_summary(layer + '_filter', weights_filter)
tf.histogram_summary(layer + '_gate', weights_gate)
tf.histogram_summary(layer + '_dense', weights_dense)
tf.histogram_summary(layer + '_skip', weights_skip)
if self.use_biases:
tf.histogram_summary(layer + '_biases_filter', filter_bias)
tf.histogram_summary(layer + '_biases_gate', gate_bias)
tf.histogram_summary(layer + '_biases_dense', dense_bias)
tf.histogram_summary(layer + '_biases_skip', skip_bias)
return skip_contribution, input_batch + transformed
def _generator_conv(self, input_batch, state_batch, weights):
'''Perform convolution for a single convolutional processing step.'''
# TODO generalize to filter_width > 2
past_weights = weights[0, :, :]
curr_weights = weights[1, :, :]
output = tf.matmul(state_batch, past_weights) + tf.matmul(
input_batch, curr_weights)
return output
def _generator_causal_layer(self, input_batch, state_batch):
with tf.name_scope('causal_layer'):
weights_filter = self.variables['causal_layer']['filter']
output = self._generator_conv(
input_batch, state_batch, weights_filter)
return output
def _generator_dilation_layer(self, input_batch, state_batch, layer_index,
dilation):
variables = self.variables['dilated_stack'][layer_index]
weights_filter = variables['filter']
weights_gate = variables['gate']
output_filter = self._generator_conv(
input_batch, state_batch, weights_filter)
output_gate = self._generator_conv(
input_batch, state_batch, weights_gate)
if self.use_biases:
output_filter = output_filter + variables['filter_bias']
output_gate = output_gate + variables['gate_bias']
out = tf.tanh(output_filter) * tf.sigmoid(output_gate)
weights_dense = variables['dense']
transformed = tf.matmul(out, weights_dense[0, :, :])
if self.use_biases:
transformed = transformed + variables['dense_bias']
weights_skip = variables['skip']
skip_contribution = tf.matmul(out, weights_skip[0, :, :])
if self.use_biases:
skip_contribution = skip_contribution + variables['skip_bias']
return skip_contribution, input_batch + transformed
def _create_network(self, input_batch):
'''Construct the WaveNet network.'''
outputs = []
current_layer = input_batch
# Pre-process the input with a regular convolution
#if self.scalar_input:
# initial_channels = 1
#else:
# initial_channels = self.skeleton_channels
current_layer = self._create_causal_layer(current_layer)
# Add all defined dilation layers.
with tf.name_scope('dilated_stack'):
for layer_index, dilation in enumerate(self.dilations):
with tf.name_scope('layer{}'.format(layer_index)):
output, current_layer = self._create_dilation_layer(
current_layer, layer_index, dilation)
outputs.append(output)
with tf.name_scope('postprocessing'):
# Perform (+) -> ReLU -> 1x1 conv -> ReLU -> 1x1 conv to
# postprocess the output.
w1 = self.variables['postprocessing']['postprocess1']
w2 = self.variables['postprocessing']['postprocess2']
if self.use_biases:
b1 = self.variables['postprocessing']['postprocess1_bias']
b2 = self.variables['postprocessing']['postprocess2_bias']
if self.histograms:
tf.histogram_summary('postprocess1_weights', w1)
tf.histogram_summary('postprocess2_weights', w2)
if self.use_biases:
tf.histogram_summary('postprocess1_biases', b1)
tf.histogram_summary('postprocess2_biases', b2)
# We skip connections from the outputs of each layer, adding them
# all up here.
total = sum(outputs)
transformed1 = tf.nn.relu(total)
conv1 = tf.nn.conv1d(transformed1, w1, stride=1, padding="SAME")
if self.use_biases:
conv1 = tf.add(conv1, b1)
transformed2 = tf.nn.relu(conv1)
conv2 = tf.nn.conv1d(transformed2, w2, stride=1, padding="SAME")
if self.use_biases:
conv2 = tf.add(conv2, b2)
return conv2
def _create_generator(self, input_batch):
'''Construct an efficient incremental generator.'''
init_ops = []
push_ops = []
outputs = []
current_layer = input_batch
#todo: change FIFO input shape.
q = tf.FIFOQueue(
1,
dtypes=tf.float32,
#shapes=(self.batch_size, self.quantization_channels))
shapes=(self.batch_size, self.skeleton_channels))
init = q.enqueue_many(
#tf.zeros((1, self.batch_size, self.quantization_channels)))
tf.zeros((1, self.batch_size, self.skeleton_channels)))
current_state = q.dequeue()
push = q.enqueue([current_layer])
init_ops.append(init)
push_ops.append(push)
current_layer = self._generator_causal_layer(
current_layer, current_state)
# Add all defined dilation layers.
with tf.name_scope('dilated_stack'):
for layer_index, dilation in enumerate(self.dilations):
with tf.name_scope('layer{}'.format(layer_index)):
q = tf.FIFOQueue(
dilation,
dtypes=tf.float32,
shapes=(self.batch_size, self.residual_channels))
init = q.enqueue_many(
tf.zeros((dilation, self.batch_size,
self.residual_channels)))
current_state = q.dequeue()
push = q.enqueue([current_layer])
init_ops.append(init)
push_ops.append(push)
output, current_layer = self._generator_dilation_layer(
current_layer, current_state, layer_index, dilation)
outputs.append(output)
self.init_ops = init_ops
self.push_ops = push_ops
with tf.name_scope('postprocessing'):
variables = self.variables['postprocessing']
# Perform (+) -> ReLU -> 1x1 conv -> ReLU -> 1x1 conv to
# postprocess the output.
w1 = variables['postprocess1']
w2 = variables['postprocess2']
if self.use_biases:
b1 = variables['postprocess1_bias']
b2 = variables['postprocess2_bias']
# We skip connections from the outputs of each layer, adding them
# all up here.
total = sum(outputs)
transformed1 = tf.nn.relu(total)
conv1 = tf.matmul(transformed1, w1[0, :, :])
if self.use_biases:
conv1 = conv1 + b1
transformed2 = tf.nn.relu(conv1)
conv2 = tf.matmul(transformed2, w2[0, :, :])
if self.use_biases:
conv2 = conv2 + b2
return conv2
def _one_hot(self, input_batch):
'''One-hot encodes the waveform amplitudes.
This allows the definition of the network as a categorical distribution
over a finite set of possible amplitudes.
'''
with tf.name_scope('one_hot_encode'):
encoded = tf.one_hot(
input_batch,
depth=self.skeleton_channels,
dtype=tf.float32)
shape = [self.batch_size, -1, self.skeleton_channels]
encoded = tf.reshape(encoded, shape)
return encoded
def predict_proba(self, waveform, name='wavenet'):
'''Computes the probability distribution of the next sample based on
all samples in the input waveform.
If you want to generate audio by feeding the output of the network back
as an input, see predict_proba_incremental for a faster alternative.'''
with tf.name_scope(name):
if self.scalar_input:
encoded = tf.cast(waveform, tf.float32)
encoded = tf.reshape(encoded, [-1, 1])
else:
encoded = self._one_hot(waveform)
raw_output = self._create_network(encoded)
out = tf.reshape(raw_output, [-1, self.skeleton_channels])
# Cast to float64 to avoid bug in TensorFlow
proba = tf.cast(
tf.nn.softmax(tf.cast(out, tf.float64)), tf.float32)
last = tf.slice(
proba,
[tf.shape(proba)[0] - 1, 0],
[1, self.skeleton_channels])
return tf.reshape(last, [-1])
def predict_proba_incremental(self, waveform, name='wavenet'):
'''Computes the probability distribution of the next sample
incrementally, based on a single sample and all previously passed
samples.'''
if self.filter_width > 2:
raise NotImplementedError("Incremental generation does not "
"support filter_width > 2.")
if self.scalar_input:
raise NotImplementedError("Incremental generation does not "
"support scalar input yet.")
with tf.name_scope(name):
#suspect the dimension of the encoded to be T x 42
#encoded = tf.one_hot(waveform, self.quantization_channels)
#encoded = tf.reshape(encoded, [-1, self.quantization_channels])
#todo: assume the input dimension of waveform to be T x 42
raw_output = self._create_generator(waveform)
out = tf.reshape(raw_output, [-1, self.skeleton_channels])
#proba = tf.cast(
# tf.nn.softmax(tf.cast(out, tf.float64)), tf.float32)
#last: output only the last time step
last = tf.slice(
out,
[tf.shape(out)[0] - 1, 0],
[1, self.skeleton_channels])
return tf.reshape(last, [-1])
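    # Hedged usage sketch for the incremental path above (not part of the original
    # file): after building `next_sample = net.predict_proba_incremental(samples)`,
    # a generation loop would typically run `sess.run(net.init_ops)` once to fill
    # the queues, then repeatedly evaluate
    # `sess.run(net.push_ops + [next_sample], feed_dict={samples: current_frame})`,
    # feeding each result back in as the next input frame.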
def loss(self,
input_batch,
l2_regularization_strength=None,
name='wavenet'):
'''Creates a WaveNet network and returns the autoencoding loss.
The variables are all scoped to the given name.
'''
with tf.name_scope(name):
# We mu-law encode and quantize the input audioform.
#input_batch = mu_law_encode(input_batch,
# self.quantization_channels)
#encoded = self._one_hot(input_batch)
#if self.scalar_input:
# network_input = tf.reshape(
# tf.cast(input_batch, tf.float32),
# [self.batch_size, -1, 1])
#else:
# network_input = encoded
#network_input as N x T x 42 tensor
network_input = tf.reshape(
tf.cast(input_batch, tf.float32),
[self.batch_size, -1, self.skeleton_channels])
#raw_output = self._create_network(network_input)-
prediction = self._create_network(network_input)
with tf.name_scope('loss'):
# Shift original input left by one sample, which means that
# each output sample has to predict the next input sample.
# shifted is the GT of the prediction.
#shifted = tf.slice(encoded, [0, 1, 0],
# [-1, tf.shape(encoded)[1] - 1, -1])
#shifted = tf.pad(shifted, [[0, 0], [0, 1], [0, 0]])
#prediction = tf.reshape(raw_output,
# [-1, self.quantization_channels])
#loss = tf.nn.softmax_cross_entropy_with_logits(
#prediction,
#tf.reshape(shifted, [-1, self.quantization_channels]))
gt = tf.slice(network_input, [0, 1, 0], [-1, -1, -1])
gt = tf.pad(gt, [[0, 0], [0, 1], [0, 0]])
diff = prediction - gt
#diff = tf.Print(diff, [diff], message='Value of diff')
#diff = tf.Print(diff, [tf.shape(diff)], message='Shape of diff')
#output = sum(t ** 2) / 2
loss = tf.nn.l2_loss(diff)
#reduced_loss = tf.reduce_mean(loss)
reduced_loss = loss / self.batch_size
reduced_loss = reduced_loss / tf.cast(tf.shape(network_input)[1], tf.float32)
#reduced_loss = loss
#tf.scalar_summary('loss', reduced_loss)
tf.summary.scalar('loss', reduced_loss)
if l2_regularization_strength is None:
return reduced_loss
else:
# L2 regularization for all trainable parameters
l2_loss = tf.add_n([tf.nn.l2_loss(v)
for v in tf.trainable_variables()
if not('bias' in v.name)])
# Add the regularization term to the loss
total_loss = (reduced_loss +
l2_regularization_strength * l2_loss)
                tf.summary.scalar('l2_loss', l2_loss)
                tf.summary.scalar('total_loss', total_loss)
return total_loss
| 43.761905
| 111
| 0.556918
|
9679d8e647cc90d957826e45d7cf74d56dd61ea7
| 941
|
py
|
Python
|
pygp_retina/tests_interactive/show_average.py
|
SimLeek/pglsl-neural
|
8daaffded197cf7be4432754bc5941f1bca3239c
|
[
"MIT"
] | 5
|
2018-03-25T23:43:32.000Z
|
2019-05-18T10:35:21.000Z
|
pygp_retina/tests_interactive/show_average.py
|
PyGPAI/PyGPNeural
|
8daaffded197cf7be4432754bc5941f1bca3239c
|
[
"MIT"
] | 11
|
2017-12-24T20:03:16.000Z
|
2017-12-26T00:18:34.000Z
|
pygp_retina/tests_interactive/show_average.py
|
SimLeek/PyGPNeural
|
8daaffded197cf7be4432754bc5941f1bca3239c
|
[
"MIT"
] | null | null | null |
from cv_pubsubs import webcam_pub as camp
from cv_pubsubs import window_sub as win
from pygp_retina.simple_average import avg_total_color
if False:
from typing import Tuple
def display_average(cam,
request_size=(1280, 720), # type: Tuple[int, int]
fps_limit=60, # type: float
high_speed=True, # type: bool
no_cl=False # type: bool
):
def cam_handler(frame, cam_id):
win.SubscriberWindows.frame_dict[str(cam_id) + "Frame"] = frame
cam_thread = camp.frame_handler_thread(cam, cam_handler, fps_limit=fps_limit,
high_speed=high_speed)
callback = avg_total_color
win.SubscriberWindows(window_names=["avg"],
input_cams=[cam],
input_vid_global_names=[str(cam) + 'Frame'],
callbacks=[callback]).loop()
return cam_thread
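# Hedged usage sketch (not in the original file): display_average(0) would open the
# first webcam, run the avg_total_color callback on each frame inside the "avg"
# window loop, and return the capture thread started by frame_handler_thread.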
| 32.448276
| 81
| 0.600425
|
20ba36560d32edf12087e68ad900c6c2e4eca069
| 1,015
|
py
|
Python
|
test/file/test_file_util.py
|
Future-Walkers/python-mod
|
b472f7dc6636f5d5cc634906b09a51b29a435a87
|
[
"MIT"
] | null | null | null |
test/file/test_file_util.py
|
Future-Walkers/python-mod
|
b472f7dc6636f5d5cc634906b09a51b29a435a87
|
[
"MIT"
] | null | null | null |
test/file/test_file_util.py
|
Future-Walkers/python-mod
|
b472f7dc6636f5d5cc634906b09a51b29a435a87
|
[
"MIT"
] | 3
|
2021-08-17T07:23:16.000Z
|
2021-08-17T07:31:03.000Z
|
# !/usr/bin/env python3
# -*-coding:utf-8 -*-
"""
# File : test_file_util.py
# Time :2020/8/19 15:53
# Author :Rodney Cheung
"""
import os
import unittest
from test.testdata.test_util import TestUtil
from wisbec.file.file import FileUtil
class TestFileUtil(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.test_data_path = os.path.join(TestUtil.get_test_data_path(), 'file', 'file_reader')
def test_last_line(self):
line = FileUtil.last_line(os.path.join(self.test_data_path, 'mitmproxy-ca-cert.pem'))
self.assertEqual(line, '-----END CERTIFICATE-----')
def test_read_file_by_encoding(self):
print(FileUtil.read_file_by_encoding(os.path.join(self.test_data_path, 'mitmproxy-ca-cert.pem')))
def test_first_line(self):
line = FileUtil.first_line(os.path.join(self.test_data_path, 'mitmproxy-ca-cert.pem'))
self.assertEqual(line, '-----BEGIN CERTIFICATE-----')
if __name__ == '__main__':
unittest.main()
| 29
| 105
| 0.682759
|
f64446ce6c783ef845f3e77e7c7f2763fd8c2263
| 167
|
py
|
Python
|
sample/config.py
|
YuMurata/ParameterOptimizer
|
60b172a7a9d3f54213ac3d59e15ebb4d707475c3
|
[
"MIT"
] | null | null | null |
sample/config.py
|
YuMurata/ParameterOptimizer
|
60b172a7a9d3f54213ac3d59e15ebb4d707475c3
|
[
"MIT"
] | null | null | null |
sample/config.py
|
YuMurata/ParameterOptimizer
|
60b172a7a9d3f54213ac3d59e15ebb4d707475c3
|
[
"MIT"
] | null | null | null |
class TargetValue:
MAX = 100
MIN = 0
@classmethod
def normalize(cls, x: int) -> float:
return x * (cls.MAX - cls.MIN) / (2**50 - 1) + cls.MIN
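# Hedged demo of the mapping above (not in the original file): x is assumed to be a
# 50-bit integer, so the endpoints land exactly on MIN and MAX.
if __name__ == '__main__':
    print(TargetValue.normalize(0))            # 0.0
    print(TargetValue.normalize(2 ** 50 - 1))  # 100.0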
| 20.875
| 62
| 0.538922
|
0ae40b8b691455b9e22e989a785e19b6d5311515
| 11,920
|
py
|
Python
|
pydefect/tests/cli/vasp/test_main.py
|
KazMorita/pydefect
|
681e4bfe92c53edfe8b50cb72768114b28daabc9
|
[
"MIT"
] | 1
|
2021-09-10T05:07:39.000Z
|
2021-09-10T05:07:39.000Z
|
pydefect/tests/cli/vasp/test_main.py
|
obaica/pydefect-1
|
31e5ad774845f436554ef15000b8eba3b168a65c
|
[
"MIT"
] | null | null | null |
pydefect/tests/cli/vasp/test_main.py
|
obaica/pydefect-1
|
31e5ad774845f436554ef15000b8eba3b168a65c
|
[
"MIT"
] | 1
|
2022-01-07T10:14:16.000Z
|
2022-01-07T10:14:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020. Distributed under the terms of the MIT License.
from argparse import Namespace
from pathlib import Path
from pydefect.analyzer.calc_results import CalcResults
from pydefect.analyzer.unitcell import Unitcell
from pydefect.chem_pot_diag.chem_pot_diag import ChemPotDiag
from pydefect.cli.vasp.main import parse_args
from pydefect.corrections.efnv_correction import \
ExtendedFnvCorrection
from pydefect.defaults import defaults
from pydefect.input_maker.supercell_info import SupercellInfo
from pymatgen import Composition
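# The tests below share one pattern: patch the heavy helpers (loadfn, Vasprun,
# Outcar, ...) with mocks, call parse_args() on a CLI-style argv list, and compare
# the resulting Namespace field by field (func is taken from the parsed result
# itself, since it is resolved by parse_args).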
def test_print(mocker):
mock = mocker.patch("pydefect.cli.vasp.main.loadfn")
parsed_args = parse_args(["p", "-f", "a.json"])
expected = Namespace(obj=mock.return_value, func=parsed_args.func)
assert parsed_args == expected
def test_unitcell(mocker):
mock = mocker.patch("pydefect.cli.vasp.main.Vasprun")
mock_outcar = mocker.patch("pydefect.cli.vasp.main.Outcar")
parsed_args = parse_args(["u",
"-vb", "vasprun.xml",
"-ob", "OUTCAR-1",
"-odc", "OUTCAR-2",
"-odi", "OUTCAR-3"])
    # func is a function reference, so the expected Namespace must hold the same object.
expected = Namespace(
vasprun_band=mock.return_value,
outcar_band=mock_outcar.return_value,
outcar_dielectric_clamped=mock_outcar.return_value,
outcar_dielectric_ionic=mock_outcar.return_value,
func=parsed_args.func,
)
assert parsed_args == expected
mock.assert_called_once_with("vasprun.xml")
mock_outcar.assert_any_call("OUTCAR-1")
mock_outcar.assert_any_call("OUTCAR-2")
mock_outcar.assert_called_with("OUTCAR-3")
def test_make_poscars_wo_options():
parsed_args = parse_args(["mp",
"-e", "Mg", "O"])
expected = Namespace(
elements=["Mg", "O"],
e_above_hull=defaults.e_above_hull,
func=parsed_args.func)
assert parsed_args == expected
def test_make_poscars_w_options():
parsed_args = parse_args(["mp",
"-e", "Mg", "O",
"--e_above_hull", "0.1"])
expected = Namespace(
elements=["Mg", "O"],
e_above_hull=0.1,
func=parsed_args.func)
assert parsed_args == expected
def test_make_cpd_options():
parsed_args = parse_args(["mcpd",
"-d", "Mg", "O",
"-t", "MgO"])
expected = Namespace(
dirs=[Path("Mg"), Path("O")],
target=Composition("MgO"),
elements=None,
atom_energy_yaml=None,
yaml="cpd.yaml",
update=False,
func=parsed_args.func)
assert parsed_args == expected
def test_plot_cpd_options():
parsed_args = parse_args(["pcpd",
"-y", "a.yaml"])
expected = Namespace(
yaml="a.yaml",
func=parsed_args.func)
assert parsed_args == expected
def test_make_supercell_wo_options(mocker):
mock = mocker.patch("pydefect.cli.vasp.main.IStructure")
parsed_args = parse_args(["s"])
    # func is a function reference, so the expected Namespace must hold the same object.
expected = Namespace(
unitcell=mock.from_file.return_value,
matrix=None,
min_num_atoms=50,
max_num_atoms=300,
func=parsed_args.func,
)
assert parsed_args == expected
mock.from_file.assert_called_once_with("POSCAR")
def test_make_supercell_w_options(mocker):
mock = mocker.patch("pydefect.cli.vasp.main.IStructure")
parsed_args = parse_args(["s",
"-p", "POSCAR-tmp",
"--matrix", "1", "2", "3",
"--min_atoms", "1000",
"--max_atoms", "2000"])
    # func is a function reference, so the expected Namespace must hold the same object.
expected = Namespace(
unitcell=mock.from_file.return_value,
matrix=[1, 2, 3],
min_num_atoms=1000,
max_num_atoms=2000,
func=parsed_args.func,
)
assert parsed_args == expected
mock.from_file.assert_called_once_with("POSCAR-tmp")
def test_defect_set_wo_options():
parsed_args = parse_args(["ds"])
expected = Namespace(
oxi_states=None,
dopants=None,
kwargs=None,
func=parsed_args.func,
)
assert parsed_args == expected
def test_defect_set_w_options():
parsed_args = parse_args(["ds",
"-o", "He", "1",
"-d", "Li",
"-k", "Li_H1", "Va_H1_0"])
expected = Namespace(
oxi_states=["He", 1],
dopants=["Li"],
kwargs=["Li_H1", "Va_H1_0"],
func=parsed_args.func,
)
assert parsed_args == expected
def test_defect_entries():
parsed_args = parse_args(["de"])
expected = Namespace(
func=parsed_args.func,
)
assert parsed_args == expected
def test_append_interstitial(mocker):
mock_loadfn = mocker.patch("pydefect.cli.vasp.main.loadfn")
mock_supercell_info = mocker.Mock(spec=SupercellInfo, autospec=True)
mock_loadfn.return_value = mock_supercell_info
mock_structure = mocker.patch("pydefect.cli.vasp.main.Structure")
parsed_args = parse_args(["ai",
"-s", "supercell_info.json",
"-p", "POSCAR",
"-c", "0.1", "0.2", "0.3"])
expected = Namespace(
supercell_info=mock_supercell_info,
base_structure=mock_structure.from_file.return_value,
frac_coords=[0.1, 0.2, 0.3],
func=parsed_args.func,
)
assert parsed_args == expected
def test_pop_interstitial(mocker):
mock_loadfn = mocker.patch("pydefect.cli.vasp.main.loadfn")
mock_supercell_info = mocker.Mock(spec=SupercellInfo, autospec=True)
mock_loadfn.return_value = mock_supercell_info
parsed_args = parse_args(["pi",
"-s", "supercell_info.json",
"-i", "1000"])
expected = Namespace(
supercell_info=mock_supercell_info,
index=1000,
func=parsed_args.func,
)
assert parsed_args == expected
def test_calc_results():
parsed_args = parse_args(["cr", "-d", "Va_O1_0", "Va_O1_1"])
expected = Namespace(
dirs=[Path("Va_O1_0"), Path("Va_O1_1")],
func=parsed_args.func,
)
assert parsed_args == expected
def test_efnv_correction(mocker):
mock_calc_results = mocker.Mock(spec=CalcResults, autospec=True)
mock_unitcell = mocker.Mock(spec=Unitcell, autospec=True)
def side_effect(filename):
if filename == "perfect/calc_results.json":
return mock_calc_results
elif filename == "unitcell.json":
return mock_unitcell
else:
raise ValueError
mocker.patch("pydefect.cli.vasp.main.loadfn", side_effect=side_effect)
parsed_args = parse_args(["efnv",
"-d", "Va_O1_0", "Va_O1_1",
"-pcr", "perfect/calc_results.json",
"-u", "unitcell.json"])
expected = Namespace(
dirs=[Path("Va_O1_0"), Path("Va_O1_1")],
perfect_calc_results=mock_calc_results,
unitcell=mock_unitcell,
func=parsed_args.func)
assert parsed_args == expected
def test_gkfo_correction(mocker):
mock_i_correction = mocker.Mock(spec=ExtendedFnvCorrection, autospec=True)
mock_i_calc_results = mocker.Mock(spec=CalcResults, autospec=True)
mock_f_calc_results = mocker.Mock(spec=CalcResults, autospec=True)
mock_unitcell = mocker.Mock(spec=Unitcell, autospec=True)
def side_effect(filename):
if filename == "a/correction.json":
return mock_i_correction
elif filename == "a/calc_results.json":
return mock_i_calc_results
elif filename == "a/absorption/calc_results.json":
return mock_f_calc_results
elif filename == "unitcell.json":
return mock_unitcell
else:
raise ValueError
mocker.patch("pydefect.cli.vasp.main.loadfn", side_effect=side_effect)
parsed_args = parse_args(["gkfo",
"-iefnv", "a/correction.json",
"-cd", "1",
"-icr", "a/calc_results.json",
"-fcr", "a/absorption/calc_results.json",
"-u", "unitcell.json"])
expected = Namespace(
initial_efnv_correction=mock_i_correction,
initial_calc_results=mock_i_calc_results,
final_calc_results=mock_f_calc_results,
charge_diff=1,
unitcell=mock_unitcell,
func=parsed_args.func)
assert parsed_args == expected
def test_defect_eigenvalues(mocker):
mock_calc_results = mocker.Mock(spec=CalcResults, autospec=True)
def side_effect(filename):
if filename == "perfect/calc_results.json":
return mock_calc_results
else:
raise ValueError
mocker.patch("pydefect.cli.vasp.main.loadfn", side_effect=side_effect)
parsed_args = parse_args(["eig",
"-d", "Va_O1_0", "Va_O1_1",
"-pcr", "perfect/calc_results.json"])
expected = Namespace(
dirs=[Path("Va_O1_0"), Path("Va_O1_1")],
perfect_calc_results=mock_calc_results,
func=parsed_args.func)
assert parsed_args == expected
def test_band_edge_characters(mocker):
mock = mocker.patch("pydefect.cli.vasp.main.loadfn")
parsed_args = parse_args(["mec",
"-d", "Va_O1_0", "Va_O1_1",
"-pcr", "perfect/calc_results.json"])
expected = Namespace(
dirs=[Path("Va_O1_0"), Path("Va_O1_1")],
perfect_calc_results=mock.return_value,
func=parsed_args.func)
assert parsed_args == expected
mock.assert_any_call("perfect/calc_results.json")
def test_band_edge_states(mocker):
mock = mocker.patch("pydefect.cli.vasp.main.loadfn")
parsed_args = parse_args(["es",
"-d", "Va_O1_0", "Va_O1_1",
"-p", "perfect/edge_characters.json"])
expected = Namespace(
dirs=[Path("Va_O1_0"), Path("Va_O1_1")],
perfect_edge_characters=mock.return_value,
func=parsed_args.func)
assert parsed_args == expected
mock.assert_any_call("perfect/edge_characters.json")
def test_defect_formation_energy(mocker):
mock_calc_results = mocker.Mock(spec=CalcResults, autospec=True)
mock_unitcell = mocker.Mock(spec=Unitcell, autospec=True)
mock_chem_pot_diag = mocker.Mock(spec=ChemPotDiag, autospec=True)
def side_effect(filename):
if filename == "perfect/calc_results.json":
return mock_calc_results
elif filename == "unitcell.json":
return mock_unitcell
else:
raise ValueError
mocker.patch("pydefect.cli.vasp.main.loadfn", side_effect=side_effect)
parsed_args = parse_args(["e",
"-d", "Va_O1_0", "Va_O1_1",
"-pcr", "perfect/calc_results.json",
"-u", "unitcell.json",
"-c", "cpd.yaml",
"-l", "A",
"-y", "-5", "5",
"-s",
"-p"
])
expected = Namespace(
dirs=[Path("Va_O1_0"), Path("Va_O1_1")],
perfect_calc_results=mock_calc_results,
unitcell=mock_unitcell,
cpd_yaml="cpd.yaml",
label="A",
y_range=[-5, 5],
skip_shallow=True,
print=True,
func=parsed_args.func)
assert parsed_args == expected
| 34.550725
| 78
| 0.592701
|
9d3448b0e6a35eae42034adaa84f70e0e4db5370
| 72
|
py
|
Python
|
spotdl/metadata/embedders/__init__.py
|
khjxiaogu/spotify-downloader
|
a8dcb8d998da0769bbe210f2808d16b346453c23
|
[
"MIT"
] | 4,698
|
2017-06-20T22:37:10.000Z
|
2022-03-28T13:38:07.000Z
|
spotdl/metadata/embedders/__init__.py
|
Delgan/spotify-downloader
|
8adf3e8d6b98269b1538dd91c9a44ed345c77545
|
[
"MIT"
] | 690
|
2017-06-20T20:08:42.000Z
|
2022-02-26T23:36:07.000Z
|
spotdl/metadata/embedders/__init__.py
|
Delgan/spotify-downloader
|
8adf3e8d6b98269b1538dd91c9a44ed345c77545
|
[
"MIT"
] | 741
|
2017-06-21T23:32:51.000Z
|
2022-03-07T12:11:54.000Z
|
from spotdl.metadata.embedders.default_embedder import EmbedderDefault
| 24
| 70
| 0.888889
|
7e198302ffe33fbc3fed3bd0a31229c658da07f9
| 500
|
py
|
Python
|
cryptoblotter/trades/__init__.py
|
PAV-Laboratory/cryptoblotter
|
f573592a3638fbc6cae24d76305de36b932949c6
|
[
"MIT"
] | 1
|
2021-08-01T19:16:02.000Z
|
2021-08-01T19:16:02.000Z
|
cryptoblotter/trades/__init__.py
|
PAV-Laboratory/cryptoblotter
|
f573592a3638fbc6cae24d76305de36b932949c6
|
[
"MIT"
] | null | null | null |
cryptoblotter/trades/__init__.py
|
PAV-Laboratory/cryptoblotter
|
f573592a3638fbc6cae24d76305de36b932949c6
|
[
"MIT"
] | null | null | null |
from .candles import CandleCallback
from .firestore import FirestoreTradeCallback
from .gcppubsub import GCPPubSubTradeCallback
from .thresh import ThreshCallback
from .trades import (
NonSequentialIntegerTradeCallback,
SequentialIntegerTradeCallback,
TradeCallback,
)
__all__ = [
"FirestoreTradeCallback",
"GCPPubSubTradeCallback",
"CandleCallback",
"TradeCallback",
"ThreshCallback",
"SequentialIntegerTradeCallback",
"NonSequentialIntegerTradeCallback",
]
| 25
| 45
| 0.78
|
de3a26f4ebe4d3a6c55742ddf002e1e5a7531af6
| 3,123
|
py
|
Python
|
app/app/settings.py
|
samueltcsantos/recipe-app-api
|
aea5f1349dada1700b59b16346d3daedd67bb7e4
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
samueltcsantos/recipe-app-api
|
aea5f1349dada1700b59b16346d3daedd67bb7e4
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
samueltcsantos/recipe-app-api
|
aea5f1349dada1700b59b16346d3daedd67bb7e4
|
[
"MIT"
] | null | null | null |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9ueeac((uj-)m#9qx&(z&5rb3c_&0gfca5yest2@s&(qt7)c3p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
| 25.185484
| 91
| 0.693884
|
4df205db387336eeb7a1fb7d3d14ec7d648acf05
| 2,745
|
py
|
Python
|
Applications/ImageSegment/ImgSegCode.py
|
abefrandsen/numerical_computing
|
90559f7c4f387885eb44ea7b1fa19bb602f496cb
|
[
"CC-BY-3.0"
] | null | null | null |
Applications/ImageSegment/ImgSegCode.py
|
abefrandsen/numerical_computing
|
90559f7c4f387885eb44ea7b1fa19bb602f496cb
|
[
"CC-BY-3.0"
] | null | null | null |
Applications/ImageSegment/ImgSegCode.py
|
abefrandsen/numerical_computing
|
90559f7c4f387885eb44ea7b1fa19bb602f496cb
|
[
"CC-BY-3.0"
] | null | null | null |
#Applications: Image Segmentation
import numpy as np
import scipy.sparse as spar
def imgAdj(img, radius, sigmaI, sigmaX):
    nodes = img.flatten()
    height, width = img.shape
    W = spar.lil_matrix((nodes.size, nodes.size), dtype=float)
    def add_weight(row, col, k, l):
        #Weight between pixel (row, col) and pixel (k, l). The affinity used here,
        #exp(-|I_i - I_j|/sigmaI - d_ij/sigmaX) for d_ij < radius, is one common
        #choice for normalized cuts; adjust it if the lab text prescribes another.
        dist = np.sqrt((row - k)**2 + (col - l)**2)
        if dist < radius:
            W[row*width + col, k*width + l] = np.exp(
                -abs(float(img[row, col]) - float(img[k, l]))/sigmaI - dist/sigmaX)
    #Here we generate the values that go into the adjacency matrix W. For the most part we don't have
    #to worry too much and we will be in the final else statement. However, when we are on the boundaries
    #we need to make sure that we aren't looking at pixels that aren't there. Each branch below only
    #changes the loop bounds; the body is always the same add_weight call.
    for row in range(height):
        for col in range(width):
            #top right
            if (row < radius) and (col < radius):
                for k in range(row + radius):
                    for l in range(col + radius):
                        add_weight(row, col, k, l)
            #top left
            elif (row < radius) and (col > width - radius):
                #subMat = img(1:i+r,j-r:width)
                for k in range(row + radius):
                    for l in range(col - radius, width):
                        add_weight(row, col, k, l)
            #bottom right
            elif (row > height - radius) and (col < radius):
                #subMat = img(i-r:height,1:j+r);
                for k in range(row - radius, height):
                    for l in range(col + radius):
                        add_weight(row, col, k, l)
            #bottom left
            elif (row > height - radius) and (col > width - radius):
                #subMat = img(i-r:height,j-r:width);
                for k in range(row - radius, height):
                    for l in range(col - radius, width):
                        add_weight(row, col, k, l)
            #top middle
            elif (row < radius):# and (col > radius and col < width-radius):
                for k in range(row + radius):
                    for l in range(col - radius, col + radius):
                        add_weight(row, col, k, l)
            #middle left
            elif (col < radius):
                for k in range(row - radius, row + radius):
                    for l in range(col + radius):
                        add_weight(row, col, k, l)
            #middle right (the original condition compared col against height; width is intended)
            elif (col > width - radius):
                for k in range(row - radius, row + radius):
                    for l in range(col - radius, width):
                        add_weight(row, col, k, l)
            #bottom middle
            elif (row > height - radius):
                for k in range(row - radius, height):
                    for l in range(col - radius, col + radius):
                        add_weight(row, col, k, l)
            else: # (row > radius and row < height-radius) and (col > radius and col < width-radius):
                #subMat = img(i-r:i+r,j-r:j+r);
                for k in range(row - radius, row + radius):
                    for l in range(col - radius, col + radius):
                        add_weight(row, col, k, l)
    W = W.tocsc()
    return W
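# Hedged usage sketch (not part of the original lab file): for a 2-D grayscale
# array img, something like W = imgAdj(img, radius=5, sigmaI=0.02, sigmaX=3.0)
# builds the sparse affinity matrix consumed by the normalized-cuts segmentation
# step; the radius and sigma values here are illustrative, not prescribed.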
| 40.970149
| 126
| 0.494718
|
ac6d9c8dc689c6d9960400ef2c0fda5c90663c9b
| 567
|
py
|
Python
|
cogs/music.py
|
TKLProjects/TravBot.py
|
b09114a0f5e6b3b5caef75305818780b38027dbe
|
[
"Apache-2.0"
] | null | null | null |
cogs/music.py
|
TKLProjects/TravBot.py
|
b09114a0f5e6b3b5caef75305818780b38027dbe
|
[
"Apache-2.0"
] | 3
|
2020-05-11T14:59:34.000Z
|
2020-05-23T10:13:02.000Z
|
cogs/music.py
|
TKLProjects/TravBot.py
|
b09114a0f5e6b3b5caef75305818780b38027dbe
|
[
"Apache-2.0"
] | null | null | null |
# Basic imports:
import discord
from discord.ext import commands
# Cog class:
class Music(commands.Cog):
    # Keep a reference to the bot client so the cog's commands can use it:
def __init__(self, client):
self.client = client
# This is an event:
# @commands.Cog.listener()
# async def on_ready(self):
# print('This will be printed to the console.')
# This is a command:
@commands.command()
async def ping3(self, ctx):
await ctx.send("pong nigga")
# This always needs to be at the end of a cog file:
def setup(client):
client.add_cog(Music(client))
| 23.625
| 55
| 0.657848
|
0a8c44e88d882f3febaac34f458268324a1148d8
| 1,152
|
py
|
Python
|
setup.py
|
lfigueirasfdc/sfdclib
|
a772ee925adf222d7205ca824a685d08e238b31a
|
[
"MIT"
] | null | null | null |
setup.py
|
lfigueirasfdc/sfdclib
|
a772ee925adf222d7205ca824a685d08e238b31a
|
[
"MIT"
] | null | null | null |
setup.py
|
lfigueirasfdc/sfdclib
|
a772ee925adf222d7205ca824a685d08e238b31a
|
[
"MIT"
] | null | null | null |
"""sfdclib_alt package setup"""
import textwrap
from setuptools import setup
setup(
name='sfdclib_alt',
version='0.1',
author='Luis Figueira',
author_email='lfigueira@salesforce.com',
packages=['sfdclib_alt'],
url='https://github.com/lfigueirasfdc/sfdclib',
license='MIT',
description=("SFDClib_alt is a fork of rbauction/sfdclib, to include the ability to retrieve packaged metadata"),
long_description=textwrap.dedent(open('README.rst', 'r').read()),
package_data={'': ['LICENSE']},
package_dir={'sfdclib_alt': 'sfdclib_alt'},
install_requires=[
'requests[security]'
],
keywords="python salesforce salesforce.com metadata tooling api",
classifiers=[
        'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'
]
)
| 33.882353
| 117
| 0.642361
|
51fac2904f9fcbfac5e589ad47c42fe57f13b3c1
| 150
|
py
|
Python
|
saas/system/api/resource/backend-framework/webpy/urls.py
|
iuskye/SREWorks
|
a2a7446767d97ec5f6d15bd00189c42150d6c894
|
[
"Apache-2.0"
] | 407
|
2022-03-16T08:09:38.000Z
|
2022-03-31T12:27:10.000Z
|
saas/system/api/resource/backend-framework/webpy/urls.py
|
Kwafoor/SREWorks
|
37a64a0a84b29c65cf6b77424bd2acd0c7b42e2b
|
[
"Apache-2.0"
] | 25
|
2022-03-22T04:27:31.000Z
|
2022-03-30T08:47:28.000Z
|
saas/system/api/resource/backend-framework/webpy/urls.py
|
Kwafoor/SREWorks
|
37a64a0a84b29c65cf6b77424bd2acd0c7b42e2b
|
[
"Apache-2.0"
] | 109
|
2022-03-21T17:30:44.000Z
|
2022-03-31T09:36:28.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
可以在handler中使用app_route来动态注册路由,也可以使用老模式在urls.py里面显式声明
"""
urls = (
r'/demo/(.+)', "DemoHandler"
)
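# With web.py's dispatcher, a request to /demo/foo matches the pattern above and the
# captured group "foo" is passed as the argument to DemoHandler's GET/POST method.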
| 15
| 56
| 0.646667
|
8d641db3dd0be4f3405b083cff4fea14eba6704c
| 1,476
|
py
|
Python
|
packages/monomanage/src/monomanage/docs/api.py
|
0mars/graphx
|
8f58df3979b5fd96e4183811c9d8339c92367d00
|
[
"Apache-2.0"
] | null | null | null |
packages/monomanage/src/monomanage/docs/api.py
|
0mars/graphx
|
8f58df3979b5fd96e4183811c9d8339c92367d00
|
[
"Apache-2.0"
] | null | null | null |
packages/monomanage/src/monomanage/docs/api.py
|
0mars/graphx
|
8f58df3979b5fd96e4183811c9d8339c92367d00
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2019 Simon Biggs
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version (the "AGPL-3.0+").
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License and the additional terms for more
# details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ADDITIONAL TERMS are also included as allowed by Section 7 of the GNU
# Affero General Public License. These additional terms are Sections 1, 5,
# 6, 7, 8, and 9 from the Apache License, Version 2.0 (the "Apache-2.0")
# where all references to the definition "License" are instead defined to
# mean the AGPL-3.0+.
# You should have received a copy of the Apache-2.0 along with this
# program. If not, see <http://www.apache.org/licenses/LICENSE-2.0>.
import os
from ..draw import draw_all
from .graphs import write_graphs_rst
def pre_docs_build(pymedphys_dir):
docs_directory = os.path.join(pymedphys_dir, 'docs')
docs_graphs = os.path.join(docs_directory, 'graphs')
draw_all(docs_graphs)
write_graphs_rst(docs_graphs)
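# Illustrative call (not in the original file): pre_docs_build('/path/to/pymedphys')
# redraws the dependency graphs under docs/graphs and regenerates the matching
# graphs .rst files there before a docs build.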
| 36.9
| 74
| 0.754065
|
cdc7219b6c1b7039e26d7da65de1be083c5a17a4
| 15,833
|
py
|
Python
|
pymc3/distributions/transforms.py
|
katosh/pymc3
|
38fa415c15b0c0469fbd9cad3a3b9ba974fc8733
|
[
"Apache-2.0"
] | 1
|
2019-10-31T11:45:13.000Z
|
2019-10-31T11:45:13.000Z
|
pymc3/distributions/transforms.py
|
katosh/pymc3
|
38fa415c15b0c0469fbd9cad3a3b9ba974fc8733
|
[
"Apache-2.0"
] | null | null | null |
pymc3/distributions/transforms.py
|
katosh/pymc3
|
38fa415c15b0c0469fbd9cad3a3b9ba974fc8733
|
[
"Apache-2.0"
] | null | null | null |
import theano
import theano.tensor as tt
from ..model import FreeRV
from ..theanof import gradient, floatX
from . import distribution
from ..math import logit, invlogit
from .distribution import draw_values
import numpy as np
from scipy.special import logit as nplogit
from scipy.special import expit
__all__ = [
"transform",
"stick_breaking",
"logodds",
"interval",
"log_exp_m1",
"lowerbound",
"upperbound",
"ordered",
"log",
"sum_to_1",
"t_stick_breaking",
]
class Transform:
"""A transformation of a random variable from one space into another.
Attributes
----------
name : str
"""
name = ""
def forward(self, x):
"""Applies transformation forward to input variable `x`.
When transform is used on some distribution `p`, it will transform the random variable `x` after sampling
from `p`.
Parameters
----------
x : tensor
Input tensor to be transformed.
Returns
--------
tensor
Transformed tensor.
"""
raise NotImplementedError
def forward_val(self, x, point):
"""Applies transformation forward to input array `x`.
Similar to `forward` but for constant data.
Parameters
----------
x : array_like
Input array to be transformed.
point : array_like, optional
Test value used to draw (fix) bounds-like transformations
Returns
--------
array_like
Transformed array.
"""
raise NotImplementedError
def backward(self, z):
"""Applies inverse of transformation to input variable `z`.
When transform is used on some distribution `p`, which has observed values `z`, it is used to
transform the values of `z` correctly to the support of `p`.
Parameters
----------
z : tensor
Input tensor to be inverse transformed.
Returns
-------
tensor
Inverse transformed tensor.
"""
raise NotImplementedError
def jacobian_det(self, x):
"""Calculates logarithm of the absolute value of the Jacobian determinant for input `x`.
Parameters
----------
x : tensor
Input to calculate Jacobian determinant of.
Returns
-------
tensor
The log abs Jacobian determinant of `x` w.r.t. this transform.
"""
raise NotImplementedError
def apply(self, dist):
# avoid circular import
return TransformedDistribution.dist(dist, self)
def __str__(self):
return self.name + " transform"
class ElemwiseTransform(Transform):
def jacobian_det(self, x):
grad = tt.reshape(gradient(tt.sum(self.backward(x)), [x]), x.shape)
return tt.log(tt.abs_(grad))
class TransformedDistribution(distribution.Distribution):
"""A distribution that has been transformed from one space into another."""
def __init__(self, dist, transform, *args, **kwargs):
"""
Parameters
----------
dist : Distribution
transform : Transform
args, kwargs
arguments to Distribution"""
forward = transform.forward
testval = forward(dist.default())
forward_val = transform.forward_val
self.dist = dist
self.transform_used = transform
v = forward(FreeRV(name="v", distribution=dist))
self.type = v.type
super().__init__(v.shape.tag.test_value, v.dtype, testval, dist.defaults, *args, **kwargs)
if transform.name == "stickbreaking":
b = np.hstack(((np.atleast_1d(self.shape) == 1)[:-1], False))
# force the last dim not broadcastable
self.type = tt.TensorType(v.dtype, b)
def logp(self, x):
"""
Calculate log-probability of Transformed distribution at specified value.
Parameters
----------
x : numeric
Value for which log-probability is calculated.
Returns
-------
TensorVariable
"""
logp_nojac = self.logp_nojac(x)
jacobian_det = self.transform_used.jacobian_det(x)
if logp_nojac.ndim > jacobian_det.ndim:
logp_nojac = logp_nojac.sum(axis=-1)
return logp_nojac + jacobian_det
def logp_nojac(self, x):
"""
Calculate log-probability of Transformed distribution at specified value
without jacobian term for transforms.
Parameters
----------
x : numeric
Value for which log-probability is calculated.
Returns
-------
TensorVariable
"""
return self.dist.logp(self.transform_used.backward(x))
transform = Transform
class Log(ElemwiseTransform):
name = "log"
def backward(self, x):
return tt.exp(x)
def backward_val(self, x):
return np.exp(x)
def forward(self, x):
return tt.log(x)
def forward_val(self, x, point=None):
return np.log(x)
def jacobian_det(self, x):
return x
log = Log()
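# Illustrative sketch, not part of the original module: a pure-numpy check of the
# change-of-variables logic that TransformedDistribution.logp applies with the Log
# transform above (backward = exp, jacobian_det(y) = y). The base density below is
# an assumption chosen for the demo (an Exponential(1) prior), not pymc3 API.
def _log_transform_demo():
    y = np.linspace(-15.0, 5.0, 4001)
    # logp in transformed space: logp of the base density at backward(y), plus the log-Jacobian y
    logp_y = -np.exp(y) + y
    # the transformed density must still integrate to one
    area = np.sum(np.exp(logp_y)) * (y[1] - y[0])
    assert abs(area - 1.0) < 1e-3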
class LogExpM1(ElemwiseTransform):
name = "log_exp_m1"
def backward(self, x):
return tt.nnet.softplus(x)
def backward_val(self, x):
        return np.log(1 + np.exp(-np.abs(x))) + np.maximum(x, 0)
def forward(self, x):
"""Inverse operation of softplus
y = Log(Exp(x) - 1)
= Log(1 - Exp(-x)) + x
"""
return tt.log(1.0 - tt.exp(-x)) + x
def forward_val(self, x, point=None):
return np.log(1.0 - np.exp(-x)) + x
def jacobian_det(self, x):
return -tt.nnet.softplus(-x)
log_exp_m1 = LogExpM1()
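# Illustrative sketch, not part of the original module: backward_val above is a
# numerically stable softplus, log(1 + exp(y)), and forward_val is its inverse,
# log(exp(x) - 1), valid for x > 0. Calling this helper checks the round trip.
def _log_exp_m1_demo():
    x = np.array([0.1, 1.0, 5.0, 30.0])
    y = np.log(1.0 - np.exp(-x)) + x                            # forward_val formula
    x_back = np.log1p(np.exp(-np.abs(y))) + np.maximum(y, 0.0)  # stable softplus, as in backward_val
    assert np.allclose(x_back, x)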
class LogOdds(ElemwiseTransform):
name = "logodds"
def backward(self, x):
return invlogit(x, 0.0)
def backward_val(self, x):
return invlogit(x, 0.0)
def forward(self, x):
return logit(x)
def forward_val(self, x, point=None):
return nplogit(x)
logodds = LogOdds()
class Interval(ElemwiseTransform):
"""Transform from real line interval [a,b] to whole real line."""
name = "interval"
def __init__(self, a, b):
self.a = tt.as_tensor_variable(a)
self.b = tt.as_tensor_variable(b)
self.a_ = a
self.b_ = b
def backward(self, x):
a, b = self.a, self.b
r = (b - a) * tt.nnet.sigmoid(x) + a
return r
def backward_val(self, x):
a, b = self.a_, self.b_
r = (b - a) * 1 / (1 + np.exp(-x)) + a
return r
def forward(self, x):
a, b = self.a, self.b
return tt.log(x - a) - tt.log(b - x)
def forward_val(self, x, point=None):
# 2017-06-19
        # the `self.a-0.` below is important for the testval to propagate
# For an explanation see pull/2328#issuecomment-309303811
a, b = draw_values([self.a - 0.0, self.b - 0.0], point=point)
return floatX(np.log(x - a) - np.log(b - x))
def jacobian_det(self, x):
s = tt.nnet.softplus(-x)
return tt.log(self.b - self.a) - 2 * s - x
interval = Interval
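# Illustrative sketch, not part of the original module: numpy round trip for the
# Interval transform above with example bounds a=-1, b=2, plus a check that
# jacobian_det matches log(d backward / dy) computed directly from the sigmoid.
def _interval_transform_demo():
    a, b = -1.0, 2.0
    x = np.array([-0.9, 0.0, 1.5])
    y = np.log(x - a) - np.log(b - x)                     # forward_val formula
    s = 1.0 / (1.0 + np.exp(-y))                          # sigmoid(y)
    x_back = (b - a) * s + a                              # backward_val formula
    softplus_neg_y = np.log1p(np.exp(-np.abs(y))) + np.maximum(-y, 0.0)
    logdet = np.log(b - a) - 2.0 * softplus_neg_y - y     # jacobian_det formula
    assert np.allclose(x_back, x)
    assert np.allclose(logdet, np.log((b - a) * s * (1.0 - s)))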
class LowerBound(ElemwiseTransform):
"""Transform from real line interval [a,inf] to whole real line."""
name = "lowerbound"
def __init__(self, a):
self.a = tt.as_tensor_variable(a)
self.a_ = a
    def backward(self, x):
        a = self.a
        r = tt.exp(x) + a
        return r
    def backward_val(self, x):
        a = self.a_
        r = np.exp(x) + a
        return r
def forward(self, x):
a = self.a
return tt.log(x - a)
def forward_val(self, x, point=None):
# 2017-06-19
        # the `self.a-0.` below is important for the testval to propagate
# For an explanation see pull/2328#issuecomment-309303811
a = draw_values([self.a - 0.0], point=point)[0]
return floatX(np.log(x - a))
def jacobian_det(self, x):
return x
lowerbound = LowerBound
class UpperBound(ElemwiseTransform):
"""Transform from real line interval [-inf,b] to whole real line."""
name = "upperbound"
def __init__(self, b):
self.b = tt.as_tensor_variable(b)
self.b_ = b
def backward(self, x):
b = self.b
r = b - tt.exp(x)
return r
def backward_val(self, x):
b = self.b_
r = b - np.exp(x)
return r
def forward(self, x):
b = self.b
return tt.log(b - x)
def forward_val(self, x, point=None):
# 2017-06-19
        # the `self.b-0.` below is important for the testval to propagate
# For an explanation see pull/2328#issuecomment-309303811
b = draw_values([self.b - 0.0], point=point)[0]
return floatX(np.log(b - x))
def jacobian_det(self, x):
return x
upperbound = UpperBound
class Ordered(Transform):
name = "ordered"
def backward(self, y):
x = tt.zeros(y.shape)
x = tt.inc_subtensor(x[..., 0], y[..., 0])
x = tt.inc_subtensor(x[..., 1:], tt.exp(y[..., 1:]))
return tt.cumsum(x, axis=-1)
def backward_val(self, y):
x = np.zeros(y.shape)
x[..., 0] += y[..., 0]
x[..., 1:] += np.exp(y[..., 1:])
return np.cumsum(x, axis=-1)
def forward(self, x):
y = tt.zeros(x.shape)
y = tt.inc_subtensor(y[..., 0], x[..., 0])
y = tt.inc_subtensor(y[..., 1:], tt.log(x[..., 1:] - x[..., :-1]))
return y
def forward_val(self, x, point=None):
y = np.zeros_like(x)
y[..., 0] = x[..., 0]
y[..., 1:] = np.log(x[..., 1:] - x[..., :-1])
return y
def jacobian_det(self, y):
return tt.sum(y[..., 1:], axis=-1)
ordered = Ordered()
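# Illustrative sketch, not part of the original module: numpy round trip for the
# Ordered transform above. A strictly increasing vector maps to its first element
# followed by the logs of its successive differences, and back via cumsum of exps.
def _ordered_transform_demo():
    x = np.array([-1.0, 0.5, 2.0, 2.5])
    y = np.concatenate([x[:1], np.log(np.diff(x))])             # forward_val
    x_back = np.cumsum(np.concatenate([y[:1], np.exp(y[1:])]))  # backward_val
    assert np.allclose(x_back, x)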
class SumTo1(Transform):
"""
    Transforms the K - 1 dimensional simplex (K values in [0, 1] that sum to 1) to a length K - 1 vector of values in [0, 1].
This Transformation operates on the last dimension of the input tensor.
"""
name = "sumto1"
def backward(self, y):
remaining = 1 - tt.sum(y[..., :], axis=-1, keepdims=True)
return tt.concatenate([y[..., :], remaining], axis=-1)
def backward_val(self, y):
remaining = 1 - np.sum(y[..., :], axis=-1, keepdims=True)
return np.concatenate([y[..., :], remaining], axis=-1)
def forward(self, x):
return x[..., :-1]
def forward_val(self, x, point=None):
return x[..., :-1]
def jacobian_det(self, x):
y = tt.zeros(x.shape)
return tt.sum(y, axis=-1)
sum_to_1 = SumTo1()
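# Illustrative sketch, not part of the original module: numpy round trip for the
# SumTo1 transform above. The last simplex coordinate is dropped on the way out
# and reconstructed as one minus the sum of the rest on the way back.
def _sum_to_1_demo():
    x = np.array([0.2, 0.3, 0.5])
    y = x[:-1]                                      # forward_val
    x_back = np.concatenate([y, [1.0 - y.sum()]])   # backward_val
    assert np.allclose(x_back, x) and np.isclose(x_back.sum(), 1.0)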
class StickBreaking(Transform):
"""
    Transforms the K - 1 dimensional simplex (K values in [0, 1] that sum to 1) to a length K - 1 vector of real values.
Primarily borrowed from the STAN implementation.
Parameters
----------
eps : float, positive value
A small value for numerical stability in invlogit.
"""
name = "stickbreaking"
def __init__(self, eps=floatX(np.finfo(theano.config.floatX).eps)):
self.eps = eps
def forward(self, x_):
x = x_.T
# reverse cumsum
x0 = x[:-1]
s = tt.extra_ops.cumsum(x0[::-1], 0)[::-1] + x[-1]
z = x0 / s
Km1 = x.shape[0] - 1
k = tt.arange(Km1)[(slice(None),) + (None,) * (x.ndim - 1)]
eq_share = logit(1.0 / (Km1 + 1 - k).astype(str(x_.dtype)))
y = logit(z) - eq_share
return floatX(y.T)
def forward_val(self, x_, point=None):
x = x_.T
# reverse cumsum
x0 = x[:-1]
s = np.cumsum(x0[::-1], 0)[::-1] + x[-1]
z = x0 / s
Km1 = x.shape[0] - 1
k = np.arange(Km1)[(slice(None),) + (None,) * (x.ndim - 1)]
eq_share = nplogit(1.0 / (Km1 + 1 - k).astype(str(x_.dtype)))
y = nplogit(z) - eq_share
return floatX(y.T)
def backward(self, y_):
y = y_.T
Km1 = y.shape[0]
k = tt.arange(Km1)[(slice(None),) + (None,) * (y.ndim - 1)]
eq_share = logit(1.0 / (Km1 + 1 - k).astype(str(y_.dtype)))
z = invlogit(y + eq_share, self.eps)
yl = tt.concatenate([z, tt.ones(y[:1].shape)])
yu = tt.concatenate([tt.ones(y[:1].shape), 1 - z])
S = tt.extra_ops.cumprod(yu, 0)
x = S * yl
return floatX(x.T)
def backward_val(self, y_):
y = y_.T
Km1 = y.shape[0]
k = np.arange(Km1)[(slice(None),) + (None,) * (y.ndim - 1)]
eq_share = nplogit(1.0 / (Km1 + 1 - k).astype(str(y_.dtype)))
z = expit(y + eq_share)
yl = np.concatenate([z, np.ones(y[:1].shape)])
yu = np.concatenate([np.ones(y[:1].shape), 1 - z])
S = np.cumprod(yu, 0)
x = S * yl
return floatX(x.T)
def jacobian_det(self, y_):
y = y_.T
Km1 = y.shape[0]
k = tt.arange(Km1)[(slice(None),) + (None,) * (y.ndim - 1)]
eq_share = logit(1.0 / (Km1 + 1 - k).astype(str(y_.dtype)))
yl = y + eq_share
yu = tt.concatenate([tt.ones(y[:1].shape), 1 - invlogit(yl, self.eps)])
S = tt.extra_ops.cumprod(yu, 0)
return tt.sum(tt.log(S[:-1]) - tt.log1p(tt.exp(yl)) - tt.log1p(tt.exp(-yl)), 0).T
stick_breaking = StickBreaking()
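# Illustrative sketch, not part of the original module: numpy round trip mirroring
# forward_val/backward_val of the StickBreaking transform above, using the scipy
# logit/expit already imported in this module.
def _stick_breaking_demo():
    x = np.array([0.1, 0.2, 0.3, 0.4])                  # a point on the simplex
    K = x.shape[0]
    eq_share = nplogit(1.0 / (K - np.arange(K - 1)))
    s = np.cumsum(x[:-1][::-1])[::-1] + x[-1]           # mass of the remaining stick
    y = nplogit(x[:-1] / s) - eq_share                  # forward_val
    z = expit(y + eq_share)                             # backward_val
    x_back = np.concatenate([z, [1.0]]) * np.cumprod(np.concatenate([[1.0], 1.0 - z]))
    assert np.allclose(x_back, x) and np.isclose(x_back.sum(), 1.0)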
def t_stick_breaking(eps):
return StickBreaking(eps)
class Circular(ElemwiseTransform):
"""Transforms a linear space into a circular one.
"""
name = "circular"
def backward(self, y):
return tt.arctan2(tt.sin(y), tt.cos(y))
def backward_val(self, y):
return y
def forward(self, x):
return tt.as_tensor_variable(x)
def forward_val(self, x, point=None):
return x
def jacobian_det(self, x):
return tt.zeros(x.shape)
circular = Circular()
class CholeskyCovPacked(Transform):
name = "cholesky-cov-packed"
def __init__(self, n):
self.diag_idxs = np.arange(1, n + 1).cumsum() - 1
def backward(self, x):
return tt.advanced_set_subtensor1(x, tt.exp(x[self.diag_idxs]), self.diag_idxs)
def backward_val(self, x):
x[..., self.diag_idxs] = np.exp(x[..., self.diag_idxs])
return x
def forward(self, y):
return tt.advanced_set_subtensor1(y, tt.log(y[self.diag_idxs]), self.diag_idxs)
def forward_val(self, y, point=None):
y[..., self.diag_idxs] = np.log(y[..., self.diag_idxs])
return y
def jacobian_det(self, y):
return tt.sum(y[self.diag_idxs])
class Chain(Transform):
def __init__(self, transform_list):
self.transform_list = transform_list
self.name = "+".join([transf.name for transf in self.transform_list])
def forward(self, x):
y = x
for transf in self.transform_list:
y = transf.forward(y)
return y
def forward_val(self, x, point=None):
y = x
for transf in self.transform_list:
            y = transf.forward_val(y, point=point)
return y
def backward(self, y):
x = y
for transf in reversed(self.transform_list):
x = transf.backward(x)
return x
def backward_val(self, y):
x = y
for transf in reversed(self.transform_list):
x = transf.backward_val(x)
return x
def jacobian_det(self, y):
y = tt.as_tensor_variable(y)
det_list = []
ndim0 = y.ndim
for transf in reversed(self.transform_list):
det_ = transf.jacobian_det(y)
det_list.append(det_)
y = transf.backward(y)
ndim0 = min(ndim0, det_.ndim)
# match the shape of the smallest jacobian_det
det = 0.0
for det_ in det_list:
if det_.ndim > ndim0:
det += det_.sum(axis=-1)
else:
det += det_
return det
| 26.041118
| 121
| 0.557064
|
d1a669486b2869af6eb05154caf4428f672a9ae8
| 12,936
|
py
|
Python
|
tests/test_agents.py
|
DaoyiG/aima-python
|
446963c1047a1c1139cd33c03fb3ebf0d677750d
|
[
"MIT"
] | 6,946
|
2016-02-27T19:28:07.000Z
|
2022-03-31T21:21:35.000Z
|
tests/test_agents.py
|
indhumathi1422/aima-python
|
9ea91c1d3a644fdb007e8dd0870202dcd9d078b6
|
[
"MIT"
] | 733
|
2016-02-29T20:12:12.000Z
|
2022-02-19T11:56:13.000Z
|
tests/test_agents.py
|
indhumathi1422/aima-python
|
9ea91c1d3a644fdb007e8dd0870202dcd9d078b6
|
[
"MIT"
] | 3,880
|
2016-02-24T21:13:35.000Z
|
2022-03-31T17:09:57.000Z
|
import random
import pytest
from agents import (ReflexVacuumAgent, ModelBasedVacuumAgent, TrivialVacuumEnvironment, compare_agents,
RandomVacuumAgent, TableDrivenVacuumAgent, TableDrivenAgentProgram, RandomAgentProgram,
SimpleReflexAgentProgram, ModelBasedReflexAgentProgram, Wall, Gold, Explorer, Thing, Bump, Glitter,
WumpusEnvironment, Pit, VacuumEnvironment, Dirt, Direction, Agent)
# random seed may affect the placement
# of things in the environment which may
# lead to failure of tests. Please change
# the seed if the tests are failing with
# current changes in any stochastic method
# function or variable.
random.seed(9)
def test_move_forward():
d = Direction("up")
l1 = d.move_forward((0, 0))
assert l1 == (0, -1)
d = Direction(Direction.R)
l1 = d.move_forward((0, 0))
assert l1 == (1, 0)
d = Direction(Direction.D)
l1 = d.move_forward((0, 0))
assert l1 == (0, 1)
d = Direction("left")
l1 = d.move_forward((0, 0))
assert l1 == (-1, 0)
l2 = d.move_forward((1, 0))
assert l2 == (0, 0)
def test_add():
d = Direction(Direction.U)
l1 = d + "right"
l2 = d + "left"
assert l1.direction == Direction.R
assert l2.direction == Direction.L
d = Direction("right")
l1 = d.__add__(Direction.L)
l2 = d.__add__(Direction.R)
assert l1.direction == "up"
assert l2.direction == "down"
d = Direction("down")
l1 = d.__add__("right")
l2 = d.__add__("left")
assert l1.direction == Direction.L
assert l2.direction == Direction.R
d = Direction(Direction.L)
l1 = d + Direction.R
l2 = d + Direction.L
assert l1.direction == Direction.U
assert l2.direction == Direction.D
def test_RandomAgentProgram():
# create a list of all the actions a Vacuum cleaner can perform
list = ['Right', 'Left', 'Suck', 'NoOp']
# create a program and then an object of the RandomAgentProgram
program = RandomAgentProgram(list)
agent = Agent(program)
# create an object of TrivialVacuumEnvironment
environment = TrivialVacuumEnvironment()
# add agent to the environment
environment.add_thing(agent)
# run the environment
environment.run()
# check final status of the environment
assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
def test_RandomVacuumAgent():
# create an object of the RandomVacuumAgent
agent = RandomVacuumAgent()
# create an object of TrivialVacuumEnvironment
environment = TrivialVacuumEnvironment()
# add agent to the environment
environment.add_thing(agent)
# run the environment
environment.run()
# check final status of the environment
assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
def test_TableDrivenAgent():
random.seed(10)
loc_A, loc_B = (0, 0), (1, 0)
# table defining all the possible states of the agent
table = {((loc_A, 'Clean'),): 'Right',
((loc_A, 'Dirty'),): 'Suck',
((loc_B, 'Clean'),): 'Left',
((loc_B, 'Dirty'),): 'Suck',
((loc_A, 'Dirty'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_B, 'Dirty')): 'Suck',
((loc_B, 'Clean'), (loc_A, 'Dirty')): 'Suck',
((loc_B, 'Dirty'), (loc_B, 'Clean')): 'Left',
((loc_A, 'Dirty'), (loc_A, 'Clean'), (loc_B, 'Dirty')): 'Suck',
((loc_B, 'Dirty'), (loc_B, 'Clean'), (loc_A, 'Dirty')): 'Suck'}
    # create a program and then an object of the TableDrivenAgent
program = TableDrivenAgentProgram(table)
agent = Agent(program)
# create an object of TrivialVacuumEnvironment
environment = TrivialVacuumEnvironment()
# initializing some environment status
environment.status = {loc_A: 'Dirty', loc_B: 'Dirty'}
# add agent to the environment
environment.add_thing(agent)
    # run the environment one step at a time to check how it evolves using TableDrivenAgentProgram
environment.run(steps=1)
assert environment.status == {(1, 0): 'Clean', (0, 0): 'Dirty'}
environment.run(steps=1)
assert environment.status == {(1, 0): 'Clean', (0, 0): 'Dirty'}
environment.run(steps=1)
assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
def test_ReflexVacuumAgent():
# create an object of the ReflexVacuumAgent
agent = ReflexVacuumAgent()
# create an object of TrivialVacuumEnvironment
environment = TrivialVacuumEnvironment()
# add agent to the environment
environment.add_thing(agent)
# run the environment
environment.run()
# check final status of the environment
assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
def test_SimpleReflexAgentProgram():
class Rule:
def __init__(self, state, action):
self.__state = state
self.action = action
def matches(self, state):
return self.__state == state
loc_A = (0, 0)
loc_B = (1, 0)
# create rules for a two state Vacuum Environment
rules = [Rule((loc_A, "Dirty"), "Suck"), Rule((loc_A, "Clean"), "Right"),
Rule((loc_B, "Dirty"), "Suck"), Rule((loc_B, "Clean"), "Left")]
def interpret_input(state):
return state
# create a program and then an object of the SimpleReflexAgentProgram
program = SimpleReflexAgentProgram(rules, interpret_input)
agent = Agent(program)
# create an object of TrivialVacuumEnvironment
environment = TrivialVacuumEnvironment()
# add agent to the environment
environment.add_thing(agent)
# run the environment
environment.run()
# check final status of the environment
assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
def test_ModelBasedReflexAgentProgram():
class Rule:
def __init__(self, state, action):
self.__state = state
self.action = action
def matches(self, state):
return self.__state == state
loc_A = (0, 0)
loc_B = (1, 0)
# create rules for a two-state Vacuum Environment
rules = [Rule((loc_A, "Dirty"), "Suck"), Rule((loc_A, "Clean"), "Right"),
Rule((loc_B, "Dirty"), "Suck"), Rule((loc_B, "Clean"), "Left")]
def update_state(state, action, percept, model):
return percept
# create a program and then an object of the ModelBasedReflexAgentProgram class
program = ModelBasedReflexAgentProgram(rules, update_state, None)
agent = Agent(program)
# create an object of TrivialVacuumEnvironment
environment = TrivialVacuumEnvironment()
# add agent to the environment
environment.add_thing(agent)
# run the environment
environment.run()
# check final status of the environment
assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
def test_ModelBasedVacuumAgent():
# create an object of the ModelBasedVacuumAgent
agent = ModelBasedVacuumAgent()
# create an object of TrivialVacuumEnvironment
environment = TrivialVacuumEnvironment()
# add agent to the environment
environment.add_thing(agent)
# run the environment
environment.run()
# check final status of the environment
assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
def test_TableDrivenVacuumAgent():
# create an object of the TableDrivenVacuumAgent
agent = TableDrivenVacuumAgent()
# create an object of the TrivialVacuumEnvironment
environment = TrivialVacuumEnvironment()
# add agent to the environment
environment.add_thing(agent)
# run the environment
environment.run()
# check final status of the environment
assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
def test_compare_agents():
environment = TrivialVacuumEnvironment
agents = [ModelBasedVacuumAgent, ReflexVacuumAgent]
result = compare_agents(environment, agents)
performance_ModelBasedVacuumAgent = result[0][1]
performance_ReflexVacuumAgent = result[1][1]
# The performance of ModelBasedVacuumAgent will be at least as good as that of
# ReflexVacuumAgent, since ModelBasedVacuumAgent can identify when it has
# reached the terminal state (both locations being clean) and will perform
# NoOp leading to 0 performance change, whereas ReflexVacuumAgent cannot
# identify the terminal state and thus will keep moving, leading to worse
# performance compared to ModelBasedVacuumAgent.
assert performance_ReflexVacuumAgent <= performance_ModelBasedVacuumAgent
def test_TableDrivenAgentProgram():
table = {(('foo', 1),): 'action1',
(('foo', 2),): 'action2',
(('bar', 1),): 'action3',
(('bar', 2),): 'action1',
(('foo', 1), ('foo', 1),): 'action2',
(('foo', 1), ('foo', 2),): 'action3'}
agent_program = TableDrivenAgentProgram(table)
assert agent_program(('foo', 1)) == 'action1'
assert agent_program(('foo', 2)) == 'action3'
assert agent_program(('invalid percept',)) is None
def test_Agent():
def constant_prog(percept):
return percept
agent = Agent(constant_prog)
result = agent.program(5)
assert result == 5
def test_VacuumEnvironment():
# initialize Vacuum Environment
v = VacuumEnvironment(6, 6)
# get an agent
agent = ModelBasedVacuumAgent()
agent.direction = Direction(Direction.R)
v.add_thing(agent)
v.add_thing(Dirt(), location=(2, 1))
# check if things are added properly
assert len([x for x in v.things if isinstance(x, Wall)]) == 20
assert len([x for x in v.things if isinstance(x, Dirt)]) == 1
# let the action begin!
assert v.percept(agent) == ("Clean", "None")
v.execute_action(agent, "Forward")
assert v.percept(agent) == ("Dirty", "None")
v.execute_action(agent, "TurnLeft")
v.execute_action(agent, "Forward")
assert v.percept(agent) == ("Dirty", "Bump")
v.execute_action(agent, "Suck")
assert v.percept(agent) == ("Clean", "None")
old_performance = agent.performance
v.execute_action(agent, "NoOp")
assert old_performance == agent.performance
def test_WumpusEnvironment():
def constant_prog(percept):
return percept
# initialize Wumpus Environment
w = WumpusEnvironment(constant_prog)
# check if things are added properly
assert len([x for x in w.things if isinstance(x, Wall)]) == 20
assert any(map(lambda x: isinstance(x, Gold), w.things))
assert any(map(lambda x: isinstance(x, Explorer), w.things))
assert not any(map(lambda x: not isinstance(x, Thing), w.things))
# check that gold and wumpus are not present on (1,1)
assert not any(map(lambda x: isinstance(x, Gold) or isinstance(x, WumpusEnvironment), w.list_things_at((1, 1))))
# check if w.get_world() segments objects correctly
assert len(w.get_world()) == 6
for row in w.get_world():
assert len(row) == 6
# start the game!
agent = [x for x in w.things if isinstance(x, Explorer)][0]
gold = [x for x in w.things if isinstance(x, Gold)][0]
pit = [x for x in w.things if isinstance(x, Pit)][0]
assert not w.is_done()
# check Walls
agent.location = (1, 2)
percepts = w.percept(agent)
assert len(percepts) == 5
assert any(map(lambda x: isinstance(x, Bump), percepts[0]))
# check Gold
agent.location = gold.location
percepts = w.percept(agent)
assert any(map(lambda x: isinstance(x, Glitter), percepts[4]))
agent.location = (gold.location[0], gold.location[1] + 1)
percepts = w.percept(agent)
assert not any(map(lambda x: isinstance(x, Glitter), percepts[4]))
# check agent death
agent.location = pit.location
assert w.in_danger(agent)
assert not agent.alive
assert agent.killed_by == Pit.__name__
assert agent.performance == -1000
assert w.is_done()
def test_WumpusEnvironmentActions():
random.seed(9)
def constant_prog(percept):
return percept
# initialize Wumpus Environment
w = WumpusEnvironment(constant_prog)
agent = [x for x in w.things if isinstance(x, Explorer)][0]
gold = [x for x in w.things if isinstance(x, Gold)][0]
pit = [x for x in w.things if isinstance(x, Pit)][0]
agent.location = (1, 1)
assert agent.direction.direction == "right"
w.execute_action(agent, 'TurnRight')
assert agent.direction.direction == "down"
w.execute_action(agent, 'TurnLeft')
assert agent.direction.direction == "right"
w.execute_action(agent, 'Forward')
assert agent.location == (2, 1)
agent.location = gold.location
w.execute_action(agent, 'Grab')
assert agent.holding == [gold]
agent.location = (1, 1)
w.execute_action(agent, 'Climb')
assert not any(map(lambda x: isinstance(x, Explorer), w.things))
assert w.is_done()
if __name__ == "__main__":
pytest.main()
| 33.340206
| 119
| 0.65453
|
23d98df36096dbca926a47492e63e84234dd522e
| 340
|
py
|
Python
|
python/cursoemvideo/060.1.py
|
Gustavo-Martins/learning
|
a2167b894ab3a4bac5e3d7d4ac6671e1ee89e155
|
[
"Unlicense"
] | null | null | null |
python/cursoemvideo/060.1.py
|
Gustavo-Martins/learning
|
a2167b894ab3a4bac5e3d7d4ac6671e1ee89e155
|
[
"Unlicense"
] | null | null | null |
python/cursoemvideo/060.1.py
|
Gustavo-Martins/learning
|
a2167b894ab3a4bac5e3d7d4ac6671e1ee89e155
|
[
"Unlicense"
] | null | null | null |
# Factorial calculator
n = int(input('Enter a number to compute its factorial: '))
counter = n
f = 1 # Prevents multiplication by zero
print('Computing {}! = '.format(n), end='')
while counter > 0:
print('{}'.format(counter), end='')
print(' x ' if counter > 1 else ' = ', end='')
f *= counter
counter -= 1
print('{}'.format(f))
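# Illustrative cross-check, not part of the original exercise: for a non-negative
# n the running product above agrees with Python's built-in factorial.
import math
assert f == math.factorial(n)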
| 24.285714
| 63
| 0.626471
|
b7c78e775f1b922b7940e23bd23fa059c3501a77
| 1,342
|
py
|
Python
|
pipeline/reach-es-extractor/refparse/utils/exact_match.py
|
wellcometrust/reach
|
1aa42c7d8aaf0a91d033af8448a33f37563b0365
|
[
"MIT"
] | 11
|
2019-11-04T08:24:00.000Z
|
2021-12-16T23:11:47.000Z
|
split_reach/extracter/refparse/utils/exact_match.py
|
AzizIlyosov/reach
|
9cd8e9ab11231cc3f761bff58df1c78d355e4d78
|
[
"MIT"
] | 274
|
2019-10-30T15:37:17.000Z
|
2021-03-25T16:13:36.000Z
|
split_reach/extracter/refparse/utils/exact_match.py
|
AzizIlyosov/reach
|
9cd8e9ab11231cc3f761bff58df1c78d355e4d78
|
[
"MIT"
] | 3
|
2019-11-12T13:38:14.000Z
|
2020-04-16T07:49:04.000Z
|
import re
class ExactMatcher:
def __init__(self, sectioned_documents, title_length_threshold):
self.texts = [
(doc.id, self.clean_text(doc.section))
for doc in sectioned_documents
]
self.title_length_threshold = title_length_threshold
def clean_text(self, string):
"""
Input:
-A string
Output:
-A string, with white space normalised and
non-alphanumeric characters removed
Cleans up text such that it can easily be searched
"""
string = re.sub("\\n", " ", string)
string = re.sub("\s{1,}", " ", string)
string = re.sub("[^A-Za-z0-9 ]", "", string)
string = string.lower()
return string
def match(self, publication):
"""
Input:
publication: dict that contains title and uber_id of academic publication
Output:
matched_reference: dict that links an academic publication with a policy document
"""
publication_title = self.clean_text(publication['title'])
if len(publication_title) < self.title_length_threshold:
return
for doc_id, text in self.texts:
if publication_title in text:
yield {
'Document id': doc_id,
'Matched title': publication_title,
'Matched publication id': publication['uber_id'],
'Match algorithm': 'Exact match'
}
return
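# Illustrative usage sketch, not part of the original module: `sectioned_documents`
# only needs objects exposing `.id` and `.section`, so a namedtuple stands in here,
# and the publication dict carries the 'title' and 'uber_id' keys used by match().
if __name__ == '__main__':
    from collections import namedtuple
    SectionedDoc = namedtuple('SectionedDoc', ['id', 'section'])
    docs = [SectionedDoc(id=1, section="References: A Study of Malaria Interventions, 2015.")]
    matcher = ExactMatcher(docs, title_length_threshold=10)
    matches = list(matcher.match({'title': 'A Study of Malaria Interventions', 'uber_id': 'pub-1'}))
    print(matches)  # one dict linking document 1 to publication 'pub-1'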
| 26.313725
| 87
| 0.645306
|
ac4b2945dbd71a90b0ad2ab26f60561186dade66
| 3,443
|
py
|
Python
|
satchmo/payment/modules/protx/views.py
|
sankroh/satchmo
|
e48df0c2a4be4ce14785d0a5d6dd1e516c57a838
|
[
"BSD-3-Clause"
] | 1
|
2016-05-09T12:21:04.000Z
|
2016-05-09T12:21:04.000Z
|
satchmo/payment/modules/protx/views.py
|
sankroh/satchmo
|
e48df0c2a4be4ce14785d0a5d6dd1e516c57a838
|
[
"BSD-3-Clause"
] | null | null | null |
satchmo/payment/modules/protx/views.py
|
sankroh/satchmo
|
e48df0c2a4be4ce14785d0a5d6dd1e516c57a838
|
[
"BSD-3-Clause"
] | null | null | null |
"""Protx checkout custom views"""
from django import http
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _
from satchmo.configuration import config_get_group
from satchmo.payment.views import payship, confirm
import logging
log = logging.getLogger('protx.views')
def pay_ship_info(request):
return payship.credit_pay_ship_info(request, config_get_group('PAYMENT_PROTX'), template="checkout/protx/pay_ship.html")
def confirm_info(request, template='checkout/protx/confirm.html', extra_context={}):
payment_module = config_get_group('PAYMENT_PROTX')
controller = confirm.ConfirmController(request, payment_module)
controller.templates['CONFIRM'] = template
controller.extra_context = extra_context
controller.onForm = secure3d_form_handler
controller.confirm()
return controller.response
def confirm_secure3d(request, secure3d_template='checkout/secure3d_form.html',
confirm_template='checkout/confirm.html', extra_context={}):
"""Handles confirming an order and processing the charges when secured by secure3d.
"""
payment_module = config_get_group('PAYMENT_PROTX')
controller = confirm.ConfirmController(request, payment_module, extra_context=extra_context)
controller.template['CONFIRM'] = confirm_template
if not controller.sanity_check():
return controller.response
auth3d = request.session.get('3D', None)
if not auth3d:
controller.processorMessage = _('3D Secure transaction expired. Please try again.')
else:
if request.method == "POST":
returnMD = request.POST.get('MD', None)
if not returnMD:
template = payment_module.lookup_template(secure3d_template)
ctx ={'order': controller.order, 'auth': auth3d }
return render_to_response(template, ctx, RequestContext(request))
elif returnMD == auth3d['MD']:
pares = request.POST.get('PaRes', None)
controller.processor.prepareData(controller.order)
controller.processor.prepareData3d(returnMD, pares)
if controller.process():
return controller.onSuccess(controller)
else:
controller.processorMessage = _('3D Secure transaction was not approved by payment gateway. Please contact us.')
else:
            template = payment_module.lookup_template(secure3d_template)
ctx =RequestContext(request, {
'order': controller.order, 'auth': auth3d
})
return render_to_response(template, ctx)
return secure3d_form_handler(controller)
def secure3d_form_handler(controller):
"""At the confirmation step, protx may ask for a secure3d authentication. This method
catches that, and if so, sends to that step, otherwise the form as normal"""
if controller.processorReasonCode == '3DAUTH':
log.debug('caught secure 3D request for order #%i, putting 3D into session as %s',
controller.order.id, controller.processorReasonCode)
redirectUrl = controller.lookup_url('satchmo_checkout-secure3d')
controller.processor.response['TermUrl'] = redirectUrl
request.session['3D'] = controller.processorReasonCode
return http.HttpResponseRedirect(redirectUrl)
return controller._onForm(controller)
| 45.302632
| 132
| 0.680511
|
1053020e8d1b9b1dd6c966a8064dd37fb89b37a5
| 5,620
|
py
|
Python
|
currencies/management/commands/currencies.py
|
zdanozdan/django-currencies
|
bf31996427eab55b2012597708d910d5dc67f454
|
[
"BSD-3-Clause"
] | null | null | null |
currencies/management/commands/currencies.py
|
zdanozdan/django-currencies
|
bf31996427eab55b2012597708d910d5dc67f454
|
[
"BSD-3-Clause"
] | null | null | null |
currencies/management/commands/currencies.py
|
zdanozdan/django-currencies
|
bf31996427eab55b2012597708d910d5dc67f454
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
from collections import OrderedDict
from importlib import import_module
from django.conf import settings
from django.core.management.base import BaseCommand
from ...models import Currency
# The list of available backend currency sources
sources = OrderedDict([
# oxr must remain first for backward compatibility
('oxr', '._openexchangerates'),
('yahoo', '._yahoofinance'),
('iso', '._currencyiso'),
#TODO:
#('google', '._googlecalculator.py'),
#('ecb', '._europeancentralbank.py'),
])
logger = logging.getLogger("django.currencies")
class Command(BaseCommand):
help = "Create all missing db currencies available from the chosen source"
_package_name = __name__.rsplit('.', 1)[0]
_source_param = 'source'
_source_default = next(iter(sources))
_source_kwargs = {'action': 'store', 'nargs': '?', 'default': _source_default,
'choices': sources.keys(),
'help': 'Select the desired currency source, default is ' + _source_default}
def add_arguments(self, parser):
"""Add command arguments"""
parser.add_argument(self._source_param, **self._source_kwargs)
parser.add_argument('--force', '-f', action='store_true', default=False,
help='Update database even if currency already exists')
parser.add_argument('--import', '-i', action='append', default=[],
help= 'Selectively import currencies by supplying the currency codes (e.g. USD) one per switch, '
'or supply an uppercase settings variable name with an iterable (once only), '
'or looks for settings CURRENCIES or SHOP_CURRENCIES.')
def get_imports(self, option):
"""
See if we have been passed a set of currencies or a setting variable
or look for settings CURRENCIES or SHOP_CURRENCIES.
"""
if not option:
for attr in ('CURRENCIES', 'SHOP_CURRENCIES'):
try:
return getattr(settings, attr)
except AttributeError:
continue
return option
elif len(option) == 1 and option[0].isupper() and len(option[0]) != 3:
return getattr(settings, option[0])
else:
return [e for e in option if e]
@property
def verbosity(self):
return getattr(self, '_verbosity', logging.INFO)
@verbosity.setter
def verbosity(self, value):
self._verbosity = {
0: logging.ERROR,
1: logging.INFO,
2: logging.DEBUG,
3: 0
}.get(value)
def log(self, lvl, msg, *args, **kwargs):
"""Both prints to stdout/stderr and the django.currencies logger"""
logger.log(lvl, msg, *args, **kwargs)
if lvl >= self.verbosity:
if args:
fmsg = msg % args
else:
fmsg = msg % kwargs
if lvl >= logging.WARNING:
self.stderr.write(fmsg)
else:
self.stdout.write(fmsg)
def get_handler(self, options):
"""Return the specified handler"""
# Import the CurrencyHandler and get an instance
handler_module = import_module(sources[options[self._source_param]], self._package_name)
return handler_module.CurrencyHandler(self.log)
def handle(self, *args, **options):
"""Handle the command"""
# get the command arguments
self.verbosity = int(options.get('verbosity', 1))
force = options['force']
imports = self.get_imports(options['import'])
# Import the CurrencyHandler and get an instance
handler = self.get_handler(options)
self.log(logging.INFO, "Getting currency data from %s", handler.endpoint)
# find available codes
if imports:
allcodes = set(handler.get_allcurrencycodes())
reqcodes = set(imports)
available = reqcodes & allcodes
unavailable = reqcodes - allcodes
else:
self.log(logging.WARNING, "Importing all. Some currencies may be out-of-date (MTL) or spurious (XPD)")
available = handler.get_allcurrencycodes()
unavailable = None
for code in available:
obj, created = Currency._default_manager.get_or_create(code=code)
name = handler.get_currencyname(code)
description = "%r (%s)" % (name, code)
if created or force:
kwargs = {}
if created:
kwargs['is_active'] = False
msg = "Creating %s"
else:
msg = "Updating %s"
if name:
kwargs['name'] = name
symbol = handler.get_currencysymbol(code)
if symbol:
kwargs['symbol'] = symbol
try:
infodict = handler.get_info(code)
except AttributeError:
pass
else:
if infodict:
obj.info.update(infodict)
kwargs['info'] = obj.info
self.log(logging.INFO, msg, description)
Currency._default_manager.filter(pk=obj.pk).update(**kwargs)
else:
msg = "Skipping %s"
self.log(logging.INFO, msg, description)
if unavailable:
self.log(logging.ERROR, "Currencies %s not found in %s source", unavailable, handler.name)
| 36.732026
| 114
| 0.569751
|
5ab04aff758c57df0615c5e483bd620d7bbb9323
| 2,168
|
py
|
Python
|
aiida/backends/tests/control/test_computer_ctrl.py
|
iriberri/aiida_core
|
c4a1ec5dac92ee62c59d39ca580bde449f3abf73
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/backends/tests/control/test_computer_ctrl.py
|
iriberri/aiida_core
|
c4a1ec5dac92ee62c59d39ca580bde449f3abf73
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/backends/tests/control/test_computer_ctrl.py
|
iriberri/aiida_core
|
c4a1ec5dac92ee62c59d39ca580bde449f3abf73
|
[
"BSD-2-Clause"
] | 1
|
2018-12-21T11:10:09.000Z
|
2018-12-21T11:10:09.000Z
|
"""Computer control module unit tests."""
from aiida.backends.testbase import AiidaTestCase
from aiida.control.computer import ComputerBuilder, configure_computer
class TestComputerControl(AiidaTestCase):
def setUp(self):
"""Prepare current user and computer builder with common properties."""
from aiida.orm.backend import construct_backend
from aiida.scheduler import SchedulerFactory
backend = construct_backend()
self.comp_builder = ComputerBuilder(label='test', description='Test Computer', enabled=True, hostname='localhost')
self.comp_builder.scheduler = 'direct'
self.comp_builder.work_dir = '/tmp/aiida'
self.comp_builder.prepend_text = ''
self.comp_builder.append_text = ''
self.comp_builder.mpiprocs_per_machine = 8
self.comp_builder.mpirun_command = 'mpirun'
self.comp_builder.shebang = '#!xonsh'
self.user = backend.users.get_automatic_user()
def test_configure_local(self):
"""Configure a computer for local transport and check it is configured."""
self.comp_builder.label = 'test_configure_local'
self.comp_builder.transport = 'local'
comp = self.comp_builder.new()
comp.store()
configure_computer(comp)
self.assertTrue(comp.is_user_configured(self.user))
def test_configure_ssh(self):
"""Configure a computer for ssh transport and check it is configured."""
self.comp_builder.label = 'test_configure_ssh'
self.comp_builder.transport = 'ssh'
comp = self.comp_builder.new()
comp.store()
configure_computer(comp, username='radames', port='22')
self.assertTrue(comp.is_user_configured(self.user))
def test_configure_ssh_invalid(self):
"""Try to configure computer with invalid auth params and check it fails."""
self.comp_builder.label = 'test_configure_ssh_invalid'
self.comp_builder.transport = 'ssh'
comp = self.comp_builder.new()
comp.store()
with self.assertRaises(ValueError):
configure_computer(comp, username='radames', invalid_auth_param='TEST')
| 41.692308
| 122
| 0.692804
|
bff51a1b14c6f1ba323999bf61e98fadc525464d
| 2,353
|
py
|
Python
|
multitiers/__main__.py
|
tresoldi/multitiers
|
e6ee43043e8655b170df18ca158212fd82ca1fa0
|
[
"MIT"
] | null | null | null |
multitiers/__main__.py
|
tresoldi/multitiers
|
e6ee43043e8655b170df18ca158212fd82ca1fa0
|
[
"MIT"
] | null | null | null |
multitiers/__main__.py
|
tresoldi/multitiers
|
e6ee43043e8655b170df18ca158212fd82ca1fa0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
__main__.py
"""
# Import Python standard libraries
import argparse
# Import our library
import multitiers
def parse_arguments():
"""
Parse arguments and return a namespace.
"""
# TODO: add clts repos
parser = argparse.ArgumentParser()
parser.add_argument("filename", type=str, help="Path to the datafile.")
parser.add_argument(
"--csv",
action="store_true",
help="Whether to assume comma-delimited fields in datafile (default: false)",
)
args = parser.parse_args()
return args
def main():
"""
Main function for multitier operation from the command line.
"""
# Parse command line arguments
args = parse_arguments()
# Read data file and build an MT object from it
# TODO: rename `comma` to `sep`
data = read_wordlist_data(args.filename, comma=args.csv)
# data = data[:10]
mt = MultiTiers(data, left=2, right=1, models=["cv"])
print(str(mt))
# TODO: have a small language for includes/excludes
study = [
# initial position
{"tier_name": "index", "includes": [1], "excludes": None, "unknown": False},
# All Proto-Germanic /s/
{
"tier_name": "Proto-Germanic",
"includes": ["s"],
"excludes": None,
"unknown": False,
},
# No German r /s/
{"tier_name": "German", "includes": None, "excludes": ["r"], "unknown": False},
# Proto-Germanic CV to the left
{
"tier_name": "Proto-Germanic_cv_L1",
"includes": None,
"excludes": None,
"unknown": True,
},
# Proto-Germanic CV to the right
{
"tier_name": "Proto-Germanic_cv_R1",
"includes": None,
"excludes": None,
"unknown": True,
},
]
data = mt.filter(study)
study_result = mt.study(study)
from pprint import pprint
pprint(study_result)
# extract X/y
X_tiers = {
"index": {"includes": [1]},
"Proto-Germanic": {"includes": ["s"]},
"Proto-Germanic_cv_L1": {},
"Proto-Germanic_cv_R1": {},
}
y_tiers = {"German": {"excludes": ["r"]}}
X, y = mt.filter_Xy(X_tiers, y_tiers)
print(X)
print(y)
if __name__ == "__main__":
main()
| 23.068627
| 87
| 0.554186
|
1a6de1b2293592acf3a4120cd54b9126f5e250a8
| 945
|
py
|
Python
|
numpy1.py
|
rwik/python_practice
|
e489988bf95c1781f22a1830cd4c136932405bed
|
[
"MIT"
] | null | null | null |
numpy1.py
|
rwik/python_practice
|
e489988bf95c1781f22a1830cd4c136932405bed
|
[
"MIT"
] | null | null | null |
numpy1.py
|
rwik/python_practice
|
e489988bf95c1781f22a1830cd4c136932405bed
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
my_2nd_array = np.array([[1,2],[3,4]])
print(my_2nd_array)
print(my_2nd_array.itemsize)
print(my_2nd_array.dtype)
#to optimize space used, you may specify the data type
a = np.array([[1,2],[3,4]], dtype='int16')
print(a.itemsize)
print(a.dtype)
#to access particular element
print(a[1,1])
#to access particular column
print(a[:,1])
#to have an array of all zeros
zArr = np.zeros((5,5))
print(zArr)
#to have an array filled with the same number
someArr = np.full((5,5),89)
print(someArr)
#load file data
filedata = np.genfromtxt('data.txt', delimiter=',')
filedata = filedata.astype('int32')
print(filedata)
#load Image
pic = Image.open('image1.jpg')
print(pic)
picAr = np.asarray(pic)
print(picAr.shape)
plt.imshow(picAr)
picArr2 = picAr.copy()
picArr2[:,:,1] = 0 # for all pixel make green channel 0
plt.imshow(picArr2)
plt.show()  # don't forget this.
| 21.976744
| 55
| 0.71746
|
d37a396be9582e190940a78184cee8c3891d9a90
| 1,231
|
py
|
Python
|
test/test_quota_reports.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
test/test_quota_reports.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
test/test_quota_reports.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.quota_reports import QuotaReports
class TestQuotaReports(unittest.TestCase):
""" QuotaReports unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testQuotaReports(self):
"""
Test QuotaReports
"""
model = swagger_client.models.quota_reports.QuotaReports()
if __name__ == '__main__':
unittest.main()
| 25.122449
| 75
| 0.726239
|
97afe008e6495116860015c6417ff21e197ea792
| 1,356
|
py
|
Python
|
python_basics/9.data_types/python_lists.py
|
edilsonmatola/Python_Master
|
cef88a19e641f6454944bab358841b380c64699e
|
[
"MIT"
] | 2
|
2022-03-12T07:53:23.000Z
|
2022-03-14T16:09:06.000Z
|
python_basics/9.data_types/python_lists.py
|
edilsonmatola/Python_Master
|
cef88a19e641f6454944bab358841b380c64699e
|
[
"MIT"
] | 18
|
2022-03-13T19:45:48.000Z
|
2022-03-31T06:04:12.000Z
|
python_basics/9.data_types/python_lists.py
|
edilsonmatola/Python_Master
|
cef88a19e641f6454944bab358841b380c64699e
|
[
"MIT"
] | null | null | null |
# Lists are one of the most versatile data types that allow us to work with multiple elements.
# A list is created by placing items(elements) inside square brackets[], separated by commas. For example,
# empty list
list1 = []
print(list1) # Output: []
# list of integers
numbers = [1, 2, 3]
print(numbers) # Output: [1, 2, 3]
# list with mixed data types
mixed_list = [1, "Hello", 3.4]
print(mixed_list) # Output: [1, 'Hello', 3.4]
# list with duplicate items
list2 = [1, 2, 3, 1, 3]
print(list2) # Output: [1, 2, 3, 1, 3]
"""
* Acessing List Items
"""
languages = ['Python', 'Dart', 'JavaScript', 'Java']
# First language
first_language = languages[0]
print(first_language) # Output: Python
# Second language
second_language = languages[1]
print(second_language) # Output: Dart
# Third language
third_language = languages[2]
print(third_language) # Output: JavaScript
"""
* Negative Indexing
"""
# Third language
third_language = languages[-3]
print(third_language) # Output: JavaScript
# Last language
last_language = languages[-1]
print(last_language) # Output: 'Java'
"""
* CHALLENGE
Create a list containing 3 items: 9, 11, and 15, and assign it to the odd_numbers variable.
Print the second item of the list (using a positive index).
"""
odd_numbers = [9, 11, 15]
second_item = odd_numbers[1]
print(second_item)
| 20.861538
| 106
| 0.696903
|
1b8d06059de57dd247504fa4e3a0f6e2bc87ecaf
| 48,107
|
bzl
|
Python
|
tensorflow/workspace.bzl
|
EmilWine/tensorflow
|
fe0e121bb8231307700114bf29b5ffa52d9a4f19
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/workspace.bzl
|
EmilWine/tensorflow
|
fe0e121bb8231307700114bf29b5ffa52d9a4f19
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/workspace.bzl
|
EmilWine/tensorflow
|
fe0e121bb8231307700114bf29b5ffa52d9a4f19
|
[
"Apache-2.0"
] | null | null | null |
# TensorFlow external dependencies that can be loaded in WORKSPACE files.
load("//third_party/gpus:cuda_configure.bzl", "cuda_configure")
load("//third_party/gpus:rocm_configure.bzl", "rocm_configure")
load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure")
load("//third_party/nccl:nccl_configure.bzl", "nccl_configure")
load("//third_party/mkl:build_defs.bzl", "mkl_repository")
load("//third_party/git:git_configure.bzl", "git_configure")
load("//third_party/py:python_configure.bzl", "python_configure")
load("//third_party/mlir:mlir_configure.bzl", "mlir_configure")
load("//third_party/sycl:sycl_configure.bzl", "sycl_configure")
load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure")
load("//third_party/toolchains/remote:configure.bzl", "remote_execution_configure")
load("//third_party/toolchains/clang6:repo.bzl", "clang6_configure")
load("//third_party/toolchains/cpus/arm:arm_compiler_configure.bzl", "arm_compiler_configure")
load("//third_party:repo.bzl", "tf_http_archive")
load("//third_party/clang_toolchain:cc_configure_clang.bzl", "cc_download_clang_toolchain")
load("@bazel_tools//tools/build_defs/repo:java.bzl", "java_import_external")
load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external")
load(
"//tensorflow/tools/def_file_filter:def_file_filter_configure.bzl",
"def_file_filter_configure",
)
load("//third_party/FP16:workspace.bzl", FP16 = "repo")
load("//third_party/aws:workspace.bzl", aws = "repo")
load("//third_party/flatbuffers:workspace.bzl", flatbuffers = "repo")
load("//third_party/highwayhash:workspace.bzl", highwayhash = "repo")
load("//third_party/hwloc:workspace.bzl", hwloc = "repo")
load("//third_party/icu:workspace.bzl", icu = "repo")
load("//third_party/jpeg:workspace.bzl", jpeg = "repo")
load("//third_party/nasm:workspace.bzl", nasm = "repo")
load("//third_party/opencl_headers:workspace.bzl", opencl_headers = "repo")
load("//third_party/kissfft:workspace.bzl", kissfft = "repo")
load("//third_party/keras_applications_archive:workspace.bzl", keras_applications = "repo")
load("//third_party/pasta:workspace.bzl", pasta = "repo")
def initialize_third_party():
""" Load third party repositories. See above load() statements. """
FP16()
aws()
flatbuffers()
highwayhash()
hwloc()
icu()
keras_applications()
kissfft()
jpeg()
nasm()
opencl_headers()
pasta()
# Sanitize a dependency so that it works correctly from code that includes
# TensorFlow as a submodule.
def clean_dep(dep):
return str(Label(dep))
# If TensorFlow is linked as a submodule.
# path_prefix is no longer used.
# tf_repo_name is thought to be under consideration.
def tf_workspace(path_prefix = "", tf_repo_name = ""):
tf_repositories(path_prefix, tf_repo_name)
tf_bind()
# Define all external repositories required by TensorFlow
def tf_repositories(path_prefix = "", tf_repo_name = ""):
"""All external dependencies for TF builds."""
# Note that we check the minimum bazel version in WORKSPACE.
clang6_configure(name = "local_config_clang6")
cc_download_clang_toolchain(name = "local_config_download_clang")
cuda_configure(name = "local_config_cuda")
tensorrt_configure(name = "local_config_tensorrt")
nccl_configure(name = "local_config_nccl")
git_configure(name = "local_config_git")
sycl_configure(name = "local_config_sycl")
syslibs_configure(name = "local_config_syslibs")
python_configure(name = "local_config_python")
rocm_configure(name = "local_config_rocm")
mlir_configure(name = "local_config_mlir")
remote_execution_configure(name = "local_config_remote_execution")
initialize_third_party()
# For windows bazel build
# TODO: Remove def file filter when TensorFlow can export symbols properly on Windows.
def_file_filter_configure(name = "local_config_def_file_filter")
# Point //external/local_config_arm_compiler to //external/arm_compiler
arm_compiler_configure(
name = "local_config_arm_compiler",
build_file = clean_dep("//third_party/toolchains/cpus/arm:BUILD"),
remote_config_repo = "../arm_compiler",
)
mkl_repository(
name = "mkl_linux",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "a936d6b277a33d2a027a024ea8e65df62bd2e162c7ca52c48486ed9d5dc27160",
strip_prefix = "mklml_lnx_2019.0.5.20190502",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.21/mklml_lnx_2019.0.5.20190502.tgz",
"https://github.com/intel/mkl-dnn/releases/download/v0.21/mklml_lnx_2019.0.5.20190502.tgz",
],
)
mkl_repository(
name = "mkl_windows",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "33cc27652df3b71d7cb84b26718b5a2e8965e2c864a502347db02746d0430d57",
strip_prefix = "mklml_win_2020.0.20190813",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.21/mklml_win_2020.0.20190813.zip",
"https://github.com/intel/mkl-dnn/releases/download/v0.21/mklml_win_2020.0.20190813.zip",
],
)
mkl_repository(
name = "mkl_darwin",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "2fbb71a0365d42a39ea7906568d69b1db3bfc9914fee75eedb06c5f32bf5fa68",
strip_prefix = "mklml_mac_2019.0.5.20190502",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.21/mklml_mac_2019.0.5.20190502.tgz",
"https://github.com/intel/mkl-dnn/releases/download/v0.21/mklml_mac_2019.0.5.20190502.tgz",
],
)
if path_prefix:
print("path_prefix was specified to tf_workspace but is no longer used " +
"and will be removed in the future.")
# Important: If you are upgrading MKL-DNN, then update the version numbers
# in third_party/mkl_dnn/mkldnn.BUILD. In addition, the new version of
# MKL-DNN might require upgrading MKL ML libraries also. If they need to be
# upgraded then update the version numbers on all three versions above
# (Linux, Mac, Windows).
tf_http_archive(
name = "mkl_dnn",
build_file = clean_dep("//third_party/mkl_dnn:mkldnn.BUILD"),
sha256 = "ed56652dd237deb86ee9bf102c18de5f2625c059e5ab1d7512c8dc01e316b694",
strip_prefix = "mkl-dnn-0.21.2",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/archive/v0.21.2.tar.gz",
"https://github.com/intel/mkl-dnn/archive/v0.21.2.tar.gz",
],
)
tf_http_archive(
name = "mkl_dnn_v1",
build_file = clean_dep("//third_party/mkl_dnn:mkldnn.BUILD"),
sha256 = "fcc2d951f7170eade0cfdd0d8d1d58e3e7785bd326bca6555f3722f8cba71811",
strip_prefix = "mkl-dnn-1.0-pc2",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/archive/v1.0-pc2.tar.gz",
"https://github.com/intel/mkl-dnn/archive/v1.0-pc2.tar.gz",
],
)
tf_http_archive(
name = "com_google_absl",
build_file = clean_dep("//third_party:com_google_absl.BUILD"),
# TODO: Remove the patch when https://github.com/abseil/abseil-cpp/issues/326 is resolved
# and when TensorFlow is build against CUDA 10.2
patch_file = clean_dep("//third_party:com_google_absl_fix_mac_and_nvcc_build.patch"),
sha256 = "acd93f6baaedc4414ebd08b33bebca7c7a46888916101d8c0b8083573526d070",
strip_prefix = "abseil-cpp-43ef2148c0936ebf7cb4be6b19927a9d9d145b8f",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-cpp/archive/43ef2148c0936ebf7cb4be6b19927a9d9d145b8f.tar.gz",
"https://github.com/abseil/abseil-cpp/archive/43ef2148c0936ebf7cb4be6b19927a9d9d145b8f.tar.gz",
],
)
#tf_http_archive(
# name = "eigen_archive",
# build_file = clean_dep("//third_party:eigen.BUILD"),
# patch_file = clean_dep("//third_party/eigen3:gpu_packet_math.patch"),
# sha256 = "6d8ed482addd14892d7b0bd98fec2c02f18fdab97775bda68c3f2a99ffb190fb",
# strip_prefix = "eigen-eigen-66be6c76fc01",
# urls = [
# "https://storage.googleapis.com/mirror.tensorflow.org/bitbucket.org/eigen/eigen/get/66be6c76fc01.tar.gz",
# "https://bitbucket.org/eigen/eigen/get/66be6c76fc01.tar.gz",
# ],
#)
tf_http_archive(
name = "eigen_archive",
build_file = clean_dep("//third_party:eigen.BUILD"),
patch_file = clean_dep("//third_party/eigen3:gpu_packet_math.patch"),
sha256 = "2b02e73465de24b75344ef443e79f9a88a58c169d0f31091c94ce6f37690d1fb",
strip_prefix = "eigen-git-mirror-master_tf_79b7412",
urls = [
"https://github.com/EmilWine/eigen-git-mirror/archive/master_tf_79b7412.tar.gz",
"https://storage.googleapis.com/mirror.tensorflow.org/.tar.gz",
],
)
tf_http_archive(
name = "arm_compiler",
build_file = clean_dep("//:arm_compiler.BUILD"),
sha256 = "b9e7d50ffd9996ed18900d041d362c99473b382c0ae049b2fce3290632d2656f",
strip_prefix = "rpi-newer-crosstools-eb68350c5c8ec1663b7fe52c742ac4271e3217c5/x64-gcc-6.5.0/arm-rpi-linux-gnueabihf/",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz",
"https://github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz",
],
)
tf_http_archive(
name = "libxsmm_archive",
build_file = clean_dep("//third_party:libxsmm.BUILD"),
sha256 = "9c0af4509ea341d1ee2c6c19fc6f19289318c3bd4b17844efeb9e7f9691abf76",
strip_prefix = "libxsmm-1.14",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/hfp/libxsmm/archive/1.14.tar.gz",
"https://github.com/hfp/libxsmm/archive/1.14.tar.gz",
],
)
tf_http_archive(
name = "com_googlesource_code_re2",
sha256 = "d070e2ffc5476c496a6a872a6f246bfddce8e7797d6ba605a7c8d72866743bf9",
strip_prefix = "re2-506cfa4bffd060c06ec338ce50ea3468daa6c814",
system_build_file = clean_dep("//third_party/systemlibs:re2.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz",
"https://github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz",
],
)
tf_http_archive(
name = "com_github_googlecloudplatform_google_cloud_cpp",
sha256 = "295754023a44eb69d0ff5ee2c0ac11ff1b7adcd617f122d57fc7a5a49fac612d",
strip_prefix = "google-cloud-cpp-0.14.0",
system_build_file = clean_dep("//third_party/systemlibs:google_cloud_cpp.BUILD"),
system_link_files = {
"//third_party/systemlibs:google_cloud_cpp.google.cloud.bigtable.BUILD": "google/cloud/bigtable/BUILD",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/google-cloud-cpp/archive/v0.14.0.tar.gz",
"https://github.com/googleapis/google-cloud-cpp/archive/v0.14.0.tar.gz",
],
)
tf_http_archive(
name = "com_github_googleapis_googleapis",
build_file = clean_dep("//third_party:googleapis.BUILD"),
sha256 = "824870d87a176f26bcef663e92051f532fac756d1a06b404055dc078425f4378",
strip_prefix = "googleapis-f81082ea1e2f85c43649bee26e0d9871d4b41cdb",
system_build_file = clean_dep("//third_party/systemlibs:googleapis.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/googleapis/archive/f81082ea1e2f85c43649bee26e0d9871d4b41cdb.zip",
"https://github.com/googleapis/googleapis/archive/f81082ea1e2f85c43649bee26e0d9871d4b41cdb.zip",
],
)
tf_http_archive(
name = "gemmlowp",
sha256 = "6678b484d929f2d0d3229d8ac4e3b815a950c86bb9f17851471d143f6d4f7834",
strip_prefix = "gemmlowp-12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/gemmlowp/archive/12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3.zip",
"https://github.com/google/gemmlowp/archive/12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3.zip",
],
)
tf_http_archive(
name = "farmhash_archive",
build_file = clean_dep("//third_party:farmhash.BUILD"),
sha256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0",
strip_prefix = "farmhash-816a4ae622e964763ca0862d9dbd19324a1eaf45",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
"https://github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
],
)
tf_http_archive(
name = "png",
build_file = clean_dep("//third_party:png.BUILD"),
patch_file = clean_dep("//third_party:png_fix_rpi.patch"),
sha256 = "ca74a0dace179a8422187671aee97dd3892b53e168627145271cad5b5ac81307",
strip_prefix = "libpng-1.6.37",
system_build_file = clean_dep("//third_party/systemlibs:png.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/glennrp/libpng/archive/v1.6.37.tar.gz",
"https://github.com/glennrp/libpng/archive/v1.6.37.tar.gz",
],
)
tf_http_archive(
name = "org_sqlite",
build_file = clean_dep("//third_party:sqlite.BUILD"),
sha256 = "adf051d4c10781ea5cfabbbc4a2577b6ceca68590d23b58b8260a8e24cc5f081",
strip_prefix = "sqlite-amalgamation-3300100",
system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2019/sqlite-amalgamation-3300100.zip",
"https://www.sqlite.org/2019/sqlite-amalgamation-3300100.zip",
],
)
tf_http_archive(
name = "gif",
build_file = clean_dep("//third_party:gif.BUILD"),
patch_file = clean_dep("//third_party:gif_fix_strtok_r.patch"),
sha256 = "31da5562f44c5f15d63340a09a4fd62b48c45620cd302f77a6d9acf0077879bd",
strip_prefix = "giflib-5.2.1",
system_build_file = clean_dep("//third_party/systemlibs:gif.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz",
"http://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz",
],
)
tf_http_archive(
name = "six_archive",
build_file = clean_dep("//third_party:six.BUILD"),
sha256 = "d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73",
strip_prefix = "six-1.12.0",
system_build_file = clean_dep("//third_party/systemlibs:six.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/source/s/six/six-1.12.0.tar.gz",
"https://pypi.python.org/packages/source/s/six/six-1.12.0.tar.gz",
],
)
tf_http_archive(
name = "astor_archive",
build_file = clean_dep("//third_party:astor.BUILD"),
sha256 = "95c30d87a6c2cf89aa628b87398466840f0ad8652f88eb173125a6df8533fb8d",
strip_prefix = "astor-0.7.1",
system_build_file = clean_dep("//third_party/systemlibs:astor.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
"https://pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
],
)
tf_http_archive(
name = "functools32_archive",
build_file = clean_dep("//third_party:functools32.BUILD"),
sha256 = "f6253dfbe0538ad2e387bd8fdfd9293c925d63553f5813c4e587745416501e6d",
strip_prefix = "functools32-3.2.3-2",
system_build_file = clean_dep("//third_party/systemlibs:functools32.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz",
"https://pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz",
],
)
tf_http_archive(
name = "gast_archive",
build_file = clean_dep("//third_party:gast.BUILD"),
sha256 = "fe939df4583692f0512161ec1c880e0a10e71e6a232da045ab8edd3756fbadf0",
strip_prefix = "gast-0.2.2",
system_build_file = clean_dep("//third_party/systemlibs:gast.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/4e/35/11749bf99b2d4e3cceb4d55ca22590b0d7c2c62b9de38ac4a4a7f4687421/gast-0.2.2.tar.gz",
"https://files.pythonhosted.org/packages/4e/35/11749bf99b2d4e3cceb4d55ca22590b0d7c2c62b9de38ac4a4a7f4687421/gast-0.2.2.tar.gz",
],
)
tf_http_archive(
name = "termcolor_archive",
build_file = clean_dep("//third_party:termcolor.BUILD"),
sha256 = "1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b",
strip_prefix = "termcolor-1.1.0",
system_build_file = clean_dep("//third_party/systemlibs:termcolor.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
"https://pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
],
)
tf_http_archive(
name = "opt_einsum_archive",
build_file = clean_dep("//third_party:opt_einsum.BUILD"),
sha256 = "d3d464b4da7ef09e444c30e4003a27def37f85ff10ff2671e5f7d7813adac35b",
strip_prefix = "opt_einsum-2.3.2",
system_build_file = clean_dep("//third_party/systemlibs:opt_einsum.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz",
"https://pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz",
],
)
tf_http_archive(
name = "absl_py",
sha256 = "280c76ec0c9ab7a1dff550cdc37b7c7cd28551103dc3955202760ea8e381aa9d",
strip_prefix = "abseil-py-pypi-v0.8.0",
system_build_file = clean_dep("//third_party/systemlibs:absl_py.BUILD"),
system_link_files = {
"//third_party/systemlibs:absl_py.absl.BUILD": "absl/BUILD",
"//third_party/systemlibs:absl_py.absl.flags.BUILD": "absl/flags/BUILD",
"//third_party/systemlibs:absl_py.absl.testing.BUILD": "absl/testing/BUILD",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-py/archive/pypi-v0.8.0.tar.gz",
"https://github.com/abseil/abseil-py/archive/pypi-v0.8.0.tar.gz",
],
)
tf_http_archive(
name = "enum34_archive",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
"https://pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
],
sha256 = "8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1",
build_file = clean_dep("//third_party:enum34.BUILD"),
system_build_file = clean_dep("//third_party/systemlibs:enum34.BUILD"),
strip_prefix = "enum34-1.1.6/enum",
)
tf_http_archive(
name = "org_python_pypi_backports_weakref",
build_file = clean_dep("//third_party:backports_weakref.BUILD"),
sha256 = "8813bf712a66b3d8b85dc289e1104ed220f1878cf981e2fe756dfaabe9a82892",
strip_prefix = "backports.weakref-1.0rc1/src",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
"https://pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
],
)
filegroup_external(
name = "org_python_license",
licenses = ["notice"], # Python 2.0
sha256_urls = {
"e76cacdf0bdd265ff074ccca03671c33126f597f39d0ed97bc3e5673d9170cf6": [
"https://storage.googleapis.com/mirror.tensorflow.org/docs.python.org/2.7/_sources/license.rst.txt",
"https://docs.python.org/2.7/_sources/license.rst.txt",
],
},
)
# 310ba5ee72661c081129eb878c1bbcec936b20f0 is based on 3.8.0 with a fix for protobuf.bzl.
PROTOBUF_URLS = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/protocolbuffers/protobuf/archive/310ba5ee72661c081129eb878c1bbcec936b20f0.tar.gz",
"https://github.com/protocolbuffers/protobuf/archive/310ba5ee72661c081129eb878c1bbcec936b20f0.tar.gz",
]
PROTOBUF_SHA256 = "b9e92f9af8819bbbc514e2902aec860415b70209f31dfc8c4fa72515a5df9d59"
PROTOBUF_STRIP_PREFIX = "protobuf-310ba5ee72661c081129eb878c1bbcec936b20f0"
    # protobuf depends on @zlib; it has to be renamed to @zlib_archive because "zlib" is
    # already defined using bind for grpc.
PROTOBUF_PATCH = "//third_party/protobuf:protobuf.patch"
tf_http_archive(
name = "com_google_protobuf",
patch_file = clean_dep(PROTOBUF_PATCH),
sha256 = PROTOBUF_SHA256,
strip_prefix = PROTOBUF_STRIP_PREFIX,
system_build_file = clean_dep("//third_party/systemlibs:protobuf.BUILD"),
system_link_files = {
"//third_party/systemlibs:protobuf.bzl": "protobuf.bzl",
},
urls = PROTOBUF_URLS,
)
tf_http_archive(
name = "nsync",
sha256 = "caf32e6b3d478b78cff6c2ba009c3400f8251f646804bcb65465666a9cea93c4",
strip_prefix = "nsync-1.22.0",
system_build_file = clean_dep("//third_party/systemlibs:nsync.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/nsync/archive/1.22.0.tar.gz",
"https://github.com/google/nsync/archive/1.22.0.tar.gz",
],
)
tf_http_archive(
name = "com_google_googletest",
sha256 = "ff7a82736e158c077e76188232eac77913a15dac0b22508c390ab3f88e6d6d86",
strip_prefix = "googletest-b6cd405286ed8635ece71c72f118e659f4ade3fb",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
"https://github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
],
)
tf_http_archive(
name = "com_github_gflags_gflags",
sha256 = "ae27cdbcd6a2f935baa78e4f21f675649271634c092b1be01469440495609d0e",
strip_prefix = "gflags-2.2.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/gflags/gflags/archive/v2.2.1.tar.gz",
"https://github.com/gflags/gflags/archive/v2.2.1.tar.gz",
],
)
tf_http_archive(
name = "pcre",
build_file = clean_dep("//third_party:pcre.BUILD"),
sha256 = "69acbc2fbdefb955d42a4c606dfde800c2885711d2979e356c0636efde9ec3b5",
strip_prefix = "pcre-8.42",
system_build_file = clean_dep("//third_party/systemlibs:pcre.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/ftp.exim.org/pub/pcre/pcre-8.42.tar.gz",
"http://ftp.exim.org/pub/pcre/pcre-8.42.tar.gz",
],
)
tf_http_archive(
name = "swig",
build_file = clean_dep("//third_party:swig.BUILD"),
sha256 = "58a475dbbd4a4d7075e5fe86d4e54c9edde39847cdb96a3053d87cb64a23a453",
strip_prefix = "swig-3.0.8",
system_build_file = clean_dep("//third_party/systemlibs:swig.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
"http://ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
"http://pilotfiber.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
],
)
tf_http_archive(
name = "curl",
build_file = clean_dep("//third_party:curl.BUILD"),
sha256 = "d0393da38ac74ffac67313072d7fe75b1fa1010eb5987f63f349b024a36b7ffb",
strip_prefix = "curl-7.66.0",
system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.66.0.tar.gz",
"https://curl.haxx.se/download/curl-7.66.0.tar.gz",
],
)
# WARNING: make sure ncteisen@ and vpai@ are cc-ed on any CL to change the below rule
tf_http_archive(
name = "grpc",
sha256 = "67a6c26db56f345f7cee846e681db2c23f919eba46dd639b09462d1b6203d28c",
strip_prefix = "grpc-4566c2a29ebec0835643b972eb99f4306c4234a3",
system_build_file = clean_dep("//third_party/systemlibs:grpc.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/grpc/grpc/archive/4566c2a29ebec0835643b972eb99f4306c4234a3.tar.gz",
"https://github.com/grpc/grpc/archive/4566c2a29ebec0835643b972eb99f4306c4234a3.tar.gz",
],
)
tf_http_archive(
name = "com_github_nanopb_nanopb",
sha256 = "8bbbb1e78d4ddb0a1919276924ab10d11b631df48b657d960e0c795a25515735",
build_file = "@grpc//third_party:nanopb.BUILD",
strip_prefix = "nanopb-f8ac463766281625ad710900479130c7fcb4d63b",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nanopb/nanopb/archive/f8ac463766281625ad710900479130c7fcb4d63b.tar.gz",
"https://github.com/nanopb/nanopb/archive/f8ac463766281625ad710900479130c7fcb4d63b.tar.gz",
],
)
tf_http_archive(
name = "linenoise",
build_file = clean_dep("//third_party:linenoise.BUILD"),
sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7",
strip_prefix = "linenoise-c894b9e59f02203dbe4e2be657572cf88c4230c3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
"https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
],
)
tf_http_archive(
name = "llvm",
build_file = clean_dep("//third_party/llvm:llvm.autogenerated.BUILD"),
sha256 = "6bafd092643a7208a6539d355e5f99fc7bbd6424e14288d9354515a5231fd2e8",
strip_prefix = "llvm-project-4e8231b5cf0f5f62c7a51a857e29f5be5cb55734/llvm",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/4e8231b5cf0f5f62c7a51a857e29f5be5cb55734.tar.gz",
"https://github.com/llvm/llvm-project/archive/4e8231b5cf0f5f62c7a51a857e29f5be5cb55734.tar.gz",
],
)
tf_http_archive(
name = "lmdb",
build_file = clean_dep("//third_party:lmdb.BUILD"),
sha256 = "f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28",
strip_prefix = "lmdb-LMDB_0.9.22/libraries/liblmdb",
system_build_file = clean_dep("//third_party/systemlibs:lmdb.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
"https://github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
],
)
tf_http_archive(
name = "jsoncpp_git",
build_file = clean_dep("//third_party:jsoncpp.BUILD"),
sha256 = "c49deac9e0933bcb7044f08516861a2d560988540b23de2ac1ad443b219afdb6",
strip_prefix = "jsoncpp-1.8.4",
system_build_file = clean_dep("//third_party/systemlibs:jsoncpp.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/open-source-parsers/jsoncpp/archive/1.8.4.tar.gz",
"https://github.com/open-source-parsers/jsoncpp/archive/1.8.4.tar.gz",
],
)
tf_http_archive(
name = "boringssl",
sha256 = "1188e29000013ed6517168600fc35a010d58c5d321846d6a6dfee74e4c788b45",
strip_prefix = "boringssl-7f634429a04abc48e2eb041c81c5235816c96514",
system_build_file = clean_dep("//third_party/systemlibs:boringssl.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/boringssl/archive/7f634429a04abc48e2eb041c81c5235816c96514.tar.gz",
"https://github.com/google/boringssl/archive/7f634429a04abc48e2eb041c81c5235816c96514.tar.gz",
],
)
tf_http_archive(
name = "zlib_archive",
build_file = clean_dep("//third_party:zlib.BUILD"),
sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
strip_prefix = "zlib-1.2.11",
system_build_file = clean_dep("//third_party/systemlibs:zlib.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/zlib.net/zlib-1.2.11.tar.gz",
"https://zlib.net/zlib-1.2.11.tar.gz",
],
)
tf_http_archive(
name = "fft2d",
build_file = clean_dep("//third_party/fft2d:fft2d.BUILD"),
sha256 = "ada7e99087c4ed477bfdf11413f2ba8db8a840ba9bbf8ac94f4f3972e2a7cec9",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/www.kurims.kyoto-u.ac.jp/~ooura/fft2d.tgz",
"http://www.kurims.kyoto-u.ac.jp/~ooura/fft2d.tgz",
],
)
tf_http_archive(
name = "snappy",
build_file = clean_dep("//third_party:snappy.BUILD"),
sha256 = "3dfa02e873ff51a11ee02b9ca391807f0c8ea0529a4924afa645fbf97163f9d4",
strip_prefix = "snappy-1.1.7",
system_build_file = clean_dep("//third_party/systemlibs:snappy.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/snappy/archive/1.1.7.tar.gz",
"https://github.com/google/snappy/archive/1.1.7.tar.gz",
],
)
tf_http_archive(
name = "nccl_archive",
build_file = clean_dep("//third_party:nccl/archive.BUILD"),
patch_file = clean_dep("//third_party/nccl:archive.patch"),
sha256 = "9a7633e224982e2b60fa6b397d895d20d6b7498e3e02f46f98a5a4e187c5a44c",
strip_prefix = "nccl-0ceaec9cee96ae7658aa45686853286651f36384",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nvidia/nccl/archive/0ceaec9cee96ae7658aa45686853286651f36384.tar.gz",
"https://github.com/nvidia/nccl/archive/0ceaec9cee96ae7658aa45686853286651f36384.tar.gz",
],
)
java_import_external(
name = "junit",
jar_sha256 = "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"http://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"http://maven.ibiblio.org/maven2/junit/junit/4.12/junit-4.12.jar",
],
licenses = ["reciprocal"], # Common Public License Version 1.0
testonly_ = True,
deps = ["@org_hamcrest_core"],
)
java_import_external(
name = "org_hamcrest_core",
jar_sha256 = "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"http://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"http://maven.ibiblio.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
)
java_import_external(
name = "com_google_testing_compile",
jar_sha256 = "edc180fdcd9f740240da1a7a45673f46f59c5578d8cd3fbc912161f74b5aebb8",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
"http://repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
deps = ["@com_google_guava", "@com_google_truth"],
)
java_import_external(
name = "com_google_truth",
jar_sha256 = "032eddc69652b0a1f8d458f999b4a9534965c646b8b5de0eba48ee69407051df",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
"http://repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
],
licenses = ["notice"], # Apache 2.0
testonly_ = True,
deps = ["@com_google_guava"],
)
java_import_external(
name = "org_checkerframework_qual",
jar_sha256 = "d261fde25d590f6b69db7721d469ac1b0a19a17ccaaaa751c31f0d8b8260b894",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar",
"http://repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
java_import_external(
name = "com_squareup_javapoet",
jar_sha256 = "5bb5abdfe4366c15c0da3332c57d484e238bd48260d6f9d6acf2b08fdde1efea",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
"http://repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
tf_http_archive(
name = "com_google_pprof",
build_file = clean_dep("//third_party:pprof.BUILD"),
sha256 = "e0928ca4aa10ea1e0551e2d7ce4d1d7ea2d84b2abbdef082b0da84268791d0c4",
strip_prefix = "pprof-c0fb62ec88c411cc91194465e54db2632845b650",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
"https://github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
],
)
tf_http_archive(
name = "cub_archive",
build_file = clean_dep("//third_party:cub.BUILD"),
patch_file = clean_dep("//third_party:cub.pr170.patch"),
sha256 = "6bfa06ab52a650ae7ee6963143a0bbc667d6504822cbd9670369b598f18c58c3",
strip_prefix = "cub-1.8.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NVlabs/cub/archive/1.8.0.zip",
"https://github.com/NVlabs/cub/archive/1.8.0.zip",
],
)
tf_http_archive(
name = "cython",
build_file = clean_dep("//third_party:cython.BUILD"),
delete = ["BUILD.bazel"],
sha256 = "bccc9aa050ea02595b2440188813b936eaf345e85fb9692790cecfe095cf91aa",
strip_prefix = "cython-0.28.4",
system_build_file = clean_dep("//third_party/systemlibs:cython.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/cython/cython/archive/0.28.4.tar.gz",
"https://github.com/cython/cython/archive/0.28.4.tar.gz",
],
)
tf_http_archive(
name = "arm_neon_2_x86_sse",
build_file = clean_dep("//third_party:arm_neon_2_x86_sse.BUILD"),
sha256 = "213733991310b904b11b053ac224fee2d4e0179e46b52fe7f8735b8831e04dcc",
strip_prefix = "ARM_NEON_2_x86_SSE-1200fe90bb174a6224a525ee60148671a786a71f",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
"https://github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
],
)
tf_http_archive(
name = "double_conversion",
build_file = clean_dep("//third_party:double_conversion.BUILD"),
sha256 = "2f7fbffac0d98d201ad0586f686034371a6d152ca67508ab611adc2386ad30de",
strip_prefix = "double-conversion-3992066a95b823efc8ccc1baf82a1cfc73f6e9b8",
system_build_file = clean_dep("//third_party/systemlibs:double_conversion.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
"https://github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_float",
build_file = clean_dep("//third_party:tflite_mobilenet_float.BUILD"),
sha256 = "2fadeabb9968ec6833bee903900dda6e61b3947200535874ce2fe42a8493abc0",
urls = [
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_quant",
build_file = clean_dep("//third_party:tflite_mobilenet_quant.BUILD"),
sha256 = "d32432d28673a936b2d6281ab0600c71cf7226dfe4cdcef3012555f691744166",
urls = [
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "767057f2837a46d97882734b03428e8dd640b93236052b312b2f0e45613c1cf0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "a809cd290b4d6a2e8a9d5dad076e0bd695b8091974e0eed1052b480b2f21b6dc",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant_protobuf",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "09280972c5777f1aa775ef67cb4ac5d5ed21970acd8535aeca62450ef14f0d79",
strip_prefix = "ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
"https://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
],
)
tf_http_archive(
name = "tflite_conv_actions_frozen",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "d947b38cba389b5e2d0bfc3ea6cc49c784e187b41a071387b3742d1acac7691e",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
],
)
tf_http_archive(
name = "tflite_smartreply",
build_file = clean_dep("//third_party:tflite_smartreply.BUILD"),
sha256 = "8980151b85a87a9c1a3bb1ed4748119e4a85abd3cb5744d83da4d4bd0fbeef7c",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/smartreply_1.0_2017_11_01.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/smartreply_1.0_2017_11_01.zip",
],
)
tf_http_archive(
name = "tflite_ovic_testdata",
build_file = clean_dep("//third_party:tflite_ovic_testdata.BUILD"),
sha256 = "033c941b7829b05ca55a124a26a6a0581b1ececc154a2153cafcfdb54f80dca2",
strip_prefix = "ovic",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
"https://storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
],
)
tf_http_archive(
name = "build_bazel_rules_android",
sha256 = "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806",
strip_prefix = "rules_android-0.1.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
"https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
],
)
tf_http_archive(
name = "tbb",
build_file = clean_dep("//third_party/ngraph:tbb.BUILD"),
sha256 = "c3245012296f09f1418b78a8c2f17df5188b3bd0db620f7fd5fabe363320805a",
strip_prefix = "tbb-2019_U1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/01org/tbb/archive/2019_U1.zip",
"https://github.com/01org/tbb/archive/2019_U1.zip",
],
)
tf_http_archive(
name = "ngraph",
build_file = clean_dep("//third_party/ngraph:ngraph.BUILD"),
sha256 = "a1780f24a1381fc25e323b4b2d08b6ef5129f42e011305b2a34dcf43a48030d5",
strip_prefix = "ngraph-0.11.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NervanaSystems/ngraph/archive/v0.11.0.tar.gz",
"https://github.com/NervanaSystems/ngraph/archive/v0.11.0.tar.gz",
],
)
tf_http_archive(
name = "nlohmann_json_lib",
build_file = clean_dep("//third_party/ngraph:nlohmann_json.BUILD"),
sha256 = "c377963a95989270c943d522bfefe7b889ef5ed0e1e15d535fd6f6f16ed70732",
strip_prefix = "json-3.4.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nlohmann/json/archive/v3.4.0.tar.gz",
"https://github.com/nlohmann/json/archive/v3.4.0.tar.gz",
],
)
tf_http_archive(
name = "ngraph_tf",
build_file = clean_dep("//third_party/ngraph:ngraph_tf.BUILD"),
sha256 = "742a642d2c6622277df4c902b6830d616d0539cc8cd843d6cdb899bb99e66e36",
strip_prefix = "ngraph-tf-0.9.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NervanaSystems/ngraph-tf/archive/v0.9.0.zip",
"https://github.com/NervanaSystems/ngraph-tf/archive/v0.9.0.zip",
],
)
tf_http_archive(
name = "pybind11",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/pybind/pybind11/archive/v2.3.0.tar.gz",
"https://github.com/pybind/pybind11/archive/v2.3.0.tar.gz",
],
sha256 = "0f34838f2c8024a6765168227ba587b3687729ebf03dc912f88ff75c7aa9cfe8",
strip_prefix = "pybind11-2.3.0",
build_file = clean_dep("//third_party:pybind11.BUILD"),
)
tf_http_archive(
name = "wrapt",
build_file = clean_dep("//third_party:wrapt.BUILD"),
sha256 = "8a6fb40e8f8b6a66b4ba81a4044c68e6a7b1782f21cfabc06fb765332b4c3e51",
strip_prefix = "wrapt-1.11.1/src/wrapt",
system_build_file = clean_dep("//third_party/systemlibs:wrapt.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz",
"https://github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz",
],
)
def tf_bind():
"""Bind targets for some external repositories"""
##############################################################################
# BIND DEFINITIONS
#
# Please do not add bind() definitions unless we have no other choice.
# If that ends up being the case, please leave a comment explaining
# why we can't depend on the canonical build target.
    # gRPC wants a cares dependency but its contents are not actually
    # important since we have set GRPC_ARES=0 in .bazelrc
native.bind(
name = "cares",
actual = "@com_github_nanopb_nanopb//:nanopb",
)
# Needed by Protobuf
native.bind(
name = "grpc_cpp_plugin",
actual = "@grpc//:grpc_cpp_plugin",
)
native.bind(
name = "grpc_python_plugin",
actual = "@grpc//:grpc_python_plugin",
)
native.bind(
name = "grpc_lib",
actual = "@grpc//:grpc++",
)
native.bind(
name = "grpc_lib_unsecure",
actual = "@grpc//:grpc++_unsecure",
)
# Needed by gRPC
native.bind(
name = "libssl",
actual = "@boringssl//:ssl",
)
# Needed by gRPC
native.bind(
name = "nanopb",
actual = "@com_github_nanopb_nanopb//:nanopb",
)
# Needed by gRPC
native.bind(
name = "protobuf",
actual = "@com_google_protobuf//:protobuf",
)
# gRPC expects //external:protobuf_clib and //external:protobuf_compiler
# to point to Protobuf's compiler library.
native.bind(
name = "protobuf_clib",
actual = "@com_google_protobuf//:protoc_lib",
)
# Needed by gRPC
native.bind(
name = "protobuf_headers",
actual = "@com_google_protobuf//:protobuf_headers",
)
# Needed by Protobuf
native.bind(
name = "python_headers",
actual = clean_dep("//third_party/python_runtime:headers"),
)
# Needed by Protobuf
native.bind(
name = "six",
actual = "@six_archive//:six",
)
# Needed by gRPC
native.bind(
name = "zlib",
actual = "@zlib_archive//:zlib",
)
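    # Illustration only (not a rule taken from this repository): a consuming BUILD
    # rule would typically reach these aliases through the //external package, e.g.
    #
    #   cc_library(
    #       name = "uses_protobuf",
    #       deps = ["//external:protobuf_clib"],
    #   )
    #
    # which resolves to @com_google_protobuf//:protoc_lib via the bind() above.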
| 46.979492
| 203
| 0.683331
|
f3cb10f8fcc951be6abb06293e4e7c66135d270b
| 2,630
|
py
|
Python
|
analysis/scripts/.ipynb_checkpoints/project_functions-checkpoint.py
|
data301-2020-winter2/course-project-group_1040
|
d5214ff361e210996618b5c2edd4f3f882d3df07
|
[
"MIT"
] | null | null | null |
analysis/scripts/.ipynb_checkpoints/project_functions-checkpoint.py
|
data301-2020-winter2/course-project-group_1040
|
d5214ff361e210996618b5c2edd4f3f882d3df07
|
[
"MIT"
] | 1
|
2021-03-24T08:27:48.000Z
|
2021-03-30T16:56:01.000Z
|
analysis/scripts/project_functions.py
|
data301-2020-winter2/course-project-group_1040
|
d5214ff361e210996618b5c2edd4f3f882d3df07
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import csv
def load_and_process(address):
    # Method Chain #1: Load the data, rename columns, and fill in missing values
df1 = (
pd.read_csv(address)
.rename(columns={"team1" : "home_team",
"team2" : "away_team",
"elo1_pre" : "home_pre",
"elo2_pre" : "away_pre",
"elo_prob1" : "home_prob",
"elo_prob2" : "away_prob",
"elo1_post" : "home_post",
"elo2_post" : "away_post",
"pitcher1" : "home_pitcher",
"pitcher2" : "away_pitcher",
"pitcher1_adj" : "home_pitcher_rating",
"pitcher2_adj" : "away_pitcher_rating",
"pitcher1_rgs" : "home_pitcher_rgs",
"pitcher2_rgs" : "away_pitcher_rgs",
"score1" : "home_score",
"score2" : "away_score"}) # Creates a more readable dataframe
.round(3) #Round all values to three decimals
.fillna("s") #The only NaN values are if they are playoffs so we can change them to s for regular season
)
    # Method Chain #2: Remove unneeded columns (keep only elo ratings and adjusted pitcher ratings)
df2 = (
df1.drop(["rating1_pre",
"rating2_pre",
"rating_prob1",
"rating_prob2",
"rating1_post",
"rating2_post", ], axis="columns") #We will only be worrying about elo ratings for this project, and adjusted pitcher ratings
)
#Returned DataFrame
return df2
def variables():
teams = {"current_teams" : ["LAD", "TBD", "ATL", "HOU", "SDP", "NYY", "OAK", "FLA", "CHC", "MIL", "CHW", "CIN", "CLE", "STL", "TOR", "MIN", "KCR", "SFG", "SEA", "ARI", "TEX", "WSN", "PHI", "BAL", "NYM", "PIT", "ANA", "BOS", "DET", "COL"],
"national_league" : ["CHC", "WSN", "STL", "SFG", "ATL", "PHI","CIN","PIT", "SDP", "NYM", "MIA", "ARI", "MIL", "COL", "LAD"],
"american_league" : ["NYY", "HOU", "BOS", "TOR", "CLE", "ANA", "CHW", "BAL", "DET", "MIN", "TBD", "OAK", "SEA", "KCR", "TEX"],
"al_east" : ["BAL", "BOS", "NYY", "TBD", "TOR"],
"al_central" : ["CHW", "CLE", "DET", "KCR", "MIN"],
"al_west" : ["HOU", "ANA", "OAK", "SEA", "TEX"],
"nl_east" : ["ATL", "FLA", "NYM", "PHI", "WSN"],
"nl_central" : ["CHC", "CIN", "MIL", "PIT", "STL"],
"nl_west" : ["ARI", "COL", "LAD", "SDP", "SFG"]}
return teams
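# A minimal usage sketch (the CSV path below is an illustrative assumption, not
# part of this module): load the elo data and filter it to the AL East teams.
#
#   games = load_and_process("../data/raw/mlb_elo.csv")
#   al_east = variables()["al_east"]
#   print(games[games["home_team"].isin(al_east)].head())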
| 47.818182
| 242
| 0.471863
|
3a64126a2e69e85a7e7f9e1720246bba1f5c8fc5
| 1,861
|
py
|
Python
|
RRMaps/RRMapDownload.py
|
ferdn4ndo/rrmaps
|
54e96144b9a3956d4570bd8c2414634bf673d472
|
[
"MIT"
] | null | null | null |
RRMaps/RRMapDownload.py
|
ferdn4ndo/rrmaps
|
54e96144b9a3956d4570bd8c2414634bf673d472
|
[
"MIT"
] | null | null | null |
RRMaps/RRMapDownload.py
|
ferdn4ndo/rrmaps
|
54e96144b9a3956d4570bd8c2414634bf673d472
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
## Tile downloading
##
import os, sys, urllib2, time, random
from PIL import Image
class RRMapDownload():
def __init__(self):
pass
# self.DownloadTile();
def SetParams(self, TileX = 0, TileY = 0, Zoom = 10, Lyrs = 's'):
self.TileX = TileX
self.TileY = TileY
self.Zoom = Zoom
self.Lyrs = Lyrs
def DownloadXYZ(self, TileX = 0, TileY = 0, Zoom = 10, Lyrs = 's'):
folder = 'cache/'
self.SetParams(TileX, TileY, Zoom, Lyrs)
user_agent = 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; de-at) AppleWebKit/533.21.1 (KHTML, like Gecko) Version/5.0.5 Safari/533.21.1'
headers = { 'User-Agent' : user_agent }
url = None
filename = None
#should work for mt0 to mt3
url = "https://mt2.google.com/vt/lyrs=%s&?x=%d&s=&y=%d&z=%d" % (Lyrs, TileX, TileY, Zoom)
print url
if not(os.path.isdir(folder)):
os.mkdir(folder, 0755 );
filename = folder+"%d_%d_%d_%s.jpg" % (Zoom, TileX, TileY, Lyrs)
if not os.path.exists(filename):
bytes = None
try:
req = urllib2.Request(url, data=None, headers=headers)
response = urllib2.urlopen(req)
bytes = response.read()
except Exception, e:
print "--", filename, "->", e
sys.exit(1)
if bytes.startswith("<html>"):
print "-- forbidden", filename
sys.exit(1)
print "-- saving", filename
f = open(filename,'wb')
f.write(bytes)
f.close()
time.sleep(1 + random.random())
return Image.open(filename)
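# A minimal usage sketch (the tile coordinates are arbitrary illustrative values;
# note the module targets Python 2, using urllib2 and print statements):
#
#   downloader = RRMapDownload()
#   img = downloader.DownloadXYZ(TileX=301, TileY=384, Zoom=10, Lyrs='s')
#   img.show()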
| 26.585714
| 150
| 0.497582
|
1494be16c40609d0444c2f9668f0d63a12878f4c
| 887
|
py
|
Python
|
BackendServer/handlers/BaseHandler.py
|
valiro21/mlc
|
428719b8589b3ca9922ae0c6fa527f47f8a98690
|
[
"MIT"
] | null | null | null |
BackendServer/handlers/BaseHandler.py
|
valiro21/mlc
|
428719b8589b3ca9922ae0c6fa527f47f8a98690
|
[
"MIT"
] | null | null | null |
BackendServer/handlers/BaseHandler.py
|
valiro21/mlc
|
428719b8589b3ca9922ae0c6fa527f47f8a98690
|
[
"MIT"
] | null | null | null |
"""BaseHandler for BackendServer."""
# Copyright © 2017 Alexandru Miron <mironalex96@gmail.com>
# Copyright © 2017 Valentin Rosca <rosca.valentin2012@gmail.com>
# Copyright © 2017 Cosmin Pascaru <cosmin.pascaru2@gmail.com>
import tornado.web
from DB import session_factory
class BaseHandler(tornado.web.RequestHandler):
"""BaseHandler for tornado RequestHandler. Adds option for users."""
def data_received(self, chunk):
pass
def get_current_user(self):
user = self.get_secure_cookie("user")
if isinstance(user, str):
return user
if isinstance(user, bytearray) or isinstance(user, bytes):
return user.decode()
return user
def __init__(self, application, request, **kwargs):
super().__init__(application, request, **kwargs)
def acquire_sql_session(self):
return session_factory()
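# A minimal sketch of a concrete handler built on BaseHandler (the handler name
# and response payload are illustrative assumptions, not part of this module):
#
#   class PingHandler(BaseHandler):
#       def get(self):
#           session = self.acquire_sql_session()
#           try:
#               self.write({"user": self.get_current_user()})
#           finally:
#               session.close()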
| 29.566667
| 72
| 0.691094
|
e9abd93b8e50a4acf3a1459f5a8cf9f78c20fdd9
| 879
|
py
|
Python
|
Program Files/Python/getgeorefinfo.py
|
mariannecowherd/SnowPALM
|
17eaca86590c2b2da8cd53a6ca687c20dac41f56
|
[
"MIT"
] | null | null | null |
Program Files/Python/getgeorefinfo.py
|
mariannecowherd/SnowPALM
|
17eaca86590c2b2da8cd53a6ca687c20dac41f56
|
[
"MIT"
] | null | null | null |
Program Files/Python/getgeorefinfo.py
|
mariannecowherd/SnowPALM
|
17eaca86590c2b2da8cd53a6ca687c20dac41f56
|
[
"MIT"
] | 1
|
2022-02-16T19:26:52.000Z
|
2022-02-16T19:26:52.000Z
|
import sys,os
import gdal
from gdalconst import *
# Get georeferencing information from a raster file and print to text file
src = sys.argv[1]
fname_out = sys.argv[2]
ds = gdal.Open(src, GA_ReadOnly)
if ds is None:
print('Content-Type: text/html\n')
print('Could not open ' + src)
sys.exit(1)
# Get the geotransform, the georeferencing, and the dimensions of the raster to match
transform = ds.GetGeoTransform()
wkt = ds.GetProjection()
rows = ds.RasterYSize
cols = ds.RasterXSize
ulx = transform[0]
uly = transform[3]
pixelWidth = transform[1]
pixelHeight = transform[5]
lrx = ulx + (cols * pixelWidth)
lry = uly + (rows * pixelHeight)
f_out = open(fname_out, 'w')
f_out.write(str(pixelWidth) + '\n')
f_out.write(str(pixelHeight) + '\n')
f_out.write(str(ulx) + '\n')
f_out.write(str(uly) + '\n')
f_out.write(str(lrx) + '\n')
f_out.write(str(lry))
f_out.close()
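# Example invocation (file names are placeholders): writes the pixel width and
# height followed by the upper-left and lower-right corner coordinates of
# dem.tif to georef.txt, one value per line.
#
#   python getgeorefinfo.py dem.tif georef.txt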
| 24.416667
| 85
| 0.700796
|
143913569192476050cc1b13722ece41aef16963
| 6,603
|
py
|
Python
|
incognitosdk/Response.py
|
abduramann/incognitosdk-python
|
67d6e4467567798a3104c203374543ecb17c775f
|
[
"MIT"
] | 1
|
2021-05-03T00:41:40.000Z
|
2021-05-03T00:41:40.000Z
|
incognitosdk/Response.py
|
abduramann/incognitosdk-python
|
67d6e4467567798a3104c203374543ecb17c775f
|
[
"MIT"
] | 1
|
2020-12-20T12:08:14.000Z
|
2020-12-20T12:08:14.000Z
|
incognitosdk/Response.py
|
abduramann/incognitosdk-python
|
67d6e4467567798a3104c203374543ecb17c775f
|
[
"MIT"
] | 1
|
2020-11-27T00:44:11.000Z
|
2020-11-27T00:44:11.000Z
|
import json
import re
import logging
class Response:
def __init__(self, response, more_info=None):
self.response = response
self.more_info = more_info
if more_info is not None:
logging.debug(more_info)
logging.debug(self.__str__())
def __str__(self):
return f'\n{json.dumps(self.data(), indent=3)}'
def data(self):
if type(self.response) is str:
return json.loads(self.response) # response from WebSocket
return json.loads(self.response.text) # response from rpc
def params(self):
return Params(self)
    def size(self):
        if isinstance(self.response, str):  # response from WebSocket
            return len(self.response)
        return len(self.response.content)  # response from rpc
    def response_time(self):
        if isinstance(self.response, str):  # response from WebSocket
            return None
        return self.response.elapsed.total_seconds()  # response from rpc
def is_success(self):
if self.data()['Error'] is None:
return True
return False
def get_error_trace(self):
if self.data()['Error'] is None:
return ''
return StackTrace(self.data()['Error']['StackTrace'][0:256])
def get_error_msg(self):
if self.data()['Error'] is None:
return None
return self.data()['Error']['Message']
def find_in_result(self, string):
for k, v in self.data()["Result"].items():
if k == str(string):
return True
return False
def get_result(self, string=None):
try:
if string is None:
return self.data()['Result']
return self.data()['Result']['Result'][string] if 'Result' in self.data()['Result'] else \
self.data()['Result'][string]
except(KeyError, TypeError):
return None
def get_hash(self):
return self.get_result("Hash")
def get_tx_id(self):
return self.get_result("TxID")
def get_beacon_height(self):
return self.get_result("BeaconHeight")
def get_pde_pool_pairs(self):
return self.get_result("PDEPoolPairs")
def get_pde_share(self):
return self.get_result("PDEShares")
def get_token_id_1_str(self):
return self.get_result("TokenID1Str")
def get_token_id_2_str(self):
return self.get_result("TokenID2Str")
def get_token_id(self):
return self.get_result("TokenID")
def get_returned_1_amount(self):
return self.get_result("Returned1Amount")
def get_returned_2_amount(self):
return self.get_result("Returned2Amount")
def get_contributed_1_amount(self):
return self.get_result("Contributed1Amount")
def get_contributed_2_amount(self):
return self.get_result("Contributed2Amount")
def get_fee(self):
try:
return self.data()['Result']['Result']['Fee']
except KeyError:
return self.data()['Result']['Fee']
def get_privacy(self):
return self.get_result("IsPrivacy")
def get_custom_token_privacy(self):
return self.get_result("PrivacyCustomTokenIsPrivacy")
def get_privacy_custom_token_data(self):
cdata = self.get_result("PrivacyCustomTokenData")
return {} if cdata is None or cdata.strip() == "" else json.loads(cdata)
def get_metadata(self):
metadata = self.get_result("Metadata")
return {} if metadata is None or metadata.strip() == "" else json.loads(metadata)
def get_balance(self):
return self.get_result()
def get_block_height(self):
blockHeight = self.get_result("BlockHeight")
return self.get_result("Height") if blockHeight is None else blockHeight
def get_tx_hashes(self):
# for retrieveblockbyheight database v1
ret = self.get_result("TxHashes")
if ret is None and 0 in self.get_result():
# for retrieveblockbyheight database v2
ret = self.get_result()[0]["TxHashes"]
return ret
def get_list_txs(self):
return self.get_result("ListTxs")
def get_block_hash(self):
return self.get_result("BlockHash")
def get_shard_id(self):
return self.get_result('ShardID')
def get_subscription_type(self):
return self.get_result()['Subscription']
def get_accepted_trades(self):
return self.get_result('PDEAcceptedTradesV2')
def get_proof_detail_input_coin_value_prv(self):
try:
return self.get_result('ProofDetail')['InputCoins'][0]['CoinDetails']['Value']
except TypeError:
return None
def get_proof_detail_output_coin_value_prv(self):
try:
return self.get_result('ProofDetail')['OutputCoins'][0]['CoinDetails']['Value']
except TypeError:
return None
def is_prv_privacy(self):
"""
check if prv transaction is privacy or not
:return: True = privacy, False = no privacy
"""
result = self.get_transaction_by_hash()
if result.get_privacy() is True and result.get_proof_detail_input_coin_value_prv() == 0:
return True
return False
def get_proof_detail_input_coin_value_custom_token(self):
try:
return self.get_result()['PrivacyCustomTokenProofDetail']['InputCoins'][0]['CoinDetails']['Value']
except TypeError:
return None
def get_mem_pool_transactions_id_list(self) -> list:
hashes = self.get_list_txs()
if hashes is None:
return []
tx_id_list = list()
for entry in hashes:
tx_id_list.append(entry['TxID'])
return tx_id_list
class StackTrace:
def __init__(self, stack_string):
self.stack_string = stack_string
def __str__(self):
return self.stack_string
def get_error_codes(self):
code_list = re.findall("(-[0-9]\\w+: )", self.stack_string)
return ''.join([str(elem) for elem in code_list])
def get_message(self):
i_start = len(self.get_error_codes())
i_end = str.index(self.stack_string, 'github.com')
return str(self.stack_string[i_start:i_end])
def get_estimated_fee(self):
return re.search("fee=(.*)", self.stack_string).group(1)
class Params:
def __init__(self, response: Response):
self.response = response
def params(self):
return self.response.data()['Params']
def get_beacon_height(self):
return self.params()[0]["BeaconHeight"]
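# A minimal usage sketch with a hand-written RPC payload (the hash value is made
# up for illustration):
#
#   raw = '{"Result": {"Hash": "abc123"}, "Error": null, "Params": []}'
#   resp = Response(raw)
#   assert resp.is_success()
#   assert resp.get_hash() == "abc123"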
| 30.150685
| 110
| 0.629259
|
b24a9299278ae69bd8c63c36613ccc8aaf0dd0de
| 2,141
|
py
|
Python
|
cnn/unet_model/unet.py
|
SoonerRobotics/igvc_software_2021
|
31d2f65fa9876cc7f9b2eb3c72a82817ef7a177c
|
[
"MIT"
] | 4
|
2020-07-07T14:56:56.000Z
|
2021-08-13T23:31:07.000Z
|
cnn/unet_model/unet.py
|
pradumn203/igvc-winners-2021
|
658233609054eafac59603a77b2a092dc002e145
|
[
"MIT"
] | 4
|
2020-09-22T01:53:48.000Z
|
2021-07-17T01:02:31.000Z
|
cnn/unet_model/unet.py
|
pradumn203/igvc-winners-2021
|
658233609054eafac59603a77b2a092dc002e145
|
[
"MIT"
] | 3
|
2021-06-29T05:21:18.000Z
|
2021-08-23T05:03:27.000Z
|
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets, layers, models
def SCRUNet():
inp = layers.Input(shape=(256, 256, 3))
# Convolution layers to help learn some basic kernels
pre_conv = layers.Conv2D(8, (3, 3), strides=(1, 1), padding='same', activation='relu')(inp)
# Down sampling
down_sample_0 = layers.Conv2D(16, kernel_size=(3, 3), strides=(2,2), padding='same', activation='relu')(pre_conv)
down_sample_1 = layers.Conv2D(24, kernel_size=(3, 3), strides=(2,2), padding='same', activation='relu')(down_sample_0)
down_sample_2 = layers.Conv2D(24, kernel_size=(3, 3), strides=(2,2), padding='same', activation='relu')(down_sample_1)
# Most compressed layer in the network
latent = layers.Conv2D(32, kernel_size=(3, 3), strides=(2,2), padding='same', activation='relu')(down_sample_2)
# Upsampling with skip connections
up_sample_0 = layers.Conv2DTranspose(24, (3, 3), strides=(2,2), padding='same', activation='relu')(latent)
skip_0 = layers.Concatenate()([up_sample_0, down_sample_2])
up_sample_1 = layers.Conv2DTranspose(24, (3, 3), strides=(2,2), padding='same', activation='relu')(skip_0)
skip_1 = layers.Concatenate()([up_sample_1, down_sample_1])
up_sample_2 = layers.Conv2DTranspose(16, (3, 3), strides=(2,2), padding='same', activation='relu')(skip_1)
skip_2 = layers.Concatenate()([up_sample_2, down_sample_0])
up_sample_3 = layers.Conv2DTranspose(16, (3, 3), strides=(2,2), padding='same', activation='relu')(skip_2)
skip_3 = layers.Concatenate()([up_sample_3, pre_conv])
# Post convolution layers
post_conv = layers.Conv2DTranspose(8, (3, 3), strides=(1, 1), padding='same', activation='relu')(skip_3)
output = layers.Conv2DTranspose(1, (1, 1), strides=(1,1), padding='same', activation='sigmoid')(post_conv)
model = models.Model(inputs=inp, outputs=output)
# Bind the optimizer and the loss function to the model
model.compile(optimizer='adam',
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=[tf.keras.metrics.BinaryAccuracy()])
return model
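# A minimal usage sketch (the training arrays are placeholders; the network
# expects 256x256 RGB inputs and 256x256 single-channel masks as targets):
#
#   model = SCRUNet()
#   model.summary()
#   model.fit(train_images, train_masks, batch_size=8, epochs=10)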
| 48.659091
| 122
| 0.692667
|
bcd38b4d7ae190ff1c3f222bc2c26ae8de3bd766
| 9,516
|
py
|
Python
|
res/scripts/client/gui/mods/gambiter/flash.py
|
chipsi007/GUIFlash
|
78d711d336cf55d73e62d0ab996fc18bbfbd893f
|
[
"MIT"
] | 5
|
2017-12-24T13:04:11.000Z
|
2020-11-16T09:02:01.000Z
|
res/scripts/client/gui/mods/gambiter/flash.py
|
chipsi007/GUIFlash
|
78d711d336cf55d73e62d0ab996fc18bbfbd893f
|
[
"MIT"
] | null | null | null |
res/scripts/client/gui/mods/gambiter/flash.py
|
chipsi007/GUIFlash
|
78d711d336cf55d73e62d0ab996fc18bbfbd893f
|
[
"MIT"
] | 2
|
2017-12-25T23:01:46.000Z
|
2021-05-25T20:36:36.000Z
|
# -*- coding: utf-8 -*-
__all__ = ['COMPONENT_TYPE', 'COMPONENT_ALIGN', 'COMPONENT_EVENT']
import Event
import BattleReplay
import json, codecs
from gui.app_loader import g_appLoader
from gui.app_loader.settings import GUI_GLOBAL_SPACE_ID as SPACE_ID
from gui.shared import g_eventBus, events, EVENT_BUS_SCOPE
from gui.Scaleform.framework.entities.View import View
from gui.Scaleform.framework.managers.loaders import ViewLoadParams
from gui.Scaleform.framework import g_entitiesFactories, ViewSettings, ViewTypes, ScopeTemplates
from utils import LOG_NOTE, LOG_DEBUG, LOG_ERROR
class CONSTANTS(object):
FILE_NAME = 'GUIFlash.swf'
VIEW_ALIAS = 'GUIFlash'
class COMPONENT_TYPE(object):
PANEL = 'Panel'
LABEL = 'Label'
IMAGE = 'Image'
SHAPE = 'Shape'
ALL_COMPONENT_TYPES = (COMPONENT_TYPE.PANEL, COMPONENT_TYPE.LABEL, COMPONENT_TYPE.IMAGE, COMPONENT_TYPE.SHAPE)
class COMPONENT_ALIGN(object):
LEFT = 'left'
RIGHT = 'right'
CENTER = 'center'
TOP = "top"
BOTTOM = 'bottom'
class COMPONENT_STATE(object):
INIT = 1
LOAD = 2
UNLOAD = 3
class COMPONENT_EVENT(object):
LOADED = Event.Event()
UPDATED = Event.Event()
UNLOADED = Event.Event()
class Cache(object):
def __init__(self):
self.components = {}
def create(self, alias, type, props):
self.components[alias] = {'type': type, 'props': props}
def update(self, alias, props):
self.components[alias].get('props').update(props)
def delete(self, alias):
del self.components[alias]
def isComponent(self, alias):
return alias in self.components
def getComponent(self, alias=None):
if alias is None:
return self.components
return self.components.get(alias)
def getKeys(self):
return sorted(self.components.keys())
def getCustomizedType(self, type):
return ''.join(type.split()).capitalize()
def isTypeValid(self, type):
return type in ALL_COMPONENT_TYPES
    # Read the component configuration from a JSON file and return the parsed data.
def readConfig(self, file):
LOG_DEBUG('GUIFlash :', 'Read config from file "%s".' % file)
with open(file, "r") as file:
data = json.load(file)
return data
    # Write the component configuration data to a JSON file.
def saveConfig(self, file, data):
LOG_DEBUG('GUIFlash :', 'Save config in file "%s".' % file)
with open(file, 'wb') as file:
json.dump(data, codecs.getwriter('utf-8')(file), indent=4, sort_keys=True, ensure_ascii=False)
class Views(object):
def __init__(self):
self.ui = None
def createAll(self):
for alias in g_guiCache.getKeys():
component = g_guiCache.getComponent(alias)
self.create(alias, component.get('type'), component.get('props'))
def create(self, alias, type, props):
if self.ui is not None:
self.ui.as_createS(alias, type, props)
def update(self, alias, props):
if self.ui is not None:
self.ui.as_updateS(alias, props)
def delete(self, alias):
if self.ui is not None:
self.ui.as_deleteS(alias)
def cursor(self, isShow):
if self.ui is not None:
self.ui.as_cursorS(isShow)
def fullStats(self, isShow):
if self.ui is not None:
self.ui.as_fullStatsS(isShow)
def radialMenu(self, isShow):
if self.ui is not None:
self.ui.as_radialMenuS(isShow)
class Hooks(object):
def _start(self):
g_appLoader.onGUISpaceEntered += self.__onGUISpaceEntered
g_appLoader.onGUISpaceLeft += self.__onGUISpaceLeft
def _destroy(self):
g_appLoader.onGUISpaceEntered -= self.__onGUISpaceEntered
g_appLoader.onGUISpaceLeft -= self.__onGUISpaceLeft
def _populate(self):
g_eventBus.addListener(events.GameEvent.SHOW_CURSOR, self.__handleShowCursor, EVENT_BUS_SCOPE.GLOBAL)
g_eventBus.addListener(events.GameEvent.HIDE_CURSOR, self.__handleHideCursor, EVENT_BUS_SCOPE.GLOBAL)
g_eventBus.addListener(events.GameEvent.RADIAL_MENU_CMD, self.__toggleRadialMenu, scope=EVENT_BUS_SCOPE.BATTLE)
g_eventBus.addListener(events.GameEvent.FULL_STATS, self.__toggleFullStats, scope=EVENT_BUS_SCOPE.BATTLE)
def _dispose(self):
g_eventBus.removeListener(events.GameEvent.SHOW_CURSOR, self.__handleShowCursor, EVENT_BUS_SCOPE.GLOBAL)
g_eventBus.removeListener(events.GameEvent.HIDE_CURSOR, self.__handleHideCursor, EVENT_BUS_SCOPE.GLOBAL)
g_eventBus.removeListener(events.GameEvent.RADIAL_MENU_CMD, self.__toggleRadialMenu,
scope=EVENT_BUS_SCOPE.BATTLE)
g_eventBus.removeListener(events.GameEvent.FULL_STATS, self.__toggleFullStats, scope=EVENT_BUS_SCOPE.BATTLE)
def __onGUISpaceEntered(self, spaceID):
if spaceID == SPACE_ID.LOGIN:
g_guiEvents.goToLogin()
elif spaceID == SPACE_ID.LOBBY:
g_guiEvents.goToLobby()
elif spaceID == SPACE_ID.BATTLE_LOADING:
g_guiEvents.goToBattleLoading()
elif spaceID == SPACE_ID.BATTLE:
g_guiEvents.goToBattle()
def __onGUISpaceLeft(self, spaceID):
if spaceID == SPACE_ID.LOBBY:
g_guiEvents.leaveLobby()
elif spaceID == SPACE_ID.BATTLE:
g_guiEvents.leaveBattle()
def __handleShowCursor(self, _):
isShow = True
g_guiEvents.toggleCursor(isShow)
def __handleHideCursor(self, _):
isShow = False
g_guiEvents.toggleCursor(isShow)
def __toggleFullStats(self, event):
isDown = event.ctx['isDown']
g_guiEvents.toggleFullStats(isDown)
def __toggleRadialMenu(self, event):
if BattleReplay.isPlaying():
return
isDown = event.ctx['isDown']
g_guiEvents.toggleRadialMenu(isDown)
class Events(object):
def goToLogin(self):
pass
def goToLobby(self):
pass
def goToBattleLoading(self):
pass
def goToBattle(self):
g_appLoader.getApp().loadView(ViewLoadParams(CONSTANTS.VIEW_ALIAS))
def leaveLobby(self):
pass
def leaveBattle(self):
pass
def toggleCursor(self, isShow):
g_guiViews.cursor(isShow)
def toggleFullStats(self, isShow):
g_guiViews.fullStats(isShow)
def toggleRadialMenu(self, isShow):
g_guiViews.radialMenu(isShow)
class Settings(object):
def _start(self):
g_entitiesFactories.addSettings(
ViewSettings(CONSTANTS.VIEW_ALIAS, Flash_UI, CONSTANTS.FILE_NAME, ViewTypes.WINDOW, None,
ScopeTemplates.GLOBAL_SCOPE))
def _destroy(self):
g_entitiesFactories.removeSettings(CONSTANTS.VIEW_ALIAS)
class Flash_Meta(View):
def py_log(self, *args):
self._printOverrideError('py_log')
def py_update(self, alias, props):
self._printOverrideError('py_update')
def as_createS(self, alias, type, props):
if self._isDAAPIInited():
return self.flashObject.as_create(alias, type, props)
def as_updateS(self, alias, props):
if self._isDAAPIInited():
return self.flashObject.as_update(alias, props)
def as_deleteS(self, alias):
if self._isDAAPIInited():
return self.flashObject.as_delete(alias)
def as_cursorS(self, isShow):
if self._isDAAPIInited():
return self.flashObject.as_cursor(isShow)
def as_fullStatsS(self, isShow):
if self._isDAAPIInited():
return self.flashObject.as_fullStats(isShow)
def as_radialMenuS(self, isShow):
if self._isDAAPIInited():
return self.flashObject.as_radialMenu(isShow)
class Flash_UI(Flash_Meta):
def _populate(self):
super(Flash_UI, self)._populate()
g_guiHooks._populate()
g_guiViews.ui = self
g_guiViews.createAll()
def _dispose(self):
g_guiViews.ui = None
g_guiHooks._dispose()
super(Flash_UI, self)._dispose()
def py_log(self, *args):
LOG_NOTE('GUIFlash :', *args)
def py_update(self, alias, props):
g_guiCache.update(alias, props.toDict())
COMPONENT_EVENT.UPDATED(alias, props.toDict())
class GUIFlash(object):
def __init__(self):
g_guiSettings._start()
g_guiHooks._start()
def __del__(self):
g_guiHooks._destroy()
g_guiSettings._destroy()
def createComponent(self, alias, type, props=None):
if not g_guiCache.isComponent(alias):
type = g_guiCache.getCustomizedType(type)
if g_guiCache.isTypeValid(type):
g_guiCache.create(alias, type, props)
g_guiViews.create(alias, type, props)
else:
LOG_ERROR('GUIFlash :', 'Invalid type of component "%s"!' % alias)
else:
LOG_ERROR('GUIFlash :', 'Component "%s" already exists!' % alias)
def updateComponent(self, alias, props):
if g_guiCache.isComponent(alias):
g_guiCache.update(alias, props)
g_guiViews.update(alias, props)
else:
LOG_ERROR('GUIFlash :', 'Component "%s" not found!' % alias)
def deleteComponent(self, alias):
if g_guiCache.isComponent(alias):
g_guiCache.delete(alias)
g_guiViews.delete(alias)
else:
LOG_ERROR('GUIFlash :', 'Component "%s" not found!' % alias)
g_guiCache = Cache()
g_guiViews = Views()
g_guiHooks = Hooks()
g_guiEvents = Events()
g_guiSettings = Settings()
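# A minimal usage sketch from a client mod (the alias and property values are
# illustrative assumptions, not values used by this module):
#
#   flash = GUIFlash()
#   flash.createComponent('myMod.panel', COMPONENT_TYPE.PANEL,
#                         {'x': 100, 'y': 100, 'width': 200, 'height': 50})
#   flash.updateComponent('myMod.panel', {'x': 120})
#   flash.deleteComponent('myMod.panel')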
| 29.924528
| 119
| 0.656263
|
3890e8628d3618c460d7317002d62cf12b3c5e0c
| 592
|
py
|
Python
|
setup.py
|
fmalina/emails
|
9bb467433e9ad8c8109d76edc894eaaaa309466d
|
[
"BSD-3-Clause"
] | 4
|
2015-04-02T11:59:32.000Z
|
2017-07-08T21:33:11.000Z
|
setup.py
|
fmalina/django-emails
|
66f22c10e433620693d4fee67b5a49f0aecb7ea1
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
fmalina/django-emails
|
66f22c10e433620693d4fee67b5a49f0aecb7ea1
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup, find_packages
import emails
setup(
name='emails',
version=emails.__version__,
description='Mailing and processing for high volume senders and recipients.',
long_description=open('README.rst').read(),
license='BSD License',
platforms=['OS Independent'],
keywords='email,dkim,imap,subscriptions,bounces',
author='fmalina',
author_email='fmalina@gmail.com',
url='https://github.com/fmalina/emails',
packages=find_packages(),
include_package_data=True,
install_requires=open('requirements.txt').read().split(),
)
| 31.157895
| 81
| 0.717905
|
b3233141dae445b78877b25ad51adeea65a91865
| 1,400
|
py
|
Python
|
xl2code/writers/java_writer.py
|
youlanhai/ExcelToCode
|
d160c75b9b7a305f4b3367d85ee0550572869d3e
|
[
"MIT"
] | 47
|
2017-06-23T07:47:50.000Z
|
2022-03-07T22:36:19.000Z
|
xl2code/writers/java_writer.py
|
twjitm/ExcelToCode
|
d160c75b9b7a305f4b3367d85ee0550572869d3e
|
[
"MIT"
] | 1
|
2019-03-12T06:12:50.000Z
|
2019-04-03T00:50:01.000Z
|
xl2code/writers/java_writer.py
|
twjitm/ExcelToCode
|
d160c75b9b7a305f4b3367d85ee0550572869d3e
|
[
"MIT"
] | 23
|
2017-05-12T07:46:07.000Z
|
2022-01-22T03:19:50.000Z
|
# -*- coding: utf-8 -*-
from copy import copy
from json_writer import JsonWriter
import util
# This writer produces a Java-specific JSON format rather than Java code.
# JSON format:
#   The output as a whole is a dict with two elements, "header" and "body".
#   "header" has two rows:
#     the first row is the header text,
#     the second row is the column names.
#   "body" is a two-dimensional array:
#     its cells correspond to the individual Excel cells.
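# A sketch of the resulting JSON, assuming a sheet with two columns "id" and
# "name" (all values below are made up for illustration):
#
#   {
#       "class": "Item",
#       "multiKey": false,
#       "header": [["ID", "Name"], ["id", "name"]],
#       "body": [[1, "sword"], [2, "shield"]]
#   }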
class JavaWriter(JsonWriter):
def begin_write(self):
super(JavaWriter, self).begin_write()
module_info = self.data_module.info
parser_name = module_info["parser"].split('.')[-1]
class_name = util.to_class_name(parser_name)
self.write_value("class", class_name)
self.is_multi_key = module_info["multi_key"]
self.write_value("multiKey", self.is_multi_key)
sheet_types = module_info["sheet_types"]["main_sheet"]
fields = sheet_types.keys()
fields.sort()
self.fields = fields
texts = [sheet_types[field][2] for field in fields]
headers = [texts, fields, ]
self.write_value("header", headers, 2)
def write_sheet(self, name, sheet):
if name != "main_sheet": return
max_indent = self.max_indent
if self.is_multi_key: max_indent += 1
key_field = self.fields[0]
body = []
keys = sheet.keys()
keys.sort()
for k in keys:
row = sheet[k]
new_row = None
if isinstance(row, list):
new_row = []
for sub_row in row:
new_row.append(sub_row)
else:
new_row = row
body.append(new_row)
self.write_value("body", body, max_indent)
def write_module(self, module): pass
| 20.588235
| 56
| 0.69
|
d74eb42e1b80dce77792a325ffce3f2aa0eb34a9
| 2,145
|
py
|
Python
|
runtest.py
|
daxm/yfinance
|
e57ef355f827766f678cbf9c75fc81ec33709c91
|
[
"Apache-2.0"
] | null | null | null |
runtest.py
|
daxm/yfinance
|
e57ef355f827766f678cbf9c75fc81ec33709c91
|
[
"Apache-2.0"
] | null | null | null |
runtest.py
|
daxm/yfinance
|
e57ef355f827766f678cbf9c75fc81ec33709c91
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Yahoo! Finance market data downloader (+fix for Pandas Datareader)
# https://github.com/ranaroussi/yfinance
"""
Sanity check that the most common library uses all work
- Stock: Microsoft
- ETF: Russell 2000 Growth
- Mutual fund: Vanguard 500 Index fund
- Index: S&P500
- Currency BTC-USD
"""
from __future__ import print_function
import yfinance as yf
import typing
def test_yfinance():
for symbol in ['MSFT', 'IWO', 'VFINX', '^GSPC', 'BTC-USD']:
print(">>", symbol, end=' ... ')
ticker = yf.Ticker(symbol)
# always should have info and history for valid symbols
assert(ticker.info is not None and ticker.info != {})
assert(ticker.history(period="max").empty is False)
# following should always gracefully handled, no crashes
ticker.cashflow
ticker.balance_sheet
ticker.financials
ticker.sustainability
ticker.major_holders
ticker.institutional_holders
ticker.mutualfund_holders
print("OK")
    # Ford previously had no institutional investors table or mutual fund holders
ticker = yf.Ticker('F')
print(">> F", end=" ... ")
assert(ticker.info is not None and ticker.info != {})
assert(ticker.major_holders is not None)
assert(ticker.institutional_holders is not None) # Now has institutional investors.
print("OK")
    # NKLA previously had no institutional investors table or mutual fund holders
ticker = yf.Ticker('NKLA')
print(">> NKLA", end=" ... ")
assert(ticker.info is not None and ticker.info != {})
assert(ticker.major_holders is not None)
assert(ticker.institutional_holders is not None) # Now has institutional investors.
print("OK")
    # NESN.SW previously had no institutional investors table or mutual fund holders
ticker = yf.Ticker('NESN.SW')
print(">> NESN.SW", end=" ... ")
assert(ticker.info is not None and ticker.info != {})
assert(ticker.major_holders is not None)
assert(ticker.institutional_holders is not None) # Now has institutional investors.
print("OK")
if __name__ == "__main__":
test_yfinance()
| 32.014925
| 88
| 0.670396
|
e4a12d3f1ae69f02d18c532deb42ff5e7c374daf
| 274
|
py
|
Python
|
tests/conftest.py
|
unt-libraries/aubrey-transcription
|
0383d67a0ed3f3ddaa306edfb09b2da1364e4178
|
[
"BSD-3-Clause"
] | null | null | null |
tests/conftest.py
|
unt-libraries/aubrey-transcription
|
0383d67a0ed3f3ddaa306edfb09b2da1364e4178
|
[
"BSD-3-Clause"
] | 8
|
2018-08-22T19:14:38.000Z
|
2019-11-22T17:12:56.000Z
|
tests/conftest.py
|
unt-libraries/aubrey-transcription
|
0383d67a0ed3f3ddaa306edfb09b2da1364e4178
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from aubrey_transcription import create_app
@pytest.fixture
def app():
app = create_app({'TESTING': True})
yield app
@pytest.fixture
def client(app):
return app.test_client()
@pytest.fixture
def runner(app):
return app.test_cli_runner()
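These fixtures follow the usual Flask application-factory testing pattern: `app` builds a throwaway app with `TESTING` enabled, `client` wraps it in a test client, and `runner` exposes the CLI. A minimal sketch of a test module that would consume them follows; the `/` path is an illustrative assumption, not part of the original suite.
# Hypothetical companion module (e.g. tests/test_app.py); illustrative only.
def test_app_uses_testing_config(app):
    # the create_app({'TESTING': True}) call in the fixture should surface here
    assert app.config['TESTING'] is True

def test_client_can_issue_requests(client):
    # '/' is an assumed path; any route registered by the app could be used instead
    response = client.get('/')
    assert response.status_code in (200, 404)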
| 13.7
| 43
| 0.718978
|
7211ad9c3265f996c1b54e4160f286df0f05c9b6
| 3,142
|
py
|
Python
|
app/users.py
|
viraajpunia/Mini-Amazon
|
666417ce697c745f25c3ce978161483943a66ae0
|
[
"MIT"
] | null | null | null |
app/users.py
|
viraajpunia/Mini-Amazon
|
666417ce697c745f25c3ce978161483943a66ae0
|
[
"MIT"
] | 1
|
2021-12-08T23:16:38.000Z
|
2021-12-08T23:16:38.000Z
|
app/users.py
|
viraajpunia/Mini-Amazon
|
666417ce697c745f25c3ce978161483943a66ae0
|
[
"MIT"
] | null | null | null |
from flask import render_template, redirect, url_for, flash, request
from werkzeug.urls import url_parse
from flask_login import login_user, logout_user, current_user
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
from flask_babel import _, lazy_gettext as _l
from .models.user import User
from flask import Blueprint
bp = Blueprint('users', __name__)
class LoginForm(FlaskForm):
email = StringField(_l('Email'), validators=[DataRequired(), Email()])
password = PasswordField(_l('Password'), validators=[DataRequired()])
remember_me = BooleanField(_l('Remember Me'))
submit = SubmitField(_l('Sign In'))
@bp.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('index.index'))
form = LoginForm()
if form.validate_on_submit():
user = User.get_by_auth(form.email.data, form.password.data)
#return redirect(url_for('index.nonsellerpublicinfo',variable=user.id))
if user is None:
flash('Invalid email or password')
return redirect(url_for('users.login'))
login_user(user)
next_page = request.args.get('next')
if not next_page or url_parse(next_page).netloc != '':
next_page = url_for('index.index')
return redirect(next_page)
return render_template('login.html', title='Sign In', form=form)
class RegistrationForm(FlaskForm):
firstname = StringField(_l('First Name'), validators=[DataRequired()])
middlename = StringField(_l('Middle Name'), validators=[DataRequired()])
lastname = StringField(_l('Last Name'), validators=[DataRequired()])
email = StringField(_l('Email'), validators=[DataRequired(), Email()])
password = PasswordField(_l('Password'), validators=[DataRequired()])
password2 = PasswordField(
_l('Repeat Password'), validators=[DataRequired(),
EqualTo('password')])
submit = SubmitField(_l('Register'))
address = StringField(_l('Address'), validators=[DataRequired()])
def validate_email(self, email):
if User.email_exists(email.data):
raise ValidationError(_('Already a user with this email.'))
@bp.route('/register', methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('index.newuseracctpage'))
form = RegistrationForm()
if form.validate_on_submit():
if User.register(form.firstname.data,
form.middlename.data,
form.lastname.data,
form.email.data,
form.address.data,
form.password.data):
flash('Congratulations, you are now a registered user!')
return redirect(url_for('users.login'))
return render_template('register.html', title='Register', form=form)
@bp.route('/logout')
def logout():
logout_user()
return redirect(url_for('index.index'))
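The blueprint above only defines the routes; a hedged sketch of how it would typically be registered inside the project's Flask application factory follows. The factory body, the SECRET_KEY value, and the load_user callback are assumptions for illustration, since the real factory and login setup live elsewhere in the repo.
# Illustrative wiring only -- not the project's actual factory.
from flask import Flask
from flask_login import LoginManager

def create_app_sketch():
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'change-me'  # FlaskForm (CSRF) and sessions need a secret key
    login_manager = LoginManager(app)

    from .users import bp as users_bp  # the blueprint defined above
    app.register_blueprint(users_bp)

    @login_manager.user_loader
    def load_user(user_id):
        # flask_login needs a way to reload the user from the session;
        # the User.get() helper assumed here is hypothetical
        return User.get(user_id)

    return app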
| 38.790123
| 81
| 0.662954
|
b4324cc08178e72b775618e3b83e8f7cb8d94ae8
| 1,061
|
py
|
Python
|
aoc2021/aoc/day15/dijkstras.py
|
e-jameson/aoc
|
f26196d5564a9ac8027532c276af00aaf3718c6e
|
[
"MIT"
] | null | null | null |
aoc2021/aoc/day15/dijkstras.py
|
e-jameson/aoc
|
f26196d5564a9ac8027532c276af00aaf3718c6e
|
[
"MIT"
] | null | null | null |
aoc2021/aoc/day15/dijkstras.py
|
e-jameson/aoc
|
f26196d5564a9ac8027532c276af00aaf3718c6e
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
class Graph:
def __init__(self):
self.nodes = set()
self.edges = defaultdict(list)
self.distances = {}
def add_node(self, value):
self.nodes.add(value)
def add_edge(self, from_node, to_node, distance):
self.edges[from_node].append(to_node)
self.edges[to_node].append(from_node)
self.distances[(from_node, to_node)] = distance
def dijsktra(graph, initial):
visited = {initial: 0}
path = {}
nodes = set(graph.nodes)
while nodes:
min_node = None
for node in nodes:
if node in visited:
if min_node is None:
min_node = node
elif visited[node] < visited[min_node]:
min_node = node
if min_node is None:
break
nodes.remove(min_node)
current_weight = visited[min_node]
for edge in graph.edges[min_node]:
weight = current_weight + graph.distances[(min_node, edge)]
if edge not in visited or weight < visited[edge]:
visited[edge] = weight
path[edge] = min_node
return visited, path
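A small usage sketch of the Graph class and dijsktra() function above; the node labels and edge weights are made up for illustration and are not part of the puzzle solution.
# Illustrative usage of the definitions above (not part of the original solution).
if __name__ == "__main__":
    g = Graph()
    for n in ("a", "b", "c"):
        g.add_node(n)
    # dijsktra() looks distances up keyed by (current_node, neighbour), so each
    # undirected edge is registered in both directions here
    for u, v, w in (("a", "b", 1), ("b", "c", 2), ("a", "c", 5)):
        g.add_edge(u, v, w)
        g.add_edge(v, u, w)
    visited, path = dijsktra(g, "a")
    print(visited)  # {'a': 0, 'b': 1, 'c': 3}
    print(path)     # {'b': 'a', 'c': 'b'}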
| 23.577778
| 65
| 0.651272
|
ea292e44581a61cc7068f9e0abb7fd0d929f3d72
| 1,020
|
py
|
Python
|
tests/sparseml/onnx/sparsification/test_info.py
|
clementpoiret/sparseml
|
8442a6ef8ba11fb02f5e51472dd68b72438539b9
|
[
"Apache-2.0"
] | 922
|
2021-02-04T17:51:54.000Z
|
2022-03-31T20:49:26.000Z
|
tests/sparseml/onnx/sparsification/test_info.py
|
clementpoiret/sparseml
|
8442a6ef8ba11fb02f5e51472dd68b72438539b9
|
[
"Apache-2.0"
] | 197
|
2021-02-04T22:17:21.000Z
|
2022-03-31T13:58:55.000Z
|
tests/sparseml/onnx/sparsification/test_info.py
|
clementpoiret/sparseml
|
8442a6ef8ba11fb02f5e51472dd68b72438539b9
|
[
"Apache-2.0"
] | 80
|
2021-02-04T22:20:14.000Z
|
2022-03-30T19:36:15.000Z
|
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sparseml.base import Framework
from sparseml.onnx.sparsification import sparsification_info
from sparseml.sparsification import sparsification_info as base_sparsification_info
def test_sparsification_info():
base_info = base_sparsification_info(Framework.onnx)
info = sparsification_info()
assert base_info == info
assert len(info.modifiers) == 0 # TODO: update once available
| 39.230769
| 83
| 0.776471
|
363674ab097633c206abddc6c3e8ca5e7fa79235
| 4,689
|
py
|
Python
|
scripts/retrieve_analytics.py
|
Carberra/analytics
|
3ad7206778c2ef36c6bb46075ba7d6f4dabd7a56
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/retrieve_analytics.py
|
Carberra/analytics
|
3ad7206778c2ef36c6bb46075ba7d6f4dabd7a56
|
[
"BSD-3-Clause"
] | 2
|
2021-01-17T20:57:09.000Z
|
2021-01-17T23:35:19.000Z
|
scripts/retrieve_analytics.py
|
Carberra/analytics
|
3ad7206778c2ef36c6bb46075ba7d6f4dabd7a56
|
[
"BSD-3-Clause"
] | 1
|
2022-01-12T12:11:04.000Z
|
2022-01-12T12:11:04.000Z
|
import argparse
import datetime as dt
import json
import os
from pathlib import Path
import google.oauth2.credentials
import google_auth_oauthlib.flow
import pandas as pd
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
DATA_PATH = Path("../data")
SECRET_FILE = DATA_PATH / "secrets.json"
API_SERVICE_NAME = "youtubeAnalytics"
API_VERSION = "v2"
SCOPES = (
"https://www.googleapis.com/auth/yt-analytics.readonly",
"https://www.googleapis.com/auth/yt-analytics-monetary.readonly",
)
METRICS = (
"views",
"redViews",
"comments",
"likes",
"dislikes",
"videosAddedToPlaylists",
"videosRemovedFromPlaylists",
"shares",
"estimatedMinutesWatched",
"estimatedRedMinutesWatched",
"averageViewDuration",
"averageViewPercentage",
"annotationClickThroughRate",
"annotationCloseRate",
"annotationImpressions",
"annotationClickableImpressions",
"annotationClosableImpressions",
"annotationClicks",
"annotationCloses",
"cardClickRate",
"cardTeaserClickRate",
"cardImpressions",
"cardTeaserImpressions",
"cardClicks",
"cardTeaserClicks",
"subscribersGained",
"subscribersLost",
# "uniques",
"estimatedRevenue",
"estimatedAdRevenue",
"grossRevenue",
"estimatedRedPartnerRevenue",
"monetizedPlaybacks",
"playbackBasedCpm",
"adImpressions",
"cpm",
)
class InvalidArgument(Exception):
pass
def get_args():
parser = argparse.ArgumentParser(
description="Get YouTube analytics reports.",
epilog=(
"You need a Google Developers project with the YouTube Analytics API enabled to run this. "
"If you don't have that, you can find instructions at https://github.com/Carberra/analytics."
),
)
parser.add_argument(
"-f",
"--filename",
default=f"analytics-{dt.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.csv",
help="The filename for the report. Defaults to 'analytics-YYYY-MM-DD-HH-MM-SS.csv'.",
)
parser.add_argument(
"-s",
"--start-date",
default="2005-02-14",
help="The start date for the report in YYYY-MM-DD format. Defaults to 2005-02-14.",
)
parser.add_argument(
"-e",
"--end-date",
default=dt.date.today().strftime("%Y-%m-%d"),
help="The start date for the report in YYYY-MM-DD format. Defaults to today.",
)
parser.add_argument(
"-m",
"--metrics",
default=",".join(METRICS),
help="A comma-seperated list of metrics to use. View the source code for a list of available metrics. Defaults to all.",
)
args = parser.parse_args()
if not args.filename.endswith((".json", ".csv")):
raise InvalidArgument("You can only save the report as a JSON or a CSV.")
args.filetype = args.filename.split(".")[-1]
lowest = dt.datetime(2005, 2, 14).date()
highest = dt.date.today()
sd = dt.datetime.strptime(args.start_date, "%Y-%m-%d").date()
ed = dt.datetime.strptime(args.end_date, "%Y-%m-%d").date()
if sd < lowest or ed < lowest:
raise InvalidArgument("You cannot set a date before 14 Feb 2005.")
if sd > dt.date.today() or ed > dt.date.today():
raise InvalidArgument("You cannot set a date in the future.")
if any(f := list(filter(lambda m: m not in METRICS, args.metrics.split(",")))):
raise InvalidArgument(f"One or more invalid metrics were passed: {','.join(f)}.")
return args
def get_service():
flow = InstalledAppFlow.from_client_secrets_file(SECRET_FILE, SCOPES)
credentials = flow.run_console()
return build(API_SERVICE_NAME, API_VERSION, credentials=credentials)
def get_analytics(service, save_func, **kwargs):
response = service.reports().query(**kwargs).execute()
save_func(response)
def as_json(response):
with open(DATA_PATH / args.filename, "w", encoding="utf-8") as f:
json.dump(response, f, indent=4, ensure_ascii=False)
def as_csv(response):
df = pd.DataFrame()
df = df.append(response["rows"])
df.columns = [c["name"] for c in response["columnHeaders"]]
df.to_csv(DATA_PATH / args.filename)
if __name__ == "__main__":
os.environ["OAUTH_INSECURE_TRANSPORT"] = "1"
args = get_args()
service = get_service()
get_analytics(
service,
{"csv": as_csv, "json": as_json}[args.filetype],
ids="channel==MINE",
startDate=args.start_date,
endDate=args.end_date,
metrics=args.metrics,
dimensions="day",
sort="day",
)
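For reference, a standalone sketch of the response layout that as_csv() relies on; the header names and row values below are fabricated purely to illustrate the columnHeaders/rows-to-DataFrame conversion, not real report data.
# Standalone illustration of the conversion performed by as_csv(); the response
# dict is a made-up example of the API's "columnHeaders"/"rows" layout.
import pandas as pd

def demo_rows_to_frame():
    fake_response = {
        "columnHeaders": [{"name": "day"}, {"name": "views"}, {"name": "estimatedMinutesWatched"}],
        "rows": [["2021-01-01", 120, 450], ["2021-01-02", 98, 371]],
    }
    return pd.DataFrame(
        fake_response["rows"],
        columns=[c["name"] for c in fake_response["columnHeaders"]],
    )

# The resulting frame has one row per day and one column per requested metric,
# which as_csv() then writes to DATA_PATH / args.filename.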
| 29.124224
| 128
| 0.653231
|
75f2b4c5cdd10db0663c8b9a0682675df75f9532
| 80,180
|
py
|
Python
|
app.py
|
Cooops/ABUpower
|
ec84f8f0177b09a09195a974357d328376df7efc
|
[
"MIT"
] | 2
|
2018-10-09T01:17:21.000Z
|
2019-05-17T11:01:46.000Z
|
app.py
|
Cooops/ABUpower
|
ec84f8f0177b09a09195a974357d328376df7efc
|
[
"MIT"
] | null | null | null |
app.py
|
Cooops/ABUpower
|
ec84f8f0177b09a09195a974357d328376df7efc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask import Flask, jsonify, request, render_template, make_response, redirect, url_for
from flask_restful import Resource, Api
from jinja2 import Template
import json
import requests
from utils.gen_utils import *
from model.build_dataframes import *
from model.build_models import *
########################
# assign global values #
########################
app = Flask(__name__)
api = Api(app)
legend = 'Index (average)'
priceLegend = 'Price (avg)'
powerLegend = 'P9 Index (average)'
dualsLegend= 'Duals Index (average)'
lengthLegend = 'Average Length (days)'
countLegend = 'Total Sold (listings)'
sumLegend = 'Cumulative Sales (gross)'
depthLegend = 'Active Listings'
spotLegend = 'Spot Price'
headers = {'Content-Type': 'text/html'}
###############################
# begin wholly-rendered pages #
###############################
######################
# begin power routes #
######################
@app.route('/alpha/stats/power/<cardName>')
def renderIndividualAlphaCardPower(cardName):
modName = cardName.replace('%20', ' ').replace('-', ' ').split(' ')
wordLength = len(modName)
if wordLength > 1:
modCardName = f'Alpha {modName[0].capitalize()} {modName[1].capitalize()}'
else:
modCardName = f'Alpha {cardName.capitalize()}'
df_allDataIndividual = get_all_data_individual_general(modCardName)
cardPriceIndividual = list(df_allDataIndividual['completed_product_prices'])
cardEndIndividual = list(df_allDataIndividual['enddate'])
cardEndMonthIndividual = list(df_allDataIndividual['month'])
cardEndDayIndividual = list(df_allDataIndividual['day'])
cardTimestampIndividual = list(df_allDataIndividual['timestamp'])
cardDateIndividual = [f'{str(x).rstrip()} {float(str(y).lstrip()):.0f}' for x, y in zip(cardEndMonthIndividual, cardEndDayIndividual)]
df_allStatsIndividual = get_all_data_individual_stats(modCardName)
cardStatsAvgIndividual = list(df_allStatsIndividual['completed_product_avg'])
cardStatsLengthIndividual = list(df_allStatsIndividual['completed_product_avg_length'])
cardStatsCountIndividual = list(df_allStatsIndividual['completed_product_depth'])
cardStatsSumIndividual = list(df_allStatsIndividual['completed_product_sum'])
cardStatsTimestampIndividual = list(df_allStatsIndividual['timestamp'])
df_allActiveStatsIndividual = get_all_data_individual_stats_active(modCardName)
cardStatsActiveCountIndividual = list(df_allActiveStatsIndividual['active_product_depth'])
cardStatsActiveTimestampIndividual = list(df_allActiveStatsIndividual['timestamp'])
url = "https://abupower.com/api/alpha/power/table"
json_data = requests.get(url).json()
x = json_data['results']
price = [i['price'] for i in x if modCardName == i['nick']]
priceChange = [i['priceChange'] for i in x if modCardName == i['nick']]
count = [i['count'] for i in x if modCardName == i['nick']]
countChange = [i['countChange'] for i in x if modCardName == i['nick']]
length = [i['length'] for i in x if modCardName == i['nick']]
lengthChange = [i['lengthChange'] for i in x if modCardName == i['nick']]
cumSum = [i['cumSum'] for i in x if modCardName == i['nick']]
cumSumChange = [i['sumChange'] for i in x if modCardName == i['nick']]
periodLength = len(cardStatsTimestampIndividual)
dateRange = pd.date_range(get_timezones(periodLength), periods=periodLength).tolist()
dateRange = [i.strftime('%b. %d') for i in dateRange]
periodLengthSpot = len(cardDateIndividual)
dateRangeSpot = pd.date_range(get_timezones(periodLengthSpot), periods=periodLengthSpot).tolist()
dateRangeSpot = [i.strftime('%b. %d') for i in dateRangeSpot]
periodLengthActive = len(cardStatsActiveTimestampIndividual)
dateRangeActive = pd.date_range(get_timezones(periodLengthActive), periods=periodLengthActive).tolist()
dateRangeActive = [i.strftime('%b. %d') for i in dateRangeActive]
return make_response(render_template('individual_card.html',
priceLegend=priceLegend, lengthLegend=lengthLegend, countLegend=countLegend, sumLegend=sumLegend, spotLegend=spotLegend, depthLegend=depthLegend,
cardName=modCardName.split('MTG')[0], cardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[0], secondCardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[1].rstrip(),
cardPriceIndividual=cardPriceIndividual, cardEndIndividual=cardEndIndividual, cardDateIndividual=cardDateIndividual, cardTimestampIndividual=cardTimestampIndividual,
cardStatsAvgIndividual=cardStatsAvgIndividual, cardStatsLengthIndividual=cardStatsLengthIndividual, cardStatsCountIndividual=cardStatsCountIndividual, cardStatsSumIndividual=cardStatsSumIndividual, cardStatsTimestampIndividual=cardStatsTimestampIndividual,
cardStatsActiveCountIndividual=cardStatsActiveCountIndividual, cardStatsActiveTimestampIndividual=cardStatsActiveTimestampIndividual,
price=price, priceChange=priceChange, count=count, countChange=countChange, length=length, lengthChange=lengthChange, cumSum=cumSum, cumSumChange=cumSumChange, dateRange=dateRange, dateRangeSpot=dateRangeSpot, dateRangeActive=dateRangeActive,
), 200, headers)
@app.route('/beta/stats/power/<cardName>')
def renderIndividualBetaCardPower(cardName):
modName = cardName.replace('%20', ' ').replace('-', ' ').split(' ')
wordLength = len(modName)
if wordLength > 1:
        if modName[0].lower() == 'black':  # match the other set routes, which compare case-insensitively
modCardName = f'Beta {modName[0].capitalize()} {modName[1].capitalize()} MTG'
else:
modCardName = f'Beta {modName[0].capitalize()} {modName[1].capitalize()}'
else:
modCardName = f'Beta {cardName.capitalize()}'
df_allDataIndividual = get_all_data_individual_general(modCardName)
cardPriceIndividual = list(df_allDataIndividual['completed_product_prices'])
cardEndIndividual = list(df_allDataIndividual['enddate'])
cardEndMonthIndividual = list(df_allDataIndividual['month'])
cardEndDayIndividual = list(df_allDataIndividual['day'])
cardDateIndividual = [f'{str(x).rstrip()} {float(str(y).lstrip()):.0f}' for x, y in zip(cardEndMonthIndividual, cardEndDayIndividual)]
df_allStatsIndividual = get_all_data_individual_stats(modCardName)
cardStatsAvgIndividual = list(df_allStatsIndividual['completed_product_avg'])
cardStatsLengthIndividual = list(df_allStatsIndividual['completed_product_avg_length'])
cardStatsCountIndividual = list(df_allStatsIndividual['completed_product_depth'])
cardStatsSumIndividual = list(df_allStatsIndividual['completed_product_sum'])
cardStatsTimestampIndividual = list(df_allStatsIndividual['timestamp'])
df_allActiveStatsIndividual = get_all_data_individual_stats_active(modCardName)
cardStatsActiveCountIndividual = list(df_allActiveStatsIndividual['active_product_depth'])
cardStatsActiveTimestampIndividual = list(df_allActiveStatsIndividual['timestamp'])
url = "https://abupower.com/api/beta/power/table"
json_data = requests.get(url).json()
x = json_data['results']
price = [i['price'] for i in x if modCardName == i['nick']]
priceChange = [i['priceChange'] for i in x if modCardName == i['nick']]
count = [i['count'] for i in x if modCardName == i['nick']]
countChange = [i['countChange'] for i in x if modCardName == i['nick']]
length = [i['length'] for i in x if modCardName == i['nick']]
lengthChange = [i['lengthChange'] for i in x if modCardName == i['nick']]
cumSum = [i['cumSum'] for i in x if modCardName == i['nick']]
cumSumChange = [i['sumChange'] for i in x if modCardName == i['nick']]
periodLength = len(cardStatsTimestampIndividual)
dateRange = pd.date_range(get_timezones(periodLength), periods=periodLength).tolist()
dateRange = [i.strftime('%b. %d') for i in dateRange]
periodLengthSpot = len(cardDateIndividual)
dateRangeSpot = pd.date_range(get_timezones(periodLengthSpot), periods=periodLengthSpot).tolist()
dateRangeSpot = [i.strftime('%b. %d') for i in dateRangeSpot]
periodLengthActive = len(cardStatsActiveTimestampIndividual)
dateRangeActive = pd.date_range(get_timezones(periodLengthActive), periods=periodLengthActive).tolist()
dateRangeActive = [i.strftime('%b. %d') for i in dateRangeActive]
return make_response(render_template('individual_card.html',
priceLegend=priceLegend, lengthLegend=lengthLegend, countLegend=countLegend, sumLegend=sumLegend, spotLegend=spotLegend, depthLegend=depthLegend,
cardName=modCardName.split('MTG')[0], cardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[0], secondCardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[1].rstrip(),
cardPriceIndividual=cardPriceIndividual, cardEndIndividual=cardEndIndividual, cardDateIndividual=cardDateIndividual,
cardStatsAvgIndividual=cardStatsAvgIndividual, cardStatsLengthIndividual=cardStatsLengthIndividual, cardStatsCountIndividual=cardStatsCountIndividual, cardStatsSumIndividual=cardStatsSumIndividual, cardStatsTimestampIndividual=cardStatsTimestampIndividual,
cardStatsActiveCountIndividual=cardStatsActiveCountIndividual, cardStatsActiveTimestampIndividual=cardStatsActiveTimestampIndividual,
price=price, priceChange=priceChange, count=count, countChange=countChange, length=length, lengthChange=lengthChange, cumSum=cumSum, cumSumChange=cumSumChange, dateRange=dateRange, dateRangeSpot=dateRangeSpot, dateRangeActive=dateRangeActive,
), 200, headers)
@app.route('/unlimited/stats/power/<cardName>')
def renderIndividualUnlimitedCardPower(cardName):
modName = cardName.replace('%20', ' ').replace('-', ' ').split(' ')
wordLength = len(modName)
if wordLength > 1:
print(modName)
if modName[0].lower() == 'black':
modCardName = f'Unlimited {modName[0].capitalize()} {modName[1].capitalize()} MTG'
else:
modCardName = f'Unlimited {modName[0].capitalize()} {modName[1].capitalize()}'
else:
modCardName = f'Unlimited {cardName.capitalize()}'
df_allDataIndividual = get_all_data_individual_general(modCardName)
cardPriceIndividual = list(df_allDataIndividual['completed_product_prices'])
cardTimestampIndividual = list(df_allDataIndividual['timestamp'])
cardEndIndividual = list(df_allDataIndividual['enddate'])
cardEndMonthIndividual = list(df_allDataIndividual['month'])
cardEndDayIndividual = list(df_allDataIndividual['day'])
cardDateIndividual = [f'{str(x).rstrip()} {float(str(y).lstrip()):.0f}' for x, y in zip(cardEndMonthIndividual, cardEndDayIndividual)]
df_allStatsIndividual = get_all_data_individual_stats(modCardName)
cardStatsAvgIndividual = list(df_allStatsIndividual['completed_product_avg'])
cardStatsLengthIndividual = list(df_allStatsIndividual['completed_product_avg_length'])
cardStatsCountIndividual = list(df_allStatsIndividual['completed_product_depth'])
cardStatsSumIndividual = list(df_allStatsIndividual['completed_product_sum'])
cardStatsTimestampIndividual = list(df_allStatsIndividual['timestamp'])
df_allActiveStatsIndividual = get_all_data_individual_stats_active(modCardName)
cardStatsActiveCountIndividual = list(df_allActiveStatsIndividual['active_product_depth'])
cardStatsActiveTimestampIndividual = list(df_allActiveStatsIndividual['timestamp'])
url = "https://abupower.com/api/unlimited/power/table"
json_data = requests.get(url).json()
x = json_data['results']
price = [i['price'] for i in x if modCardName == i['nick']]
priceChange = [i['priceChange'] for i in x if modCardName == i['nick']]
count = [i['count'] for i in x if modCardName == i['nick']]
countChange = [i['countChange'] for i in x if modCardName == i['nick']]
length = [i['length'] for i in x if modCardName == i['nick']]
lengthChange = [i['lengthChange'] for i in x if modCardName == i['nick']]
cumSum = [i['cumSum'] for i in x if modCardName == i['nick']]
cumSumChange = [i['sumChange'] for i in x if modCardName == i['nick']]
periodLength = len(cardStatsTimestampIndividual)
dateRange = pd.date_range(get_timezones(periodLength), periods=periodLength).tolist()
dateRange = [i.strftime('%b. %d') for i in dateRange]
periodLengthSpot = len(cardDateIndividual)
dateRangeSpot = pd.date_range(get_timezones(periodLengthSpot), periods=periodLengthSpot).tolist()
dateRangeSpot = [i.strftime('%b. %d') for i in dateRangeSpot]
periodLengthActive = len(cardStatsActiveTimestampIndividual)
dateRangeActive = pd.date_range(get_timezones(periodLengthActive), periods=periodLengthActive).tolist()
dateRangeActive = [i.strftime('%b. %d') for i in dateRangeActive]
return make_response(render_template('individual_card.html',
priceLegend=priceLegend, lengthLegend=lengthLegend, countLegend=countLegend, sumLegend=sumLegend, spotLegend=spotLegend, depthLegend=depthLegend,
cardName=modCardName.split('MTG')[0], cardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[0], secondCardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[1].rstrip(),
cardPriceIndividual=cardPriceIndividual, cardEndIndividual=cardEndIndividual, cardDateIndividual=cardDateIndividual, cardTimestampIndividual=cardTimestampIndividual,
cardStatsAvgIndividual=cardStatsAvgIndividual, cardStatsLengthIndividual=cardStatsLengthIndividual, cardStatsCountIndividual=cardStatsCountIndividual, cardStatsSumIndividual=cardStatsSumIndividual, cardStatsTimestampIndividual=cardStatsTimestampIndividual,
cardStatsActiveCountIndividual=cardStatsActiveCountIndividual, cardStatsActiveTimestampIndividual=cardStatsActiveTimestampIndividual,
price=price, priceChange=priceChange, count=count, countChange=countChange, length=length, lengthChange=lengthChange, cumSum=cumSum, cumSumChange=cumSumChange, dateRange=dateRange, dateRangeSpot=dateRangeSpot, dateRangeActive=dateRangeActive,
), 200, headers)
@app.route('/collectors/stats/power/<cardName>')
def renderIndividualCollectorsCardPower(cardName):
modName = cardName.replace('%20', ' ').replace('-', ' ').split(' ')
wordLength = len(modName)
if wordLength > 1:
print(modName)
if modName[0].lower() == 'black':
modCardName = f'Collectors Edition {modName[0].capitalize()} {modName[1].capitalize()} MTG'
else:
modCardName = f'Collectors Edition {modName[0].capitalize()} {modName[1].capitalize()}'
else:
modCardName = f'Collectors Edition {cardName.capitalize()}'
df_allDataIndividual = get_all_data_individual_general(modCardName)
cardPriceIndividual = list(df_allDataIndividual['completed_product_prices'])
cardTimestampIndividual = list(df_allDataIndividual['timestamp'])
cardEndIndividual = list(df_allDataIndividual['enddate'])
cardEndMonthIndividual = list(df_allDataIndividual['month'])
cardEndDayIndividual = list(df_allDataIndividual['day'])
cardDateIndividual = [f'{str(x).rstrip()} {float(str(y).lstrip()):.0f}' for x, y in zip(cardEndMonthIndividual, cardEndDayIndividual)]
df_allStatsIndividual = get_all_data_individual_stats(modCardName)
cardStatsAvgIndividual = list(df_allStatsIndividual['completed_product_avg'])
cardStatsLengthIndividual = list(df_allStatsIndividual['completed_product_avg_length'])
cardStatsCountIndividual = list(df_allStatsIndividual['completed_product_depth'])
cardStatsSumIndividual = list(df_allStatsIndividual['completed_product_sum'])
cardStatsTimestampIndividual = list(df_allStatsIndividual['timestamp'])
df_allActiveStatsIndividual = get_all_data_individual_stats_active(modCardName)
cardStatsActiveCountIndividual = list(df_allActiveStatsIndividual['active_product_depth'])
cardStatsActiveTimestampIndividual = list(df_allActiveStatsIndividual['timestamp'])
url = "https://abupower.com/api/collectors/power/table"
json_data = requests.get(url).json()
x = json_data['results']
price = [i['price'] for i in x if modCardName == i['nick']]
priceChange = [i['priceChange'] for i in x if modCardName == i['nick']]
count = [i['count'] for i in x if modCardName == i['nick']]
countChange = [i['countChange'] for i in x if modCardName == i['nick']]
length = [i['length'] for i in x if modCardName == i['nick']]
lengthChange = [i['lengthChange'] for i in x if modCardName == i['nick']]
cumSum = [i['cumSum'] for i in x if modCardName == i['nick']]
cumSumChange = [i['sumChange'] for i in x if modCardName == i['nick']]
periodLength = len(cardStatsTimestampIndividual)
dateRange = pd.date_range(get_timezones(periodLength), periods=periodLength).tolist()
dateRange = [i.strftime('%b. %d') for i in dateRange]
periodLengthSpot = len(cardDateIndividual)
dateRangeSpot = pd.date_range(get_timezones(periodLengthSpot), periods=periodLengthSpot).tolist()
dateRangeSpot = [i.strftime('%b. %d') for i in dateRangeSpot]
periodLengthActive = len(cardStatsActiveTimestampIndividual)
dateRangeActive = pd.date_range(get_timezones(periodLengthActive), periods=periodLengthActive).tolist()
dateRangeActive = [i.strftime('%b. %d') for i in dateRangeActive]
return make_response(render_template('individual_card.html',
priceLegend=priceLegend, lengthLegend=lengthLegend, countLegend=countLegend, sumLegend=sumLegend, spotLegend=spotLegend, depthLegend=depthLegend,
cardName=modCardName.split('MTG')[0], cardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[0], secondCardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[1].rstrip(),
cardPriceIndividual=cardPriceIndividual, cardEndIndividual=cardEndIndividual, cardDateIndividual=cardDateIndividual, cardTimestampIndividual=cardTimestampIndividual,
cardStatsAvgIndividual=cardStatsAvgIndividual, cardStatsLengthIndividual=cardStatsLengthIndividual, cardStatsCountIndividual=cardStatsCountIndividual, cardStatsSumIndividual=cardStatsSumIndividual, cardStatsTimestampIndividual=cardStatsTimestampIndividual,
cardStatsActiveCountIndividual=cardStatsActiveCountIndividual, cardStatsActiveTimestampIndividual=cardStatsActiveTimestampIndividual,
price=price, priceChange=priceChange, count=count, countChange=countChange, length=length, lengthChange=lengthChange, cumSum=cumSum, cumSumChange=cumSumChange, dateRange=dateRange, dateRangeSpot=dateRangeSpot, dateRangeActive=dateRangeActive,
), 200, headers)
@app.route('/alpha/stats/duals/<cardName>')
def renderIndividualAlphaCard(cardName):
cardName = cardName.split('-')
try:
modCardName = f'Alpha {cardName[0].capitalize()} {cardName[1].capitalize()} MTG'
except:
modCardName = f'Alpha {cardName[0].capitalize()} MTG'
df_allDataIndividual = get_all_data_individual_general(modCardName)
cardPriceIndividual = list(df_allDataIndividual['completed_product_prices'])
cardEndIndividual = list(df_allDataIndividual['enddate'])
cardEndMonthIndividual = list(df_allDataIndividual['month'])
cardEndDayIndividual = list(df_allDataIndividual['day'])
cardDateIndividual = [f'{str(x).rstrip()} {float(str(y).lstrip()):.0f}' for x, y in zip(cardEndMonthIndividual, cardEndDayIndividual)]
df_allStatsIndividual = get_all_data_individual_stats(modCardName)
cardStatsAvgIndividual = list(df_allStatsIndividual['completed_product_avg'])
cardStatsLengthIndividual = list(df_allStatsIndividual['completed_product_avg_length'])
cardStatsCountIndividual = list(df_allStatsIndividual['completed_product_depth'])
cardStatsSumIndividual = list(df_allStatsIndividual['completed_product_sum'])
cardStatsTimestampIndividual = list(df_allStatsIndividual['timestamp'])
df_allActiveStatsIndividual = get_all_data_individual_stats_active(modCardName)
cardStatsActiveCountIndividual = list(df_allActiveStatsIndividual['active_product_depth'])
cardStatsActiveTimestampIndividual = list(df_allActiveStatsIndividual['timestamp'])
url = "https://abupower.com/api/alpha/duals/table"
json_data = requests.get(url).json()
x = json_data['results']
price = [i['price'] for i in x if modCardName == i['nick']]
priceChange = [i['priceChange'] for i in x if modCardName == i['nick']]
count = [i['count'] for i in x if modCardName == i['nick']]
countChange = [i['countChange'] for i in x if modCardName == i['nick']]
length = [i['length'] for i in x if modCardName == i['nick']]
lengthChange = [i['lengthChange'] for i in x if modCardName == i['nick']]
cumSum = [i['cumSum'] for i in x if modCardName == i['nick']]
cumSumChange = [i['sumChange'] for i in x if modCardName == i['nick']]
periodLength = len(cardStatsTimestampIndividual)
dateRange = pd.date_range(get_timezones(periodLength), periods=periodLength).tolist()
dateRange = [i.strftime('%b. %d') for i in dateRange]
periodLengthSpot = len(cardDateIndividual)
dateRangeSpot = pd.date_range(get_timezones(periodLengthSpot), periods=periodLengthSpot).tolist()
dateRangeSpot = [i.strftime('%b. %d') for i in dateRangeSpot]
periodLengthActive = len(cardStatsActiveTimestampIndividual)
dateRangeActive = pd.date_range(get_timezones(periodLengthActive), periods=periodLengthActive).tolist()
dateRangeActive = [i.strftime('%b. %d') for i in dateRangeActive]
return make_response(render_template('individual_card.html',
priceLegend=priceLegend, lengthLegend=lengthLegend, countLegend=countLegend, sumLegend=sumLegend, spotLegend=spotLegend, depthLegend=depthLegend,
cardName=modCardName.split('MTG')[0], cardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[0], secondCardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[1].rstrip(),
cardPriceIndividual=cardPriceIndividual, cardEndIndividual=cardEndIndividual, cardDateIndividual=cardDateIndividual,
cardStatsAvgIndividual=cardStatsAvgIndividual, cardStatsLengthIndividual=cardStatsLengthIndividual, cardStatsCountIndividual=cardStatsCountIndividual, cardStatsSumIndividual=cardStatsSumIndividual, cardStatsTimestampIndividual=cardStatsTimestampIndividual,
cardStatsActiveCountIndividual=cardStatsActiveCountIndividual, cardStatsActiveTimestampIndividual=cardStatsActiveTimestampIndividual,
price=price, priceChange=priceChange, count=count, countChange=countChange, length=length, lengthChange=lengthChange, cumSum=cumSum, cumSumChange=cumSumChange, dateRange=dateRange, dateRangeSpot=dateRangeSpot, dateRangeActive=dateRangeActive,
), 200, headers)
@app.route('/beta/stats/duals/<cardName>')
def renderIndividualBetaCard(cardName):
cardName = cardName.split('-')
try:
modCardName = f'Beta {cardName[0].capitalize()} {cardName[1].capitalize()} MTG'
except:
modCardName = f'Beta {cardName[0].capitalize()} MTG'
df_allDataIndividual = get_all_data_individual_general(modCardName)
cardPriceIndividual = list(df_allDataIndividual['completed_product_prices'])
cardEndIndividual = list(df_allDataIndividual['enddate'])
cardEndMonthIndividual = list(df_allDataIndividual['month'])
cardEndDayIndividual = list(df_allDataIndividual['day'])
cardDateIndividual = [f'{str(x).rstrip()} {float(str(y).lstrip()):.0f}' for x, y in zip(cardEndMonthIndividual, cardEndDayIndividual)]
df_allStatsIndividual = get_all_data_individual_stats(modCardName)
cardStatsAvgIndividual = list(df_allStatsIndividual['completed_product_avg'])
cardStatsLengthIndividual = list(df_allStatsIndividual['completed_product_avg_length'])
cardStatsCountIndividual = list(df_allStatsIndividual['completed_product_depth'])
cardStatsSumIndividual = list(df_allStatsIndividual['completed_product_sum'])
cardStatsTimestampIndividual = list(df_allStatsIndividual['timestamp'])
df_allActiveStatsIndividual = get_all_data_individual_stats_active(modCardName)
cardStatsActiveCountIndividual = list(df_allActiveStatsIndividual['active_product_depth'])
cardStatsActiveTimestampIndividual = list(df_allActiveStatsIndividual['timestamp'])
url = "https://abupower.com/api/beta/duals/table"
json_data = requests.get(url).json()
x = json_data['results']
price = [i['price'] for i in x if modCardName == i['nick']]
priceChange = [i['priceChange'] for i in x if modCardName == i['nick']]
count = [i['count'] for i in x if modCardName == i['nick']]
countChange = [i['countChange'] for i in x if modCardName == i['nick']]
length = [i['length'] for i in x if modCardName == i['nick']]
lengthChange = [i['lengthChange'] for i in x if modCardName == i['nick']]
cumSum = [i['cumSum'] for i in x if modCardName == i['nick']]
cumSumChange = [i['sumChange'] for i in x if modCardName == i['nick']]
periodLength = len(cardStatsTimestampIndividual)
dateRange = pd.date_range(get_timezones(periodLength), periods=periodLength).tolist()
dateRange = [i.strftime('%b. %d') for i in dateRange]
periodLengthSpot = len(cardDateIndividual)
dateRangeSpot = pd.date_range(get_timezones(periodLengthSpot), periods=periodLengthSpot).tolist()
dateRangeSpot = [i.strftime('%b. %d') for i in dateRangeSpot]
periodLengthActive = len(cardStatsActiveTimestampIndividual)
dateRangeActive = pd.date_range(get_timezones(periodLengthActive), periods=periodLengthActive).tolist()
dateRangeActive = [i.strftime('%b. %d') for i in dateRangeActive]
return make_response(render_template('individual_card.html',
priceLegend=priceLegend, lengthLegend=lengthLegend, countLegend=countLegend, sumLegend=sumLegend, spotLegend=spotLegend, depthLegend=depthLegend,
cardName=modCardName.split('MTG')[0], cardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[0], secondCardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[1].rstrip(),
cardPriceIndividual=cardPriceIndividual, cardEndIndividual=cardEndIndividual, cardDateIndividual=cardDateIndividual,
cardStatsAvgIndividual=cardStatsAvgIndividual, cardStatsLengthIndividual=cardStatsLengthIndividual, cardStatsCountIndividual=cardStatsCountIndividual, cardStatsSumIndividual=cardStatsSumIndividual, cardStatsTimestampIndividual=cardStatsTimestampIndividual,
cardStatsActiveCountIndividual=cardStatsActiveCountIndividual, cardStatsActiveTimestampIndividual=cardStatsActiveTimestampIndividual,
price=price, priceChange=priceChange, count=count, countChange=countChange, length=length, lengthChange=lengthChange, cumSum=cumSum, cumSumChange=cumSumChange, dateRange=dateRange, dateRangeSpot=dateRangeSpot, dateRangeActive=dateRangeActive,
), 200, headers)
@app.route('/unlimited/stats/duals/<cardName>')
def renderIndividualUnlimitedCard(cardName):
modName = cardName.replace('%20', ' ').replace('-', ' ').split(' ')
wordLength = len(modName)
if wordLength > 1:
modCardName = f'Unlimited {modName[0].capitalize()} {modName[1].capitalize()} MTG'
else:
modCardName = f'Unlimited {modName[0].capitalize()} MTG'
df_allDataIndividual = get_all_data_individual_general(modCardName)
cardPriceIndividual = list(df_allDataIndividual['completed_product_prices'])
cardEndIndividual = list(df_allDataIndividual['enddate'])
cardEndMonthIndividual = list(df_allDataIndividual['month'])
cardEndDayIndividual = list(df_allDataIndividual['day'])
cardDateIndividual = [f'{str(x).rstrip()} {float(str(y).lstrip()):.0f}' for x, y in zip(cardEndMonthIndividual, cardEndDayIndividual)]
df_allStatsIndividual = get_all_data_individual_stats(modCardName)
cardStatsAvgIndividual = list(df_allStatsIndividual['completed_product_avg'])
cardStatsLengthIndividual = list(df_allStatsIndividual['completed_product_avg_length'])
cardStatsCountIndividual = list(df_allStatsIndividual['completed_product_depth'])
cardStatsSumIndividual = list(df_allStatsIndividual['completed_product_sum'])
cardStatsTimestampIndividual = list(df_allStatsIndividual['timestamp'])
df_allActiveStatsIndividual = get_all_data_individual_stats_active(modCardName)
cardStatsActiveCountIndividual = list(df_allActiveStatsIndividual['active_product_depth'])
cardStatsActiveTimestampIndividual = list(df_allActiveStatsIndividual['timestamp'])
url = "https://abupower.com/api/unlimited/duals/table"
json_data = requests.get(url).json()
x = json_data['results']
price = [i['price'] for i in x if modCardName == i['nick']]
priceChange = [i['priceChange'] for i in x if modCardName == i['nick']]
count = [i['count'] for i in x if modCardName == i['nick']]
countChange = [i['countChange'] for i in x if modCardName == i['nick']]
length = [i['length'] for i in x if modCardName == i['nick']]
lengthChange = [i['lengthChange'] for i in x if modCardName == i['nick']]
cumSum = [i['cumSum'] for i in x if modCardName == i['nick']]
cumSumChange = [i['sumChange'] for i in x if modCardName == i['nick']]
periodLength = len(cardStatsTimestampIndividual)
dateRange = pd.date_range(get_timezones(periodLength), periods=periodLength).tolist()
dateRange = [i.strftime('%b. %d') for i in dateRange]
periodLengthSpot = len(cardDateIndividual)
dateRangeSpot = pd.date_range(get_timezones(periodLengthSpot), periods=periodLengthSpot).tolist()
dateRangeSpot = [i.strftime('%b. %d') for i in dateRangeSpot]
periodLengthActive = len(cardStatsActiveTimestampIndividual)
dateRangeActive = pd.date_range(get_timezones(periodLengthActive), periods=periodLengthActive).tolist()
dateRangeActive = [i.strftime('%b. %d') for i in dateRangeActive]
return make_response(render_template('individual_card.html',
priceLegend=priceLegend, lengthLegend=lengthLegend, countLegend=countLegend, sumLegend=sumLegend, spotLegend=spotLegend, depthLegend=depthLegend,
cardName=modCardName.split('MTG')[0], cardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[0], secondCardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[1].rstrip(),
cardPriceIndividual=cardPriceIndividual, cardEndIndividual=cardEndIndividual, cardDateIndividual=cardDateIndividual,
cardStatsAvgIndividual=cardStatsAvgIndividual, cardStatsLengthIndividual=cardStatsLengthIndividual, cardStatsCountIndividual=cardStatsCountIndividual, cardStatsSumIndividual=cardStatsSumIndividual, cardStatsTimestampIndividual=cardStatsTimestampIndividual,
cardStatsActiveCountIndividual=cardStatsActiveCountIndividual, cardStatsActiveTimestampIndividual=cardStatsActiveTimestampIndividual,
price=price, priceChange=priceChange, count=count, countChange=countChange, length=length, lengthChange=lengthChange, cumSum=cumSum, cumSumChange=cumSumChange, dateRange=dateRange, dateRangeSpot=dateRangeSpot, dateRangeActive=dateRangeActive,
), 200, headers)
@app.route('/collectors/stats/duals/<cardName>')
def renderIndividualCollectorsCard(cardName):
modName = cardName.replace('%20', ' ').replace('-', ' ').split(' ')
wordLength = len(modName)
if wordLength > 1:
modCardName = f'Collectors Edition {modName[0].capitalize()} {modName[1].capitalize()} MTG'
else:
modCardName = f'Collectors Edition {modName[0].capitalize()} MTG'
df_allDataIndividual = get_all_data_individual_general(modCardName)
cardPriceIndividual = list(df_allDataIndividual['completed_product_prices'])
cardEndIndividual = list(df_allDataIndividual['enddate'])
cardEndMonthIndividual = list(df_allDataIndividual['month'])
cardEndDayIndividual = list(df_allDataIndividual['day'])
cardDateIndividual = [f'{str(x).rstrip()} {float(str(y).lstrip()):.0f}' for x, y in zip(cardEndMonthIndividual, cardEndDayIndividual)]
df_allStatsIndividual = get_all_data_individual_stats(modCardName)
cardStatsAvgIndividual = list(df_allStatsIndividual['completed_product_avg'])
cardStatsLengthIndividual = list(df_allStatsIndividual['completed_product_avg_length'])
cardStatsCountIndividual = list(df_allStatsIndividual['completed_product_depth'])
cardStatsSumIndividual = list(df_allStatsIndividual['completed_product_sum'])
cardStatsTimestampIndividual = list(df_allStatsIndividual['timestamp'])
df_allActiveStatsIndividual = get_all_data_individual_stats_active(modCardName)
cardStatsActiveCountIndividual = list(df_allActiveStatsIndividual['active_product_depth'])
cardStatsActiveTimestampIndividual = list(df_allActiveStatsIndividual['timestamp'])
url = "https://abupower.com/api/collectors/duals/table"
json_data = requests.get(url).json()
x = json_data['results']
price = [i['price'] for i in x if modCardName == i['nick']]
priceChange = [i['priceChange'] for i in x if modCardName == i['nick']]
count = [i['count'] for i in x if modCardName == i['nick']]
countChange = [i['countChange'] for i in x if modCardName == i['nick']]
length = [i['length'] for i in x if modCardName == i['nick']]
lengthChange = [i['lengthChange'] for i in x if modCardName == i['nick']]
cumSum = [i['cumSum'] for i in x if modCardName == i['nick']]
cumSumChange = [i['sumChange'] for i in x if modCardName == i['nick']]
periodLength = len(cardStatsTimestampIndividual)
dateRange = pd.date_range(get_timezones(periodLength), periods=periodLength).tolist()
dateRange = [i.strftime('%b. %d') for i in dateRange]
periodLengthSpot = len(cardDateIndividual)
dateRangeSpot = pd.date_range(get_timezones(periodLengthSpot), periods=periodLengthSpot).tolist()
dateRangeSpot = [i.strftime('%b. %d') for i in dateRangeSpot]
periodLengthActive = len(cardStatsActiveTimestampIndividual)
dateRangeActive = pd.date_range(get_timezones(periodLengthActive), periods=periodLengthActive).tolist()
dateRangeActive = [i.strftime('%b. %d') for i in dateRangeActive]
return make_response(render_template('individual_card.html',
priceLegend=priceLegend, lengthLegend=lengthLegend, countLegend=countLegend, sumLegend=sumLegend, spotLegend=spotLegend, depthLegend=depthLegend,
cardName=modCardName.split('MTG')[0], cardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[0], secondCardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[1].rstrip(),
cardPriceIndividual=cardPriceIndividual, cardEndIndividual=cardEndIndividual, cardDateIndividual=cardDateIndividual,
cardStatsAvgIndividual=cardStatsAvgIndividual, cardStatsLengthIndividual=cardStatsLengthIndividual, cardStatsCountIndividual=cardStatsCountIndividual, cardStatsSumIndividual=cardStatsSumIndividual, cardStatsTimestampIndividual=cardStatsTimestampIndividual,
cardStatsActiveCountIndividual=cardStatsActiveCountIndividual, cardStatsActiveTimestampIndividual=cardStatsActiveTimestampIndividual,
price=price, priceChange=priceChange, count=count, countChange=countChange, length=length, lengthChange=lengthChange, cumSum=cumSum, cumSumChange=cumSumChange, dateRange=dateRange, dateRangeSpot=dateRangeSpot, dateRangeActive=dateRangeActive,
), 200, headers)
@app.route('/revised/stats/duals/<cardName>')
def renderIndividualRevisedCard(cardName):
modName = cardName.replace('%20', ' ').replace('-', ' ').split(' ')
wordLength = len(modName)
if wordLength > 1:
modCardName = f'Revised {modName[0].capitalize()} {modName[1].capitalize()} MTG'
else:
modCardName = f'Revised {cardName.capitalize()} MTG'
df_allDataIndividual = get_all_data_individual_general(modCardName)
cardPriceIndividual = list(df_allDataIndividual['completed_product_prices'])
cardEndIndividual = list(df_allDataIndividual['enddate'])
cardEndMonthIndividual = list(df_allDataIndividual['month'])
cardEndDayIndividual = list(df_allDataIndividual['day'])
cardDateIndividual = [f'{str(x).rstrip()} {float(str(y).lstrip()):.0f}' for x, y in zip(cardEndMonthIndividual, cardEndDayIndividual)]
df_allStatsIndividual = get_all_data_individual_stats(modCardName)
cardStatsAvgIndividual = list(df_allStatsIndividual['completed_product_avg'])
cardStatsLengthIndividual = list(df_allStatsIndividual['completed_product_avg_length'])
cardStatsCountIndividual = list(df_allStatsIndividual['completed_product_depth'])
cardStatsSumIndividual = list(df_allStatsIndividual['completed_product_sum'])
cardStatsTimestampIndividual = list(df_allStatsIndividual['timestamp'])
df_allActiveStatsIndividual = get_all_data_individual_stats_active(modCardName)
cardStatsActiveCountIndividual = list(df_allActiveStatsIndividual['active_product_depth'])
cardStatsActiveTimestampIndividual = list(df_allActiveStatsIndividual['timestamp'])
url = "https://abupower.com/api/revised/duals/table"
json_data = requests.get(url).json()
x = json_data['results']
price = [i['price'] for i in x if modCardName == i['nick']]
priceChange = [i['priceChange'] for i in x if modCardName == i['nick']]
count = [i['count'] for i in x if modCardName == i['nick']]
countChange = [i['countChange'] for i in x if modCardName == i['nick']]
length = [i['length'] for i in x if modCardName == i['nick']]
lengthChange = [i['lengthChange'] for i in x if modCardName == i['nick']]
cumSum = [i['cumSum'] for i in x if modCardName == i['nick']]
cumSumChange = [i['sumChange'] for i in x if modCardName == i['nick']]
periodLength = len(cardStatsTimestampIndividual)
dateRange = pd.date_range(get_timezones(periodLength), periods=periodLength).tolist()
dateRange = [i.strftime('%b. %d') for i in dateRange]
periodLengthSpot = len(cardDateIndividual)
dateRangeSpot = pd.date_range(get_timezones(periodLengthSpot), periods=periodLengthSpot).tolist()
dateRangeSpot = [i.strftime('%b. %d') for i in dateRangeSpot]
periodLengthActive = len(cardStatsActiveTimestampIndividual)
dateRangeActive = pd.date_range(get_timezones(periodLengthActive), periods=periodLengthActive).tolist()
dateRangeActive = [i.strftime('%b. %d') for i in dateRangeActive]
return make_response(render_template('individual_card.html',
priceLegend=priceLegend, lengthLegend=lengthLegend, countLegend=countLegend, sumLegend=sumLegend, spotLegend=spotLegend, depthLegend=depthLegend,
cardName=modCardName.split('MTG')[0], cardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[0], secondCardNameIndividual=modCardName.split('MTG')[0].split(' ', 1)[1].rstrip(),
cardPriceIndividual=cardPriceIndividual, cardEndIndividual=cardEndIndividual, cardDateIndividual=cardDateIndividual,
cardStatsAvgIndividual=cardStatsAvgIndividual, cardStatsLengthIndividual=cardStatsLengthIndividual, cardStatsCountIndividual=cardStatsCountIndividual, cardStatsSumIndividual=cardStatsSumIndividual, cardStatsTimestampIndividual=cardStatsTimestampIndividual,
cardStatsActiveCountIndividual=cardStatsActiveCountIndividual, cardStatsActiveTimestampIndividual=cardStatsActiveTimestampIndividual,
price=price, priceChange=priceChange, count=count, countChange=countChange, length=length, lengthChange=lengthChange, cumSum=cumSum, cumSumChange=cumSumChange, dateRange=dateRange, dateRangeSpot=dateRangeSpot, dateRangeActive=dateRangeActive,
), 200, headers)
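The per-set route bodies above differ only in the set prefix used to build modCardName and in the table-API slug they query; a hedged sketch of how that repeated lookup could be factored into one helper follows. The fetch_table_row name and signature are hypothetical and do not exist in the repo.
# Hypothetical refactoring sketch only; not part of the application.
def fetch_table_row(table_slug, mod_card_name):
    """Return the summary-table entry for one card from the public API, or None."""
    results = requests.get(f"https://abupower.com/api/{table_slug}/table").json()['results']
    return next((row for row in results if row['nick'] == mod_card_name), None)

# e.g. fetch_table_row('revised/duals', 'Revised Tundra MTG') would replace the
# eight repeated list comprehensions over json_data['results'] in each route.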
###################################
# begin individual query endpoint #
###################################
@app.route('/location/')
def location():
headers = {'Content-Type': 'text/html'}
query = request.args.get('search')
    if query and len(query) > 1:  # guard against a missing ?search= parameter
try:
            set_name = query.split(' ')[0].lower()  # renamed from `set` to avoid shadowing the built-in
            cardName = query.split(' ', 1)[1].title()  # split only on the first occurrence so card names with spaces (e.g. Black Lotus) stay intact
            if cardName in ('Tundra', 'Underground Sea', 'Badlands', 'Taiga', 'Savannah', 'Scrubland', 'Volcanic Island', 'Bayou', 'Plateau', 'Tropical Island'):
                return redirect(url_for(f'renderIndividual{set_name.capitalize()}Card', cardName=cardName))
            elif cardName in ('Black Lotus', 'Mox Jet', 'Mox Ruby', 'Mox Emerald', 'Mox Sapphire', 'Mox Pearl', 'Timetwister', 'Time Walk', 'Ancestral Recall'):
                return redirect(url_for(f'renderIndividual{set_name.capitalize()}CardPower', cardName=cardName))
except:
return make_response(render_template('404.html'), 404, headers)
else:
return make_response(render_template('404.html'), 404, headers)
########################################
# begin email sign-up and other POST's #
########################################
@app.route('/email', methods=['POST'])
def email():
# TODO: add proper form handling, insert into db, etc etc @ 10/6/2018
# deal with inserted emails here...insert into db properly
print('logged email')
data = request.values
    with open("emails.txt", "a") as email_log:  # context manager ensures the file handle is closed
        email_log.write(f"{data['name']}: '{data['email']}'\n")
return redirect(url_for('homepage'))
###################################
# begin wholly-rendered endpoints #
###################################
# begin general endpoints
class HomePage(Resource):
def __init__(self):
pass
def get(self):
        # TODO: data passed into render_template here is only loaded once, when the page is rendered;
        # if we want the figures to refresh without a reload, the templates should pull from our rest api via ajax calls.
return make_response(render_template('home.html',
get_cumulative_power=get_cumulative_power(), get_cumulative_count_power=get_cumulative_count_power(),
get_cumulative_duals=get_cumulative_duals(), get_cumulative_count_duals=get_cumulative_count_duals()),
200, headers)
class Active(Resource):
def __init__(self):
pass
def get(self):
return make_response(render_template('active.html'), 200, headers)
class About(Resource):
def __init__(self):
pass
def get(self):
return make_response(render_template('about.html'), 200, headers)
class Footer(Resource):
def __init__(self):
pass
def get(self):
return make_response(render_template('footer.html'), 200, headers)
class GeneralIndexAverage(Resource):
def __init__(self):
pass
def get(self):
        try:
            index_avg = calc_index_avg()  # compute once instead of re-querying for every field
            return jsonify({'results': {'alpha': index_avg[0], 'beta': index_avg[1], 'unlimited': index_avg[2], 'timestamp': index_avg[3]}})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class Alpha(Resource):
def __init__(self):
pass
def get(self):
maxHeight = round(max(alphaDataCountPower)) + 1
minHeight = round(min(alphaDataCountPower)) - 10
maxHeightLength = round(max(alphaDataLengthPower)) + 2
minHeightLength = round(min(alphaDataLengthPower)) - 10
alphaDate = alphaDataLengthTimestampPower
periodLength = len(alphaDate)
dateRange = pd.date_range(get_timezones(periodLength), periods=periodLength).tolist()
dateRange = [i.strftime('%b. %d') for i in dateRange]
alphaDateDuals = alphaDataLengthTimestampDuals
periodLengthDuals = len(alphaDateDuals)
dateRangeDuals = pd.date_range(get_timezones(periodLengthDuals), periods=periodLengthDuals).tolist()
dateRangeDuals = [i.strftime('%b. %d') for i in dateRangeDuals]
return make_response(render_template('alpha.html',
dualsLegend=dualsLegend, maxHeight=maxHeight, minHeight=minHeight, maxHeightLength=maxHeightLength, minHeightLength=minHeightLength, countLegend=countLegend, lengthLegend=lengthLegend, sumLegend=sumLegend, powerLegend=powerLegend,
dateRange=dateRange, dateRangeDuals=dateRangeDuals,
alphaDataAvgDuals=alphaDataAvgDuals, alphaDataAvgTimestampDuals=alphaDataAvgTimestampDuals,
alphaDataLengthDuals=alphaDataLengthDuals, alphaDataLengthTimestampDuals=alphaDataLengthTimestampDuals,
alphaDataCountDuals=alphaDataCountDuals, alphaDataCountTimestampDuals=alphaDataCountTimestampDuals,
alphaDataBreakdownNameDuals=alphaDataBreakdownNameDuals, alphaDataBreakdownavg=alphaDataBreakdownAvgDuals,
alphaDataAllEndDuals=alphaDataAllEndDuals, alphaDataAllNameDuals=alphaDataAllNameDuals, alphaDataAllHrefDuals=alphaDataAllHrefDuals, alphaDataAllPriceDuals=alphaDataAllPriceDuals,
alphaActiveDataAllStartDuals=alphaActiveDataAllStartDuals, alphaActiveDataAllNameDuals=alphaActiveDataAllNameDuals, alphaActiveDataAllHrefDuals=alphaActiveDataAllHrefDuals, alphaActiveDataAllPriceDuals=alphaActiveDataAllPriceDuals,
get_percent_change_last_sold=get_percent_change_last_sold, get_premiums=get_premiums, get_count=get_data_single_product_count_90, get_length=get_data_single_product_avg_length_90, get_depth=get_data_single_product_depth,
alphaDataCumulativePriceDuals=alphaDataCumulativePriceDuals, alphaDataCumulativeTimestampDuals=alphaDataCumulativeTimestampDuals,
alphaDataAvgPower=alphaDataAvgPower, alphaDataAvgTimestampPower=alphaDataAvgTimestampPower,
alphaDataLengthPower=alphaDataLengthPower, alphaDataLengthTimestampPower=alphaDataLengthTimestampPower,
alphaDataCountPower=alphaDataCountPower, alphaDataCountTimestampPower=alphaDataCountTimestampPower,
alphaDataAllEndPower=alphaDataAllEndPower, alphaDataAllNamePower=alphaDataAllNamePower, alphaDataAllHrefPower=alphaDataAllHrefPower, alphaDataAllPricePower=alphaDataAllPricePower,
alphaActiveDataAllStartPower=alphaActiveDataAllStartPower, alphaActiveDataAllNamePower=alphaActiveDataAllNamePower, alphaActiveDataAllHrefPower=alphaActiveDataAllHrefPower, alphaActiveDataAllPricePower=alphaActiveDataAllPricePower,
alphaDataCumulativePricePower=alphaDataCumulativePricePower, alphaDataCumulativeTimestampPower=alphaDataCumulativeTimestampPower,
get_cumulative=get_data_alpha_cumulative_totals, get_active_depth=get_data_active_index_count_sum), 200, headers)
class AlphaPowerTable(Resource):
def __init__(self):
pass
def get(self):
alphaPowerTable = clean_to_json(df_alphaStatsPower, 'indexTable')
alphaPowerTableJSON = json.loads(alphaPowerTable)
try:
return jsonify({'results': alphaPowerTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class AlphaPowerIndexAverage(Resource):
def __init__(self):
pass
def get(self):
alphaPowerIndexAvg = clean_to_json(df_alphaAvgAllPower, 'avg')
alphaPowerIndexAvgJSON = json.loads(alphaPowerIndexAvg)
try:
return jsonify({'results': alphaPowerIndexAvgJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class AlphaPowerActive(Resource):
def __init__(self):
pass
def get(self):
alphaPowerActive = clean_to_json(df_alphaActiveAllPower, 'active')
alphaPowerActiveJSON = json.loads(alphaPowerActive)
try:
return jsonify({'results': alphaPowerActiveJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class AlphaDualsTable(Resource):
def __init__(self):
pass
def get(self):
alphaDualsTable = clean_to_json(df_alphaStatsDuals, 'indexTable')
alphaDualsTableJSON = json.loads(alphaDualsTable)
try:
return jsonify({'results': alphaDualsTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class AlphaDualsIndividualCardCompletedStats(Resource):
def __init__(self):
pass
def get(self, name):
modName = name.replace('%20', ' ').replace('-', ' ').split(' ')
wordLength = len(modName)
if wordLength > 1:
modCardName = f'Alpha {modName[0].capitalize()} {modName[1].capitalize()} MTG'
else:
modCardName = f'Alpha {name.capitalize()} MTG'
df_alphaIndividualCardCompletedStats = get_all_data_individual_general(modCardName)
alphaDualsIndividualTable = clean_to_json(df_alphaIndividualCardCompletedStats, 'table')
alphaDualsIndividualTableJSON = json.loads(alphaDualsIndividualTable)
try:
return jsonify({'results': alphaDualsIndividualTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class AlphaPowerIndividualCardCompletedStats(Resource):
def __init__(self):
pass
def get(self, name):
modName = name.replace('%20', ' ').replace('-', ' ').split(' ')
wordLength = len(modName)
if wordLength > 1:
modCardName = f'Alpha {modName[0].capitalize()} {modName[1].capitalize()}'
else:
modCardName = f'Alpha {name.capitalize()}'
df_alphaIndividualCardCompletedStats = get_all_data_individual_general(modCardName)
alphaPowerIndividualTable = clean_to_json(df_alphaIndividualCardCompletedStats, 'table')
alphaPowerIndividualTableJSON = json.loads(alphaPowerIndividualTable)
try:
return jsonify({'results': alphaPowerIndividualTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class Beta(Resource):
def __init__(self):
pass
def get(self):
maxHeight = round(max(betaDataCountPower)) + 1
minHeight = round(min(betaDataCountPower)) - 10
maxHeightLength = round(max(betaDataLengthPower)) + 2
minHeightLength = round(min(betaDataLengthPower)) - 10
headers = {'Content-Type': 'text/html'}
betaDate = betaDataLengthTimestampPower
periodLength = len(betaDate)
dateRange = pd.date_range(get_timezones(periodLength), periods=periodLength).tolist()
dateRange = [i.strftime('%b. %d') for i in dateRange]
betaDateDuals = betaDataLengthTimestampDuals
periodLengthDuals = len(betaDateDuals)
dateRangeDuals = pd.date_range(get_timezones(periodLengthDuals), periods=periodLengthDuals).tolist()
dateRangeDuals = [i.strftime('%b. %d') for i in dateRangeDuals]
return make_response(render_template('beta.html',
dualsLegend=dualsLegend, maxHeight=maxHeight, minHeight=minHeight, maxHeightLength=maxHeightLength, minHeightLength=minHeightLength, countLegend=countLegend, lengthLegend=lengthLegend, sumLegend=sumLegend, powerLegend=powerLegend,
dateRange=dateRange, dateRangeDuals=dateRangeDuals,
betaDataAvgDuals=betaDataAvgDuals, betaDataAvgTimestampDuals=betaDataAvgTimestampDuals,
betaDataLengthDuals=betaDataLengthDuals, betaDataLengthTimestampDuals=betaDataLengthTimestampDuals,
betaDataCountDuals=betaDataCountDuals, betaDataCountTimestampDuals=betaDataCountTimestampDuals,
betaDataBreakdownNameDuals=betaDataBreakdownNameDuals, betaDataBreakdownavg=betaDataBreakdownAvgDuals,
betaDataAllEndDuals=betaDataAllEndDuals, betaDataAllNameDuals=betaDataAllNameDuals, betaDataAllHrefDuals=betaDataAllHrefDuals, betaDataAllPriceDuals=betaDataAllPriceDuals,
betaActiveDataAllStartDuals=betaActiveDataAllStartDuals, betaActiveDataAllNameDuals=betaActiveDataAllNameDuals, betaActiveDataAllHrefDuals=betaActiveDataAllHrefDuals, betaActiveDataAllPriceDuals=betaActiveDataAllPriceDuals,
get_percent_change_last_sold=get_percent_change_last_sold, get_premiums=get_premiums, get_count=get_data_single_product_count_90, get_length=get_data_single_product_avg_length_90, get_depth=get_data_single_product_depth,
betaDataCumulativePriceDuals=betaDataCumulativePriceDuals, betaDataCumulativeTimestampDuals=betaDataCumulativeTimestampDuals,
betaDataAvgPower=betaDataAvgPower, betaDataAvgTimestampPower=betaDataAvgTimestampPower,
betaDataLengthPower=betaDataLengthPower, betaDataLengthTimestampPower=betaDataLengthTimestampPower,
betaDataCountPower=betaDataCountPower, betaDataCountTimestampPower=betaDataCountTimestampPower,
betaDataAllEndPower=betaDataAllEndPower, betaDataAllNamePower=betaDataAllNamePower, betaDataAllHrefPower=betaDataAllHrefPower, betaDataAllPricePower=betaDataAllPricePower,
betaActiveDataAllStartPower=betaActiveDataAllStartPower, betaActiveDataAllNamePower=betaActiveDataAllNamePower, betaActiveDataAllHrefPower=betaActiveDataAllHrefPower, betaActiveDataAllPricePower=betaActiveDataAllPricePower,
betaDataCumulativePricePower=betaDataCumulativePricePower, betaDataCumulativeTimestampPower=betaDataCumulativeTimestampPower,
get_cumulative=get_data_beta_cumulative_totals, get_active_depth=get_data_active_index_count_sum), 200, headers)
class BetaPowerTable(Resource):
def __init__(self):
pass
def get(self):
betaPowerTable = clean_to_json(df_betaStatsPower, 'indexTable')
betaPowerTableJSON = json.loads(betaPowerTable)
try:
return jsonify({'results': betaPowerTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class BetaPowerIndexAverage(Resource):
def __init__(self):
pass
def get(self):
betaPowerIndexAvg = clean_to_json(df_betaAvgAllPower, 'avg')
betaPowerIndexAvgJSON = json.loads(betaPowerIndexAvg)
try:
return jsonify({'results': betaPowerIndexAvgJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class BetaPowerActive(Resource):
def __init__(self):
pass
def get(self):
betaPowerActive = clean_to_json(df_betaActiveAllPower, 'active')
betaPowerActiveJSON = json.loads(betaPowerActive)
try:
return jsonify({'results': betaPowerActiveJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class BetaDualsTable(Resource):
def __init__(self):
pass
def get(self):
betaDualsTable = clean_to_json(df_betaStatsDuals, 'indexTable')
betaDualsTableJSON = json.loads(betaDualsTable)
try:
return jsonify({'results': betaDualsTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class BetaDualsIndividualCardCompletedStats(Resource):
def __init__(self):
pass
def get(self, name):
modName = name.replace('%20', ' ').replace('-', ' ').split(' ')
wordLength = len(modName)
if wordLength > 1:
modCardName = f'Beta {modName[0].capitalize()} {modName[1].capitalize()} MTG'
else:
modCardName = f'Beta {name.capitalize()} MTG'
df_betaIndividualCardCompletedStats = get_all_data_individual_general(modCardName)
betaDualsIndividualTable = clean_to_json(df_betaIndividualCardCompletedStats, 'table')
betaDualsIndividualTableJSON = json.loads(betaDualsIndividualTable)
try:
return jsonify({'results': betaDualsIndividualTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class BetaPowerIndividualCardCompletedStats(Resource):
def __init__(self):
pass
def get(self, name):
modName = name.replace('%20', ' ').replace('-', ' ').split(' ')
wordLength = len(modName)
if wordLength > 1:
if modName[0] == 'black':
modCardName = f'Beta {modName[0].capitalize()} {modName[1].capitalize()} MTG'
else:
modCardName = f'Beta {modName[0].capitalize()} {modName[1].capitalize()}'
else:
modCardName = f'Beta {name.capitalize()}'
df_betaIndividualCardCompletedStats = get_all_data_individual_general(modCardName)
betaPowerIndividualTable = clean_to_json(df_betaIndividualCardCompletedStats, 'table')
betaPowerIndividualTableJSON = json.loads(betaPowerIndividualTable)
try:
return jsonify({'results': betaPowerIndividualTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
# begin unlimited API endpoints
class Unlimited(Resource):
def __init__(self):
pass
def get(self):
maxHeight = round(max(unlimitedDataCountPower)) + 1
minHeight = round(min(unlimitedDataCountPower)) - 10
maxHeightLength = round(max(unlimitedDataLengthPower)) + 2
minHeightLength = round(min(unlimitedDataLengthPower)) - 10
headers = {'Content-Type': 'text/html'}
unlimitedDate = unlimitedDataLengthTimestampPower
periodLength = len(unlimitedDate)
dateRange = pd.date_range(get_timezones(periodLength), periods=periodLength).tolist()
dateRange = [i.strftime('%b. %d') for i in dateRange]
unlimitedDateDuals = unlimitedDataLengthTimestampDuals
periodLengthDuals = len(unlimitedDateDuals)
dateRangeDuals = pd.date_range(get_timezones(periodLengthDuals), periods=periodLengthDuals).tolist()
dateRangeDuals = [i.strftime('%b. %d') for i in dateRangeDuals]
return make_response(render_template('unlimited.html',
dualsLegend=dualsLegend, maxHeight=maxHeight, minHeight=minHeight, maxHeightLength=maxHeightLength, minHeightLength=minHeightLength, countLegend=countLegend, lengthLegend=lengthLegend, sumLegend=sumLegend, powerLegend=powerLegend,
dateRange=dateRange[3::], dateRangeDuals=dateRangeDuals[7::],
# begin duals
unlimitedDataAvgDuals=unlimitedDataAvgDuals[7::], unlimitedDataAvgTimestampDuals=unlimitedDataAvgTimestampDuals,
unlimitedDataLengthDuals=unlimitedDataLengthDuals[7::], unlimitedDataLengthTimestampDuals=unlimitedDataLengthTimestampDuals,
unlimitedDataCountDuals=unlimitedDataCountDuals[7::], unlimitedDataCountTimestampDuals=unlimitedDataCountTimestampDuals,
unlimitedDataBreakdownNameDuals=unlimitedDataBreakdownNameDuals, unlimitedDataBreakdownavg=unlimitedDataBreakdownAvgDuals,
unlimitedDataAllEndDuals=unlimitedDataAllEndDuals, unlimitedDataAllNameDuals=unlimitedDataAllNameDuals, unlimitedDataAllHrefDuals=unlimitedDataAllHrefDuals, unlimitedDataAllPriceDuals=unlimitedDataAllPriceDuals,
unlimitedActiveDataAllStartDuals=unlimitedActiveDataAllStartDuals, unlimitedActiveDataAllNameDuals=unlimitedActiveDataAllNameDuals, unlimitedActiveDataAllHrefDuals=unlimitedActiveDataAllHrefDuals, unlimitedActiveDataAllPriceDuals=unlimitedActiveDataAllPriceDuals,
get_percent_change_last_sold=get_percent_change_last_sold, get_premiums=get_premiums, get_count=get_data_single_product_count_90, get_length=get_data_single_product_avg_length_90, get_depth=get_data_single_product_depth,
unlimitedDataCumulativePriceDuals=unlimitedDataCumulativePriceDuals[7::], unlimitedDataCumulativeTimestampDuals=unlimitedDataCumulativeTimestampDuals,
# begin power
unlimitedDataAvgPower=unlimitedDataAvgPower[3::], unlimitedDataAvgTimestampPower=unlimitedDataAvgTimestampPower,
unlimitedDataLengthPower=unlimitedDataLengthPower[3::], unlimitedDataLengthTimestampPower=unlimitedDataLengthTimestampPower,
unlimitedDataCountPower=unlimitedDataCountPower[3::], unlimitedDataCountTimestampPower=unlimitedDataCountTimestampPower,
unlimitedDataAllEndPower=unlimitedDataAllEndPower, unlimitedDataAllNamePower=unlimitedDataAllNamePower, unlimitedDataAllHrefPower=unlimitedDataAllHrefPower, unlimitedDataAllPricePower=unlimitedDataAllPricePower,
unlimitedActiveDataAllStartPower=unlimitedActiveDataAllStartPower, unlimitedActiveDataAllNamePower=unlimitedActiveDataAllNamePower, unlimitedActiveDataAllHrefPower=unlimitedActiveDataAllHrefPower, unlimitedActiveDataAllPricePower=unlimitedActiveDataAllPricePower,
unlimitedDataCumulativePricePower=unlimitedDataCumulativePricePower[3::], unlimitedDataCumulativeTimestampPower=unlimitedDataCumulativeTimestampPower,
get_cumulative=get_data_unlimited_cumulative_totals, get_active_depth=get_data_active_index_count_sum), 200, headers)
class UnlimitedPowerTable(Resource):
def __init__(self):
pass
def get(self):
unlimitedPowerTable = clean_to_json(df_unlimitedStatsPower, 'indexTable')
unlimitedPowerTableJSON = json.loads(unlimitedPowerTable)
try:
return jsonify({'results': unlimitedPowerTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class UnlimitedPowerIndexAverage(Resource):
def __init__(self):
pass
def get(self):
unlimitedPowerIndexAvg = clean_to_json(df_unlimitedAvgAllPower, 'avg')
unlimitedPowerIndexAvgJSON = json.loads(unlimitedPowerIndexAvg)
try:
return jsonify({'results': unlimitedPowerIndexAvgJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class UnlimitedPowerActive(Resource):
def __init__(self):
pass
def get(self):
unlimitedActivePower = clean_to_json(df_unlimitedActiveAllPower, 'active')
unlimitedActivePowerJSON = json.loads(unlimitedActivePower)
try:
return jsonify({'results': unlimitedActivePowerJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class UnlimitedDualsTable(Resource):
def __init__(self):
pass
def get(self):
unlimitedDualsTable = clean_to_json(df_unlimitedStatsDuals, 'indexTable')
unlimitedDualsTableJSON = json.loads(unlimitedDualsTable)
try:
return jsonify({'results': unlimitedDualsTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class UnlimitedDualsIndividualCardCompletedStats(Resource):
def __init__(self):
pass
def get(self, name):
modName = name.replace('%20', ' ').replace('-', ' ').split(' ')
wordLength = len(modName)
if wordLength > 1:
modCardName = f'Unlimited {modName[0].capitalize()} {modName[1].capitalize()} MTG'
else:
modCardName = f'Unlimited {name.capitalize()} MTG'
df_unlimitedIndividualCardCompletedStats = get_all_data_individual_general(modCardName)
unlimitedDualsIndividualTable = clean_to_json(df_unlimitedIndividualCardCompletedStats, 'table')
unlimitedDualsIndividualTableJSON = json.loads(unlimitedDualsIndividualTable)
try:
return jsonify({'results': unlimitedDualsIndividualTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class UnlimitedPowerIndividualCardCompletedStats(Resource):
def __init__(self):
pass
def get(self, name):
modName = name.replace('%20', ' ').replace('-', ' ').split(' ')
wordLength = len(modName)
if wordLength > 1:
if modName[0] == 'black':
modCardName = f'Unlimited {modName[0].capitalize()} {modName[1].capitalize()} MTG'
else:
modCardName = f'Unlimited {modName[0].capitalize()} {modName[1].capitalize()}'
else:
modCardName = f'Unlimited {name.capitalize()}'
df_unlimitedIndividualCardCompletedStats = get_all_data_individual_general(modCardName)
unlimitedPowerIndividualTable = clean_to_json(df_unlimitedIndividualCardCompletedStats, 'table')
unlimitedPowerIndividualTableJSON = json.loads(unlimitedPowerIndividualTable)
try:
return jsonify({'results': unlimitedPowerIndividualTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
# begin ce API endpoints
class Ce(Resource):
def __init__(self):
pass
def get(self):
maxHeight = round(max(ceDataCountPower)) + 1
minHeight = round(min(ceDataCountPower)) - 10
maxHeightLength = round(max(ceDataLengthPower)) + 2
minHeightLength = round(min(ceDataLengthPower)) - 10
headers = {'Content-Type': 'text/html'}
ceDate = ceDataLengthTimestampPower
periodLength = len(ceDate)
dateRange = pd.date_range(get_timezones(periodLength), periods=periodLength).tolist()
dateRange = [i.strftime('%b. %d') for i in dateRange]
ceDateDuals = ceDataLengthTimestampDuals
periodLengthDuals = len(ceDateDuals)
dateRangeDuals = pd.date_range(get_timezones(periodLengthDuals), periods=periodLengthDuals).tolist()
dateRangeDuals = [i.strftime('%b. %d') for i in dateRangeDuals]
return make_response(render_template('ce.html',
dualsLegend=dualsLegend, maxHeight=maxHeight, minHeight=minHeight, maxHeightLength=maxHeightLength, minHeightLength=minHeightLength, countLegend=countLegend, lengthLegend=lengthLegend, sumLegend=sumLegend, powerLegend=powerLegend,
dateRange=dateRange[3::], dateRangeDuals=dateRangeDuals[7::],
# begin duals
ceDataAvgDuals=ceDataAvgDuals[7::], ceDataAvgTimestampDuals=ceDataAvgTimestampDuals,
ceDataLengthDuals=ceDataLengthDuals[7::], ceDataLengthTimestampDuals=ceDataLengthTimestampDuals,
ceDataCountDuals=ceDataCountDuals[7::], ceDataCountTimestampDuals=ceDataCountTimestampDuals,
ceDataBreakdownNameDuals=ceDataBreakdownNameDuals, ceDataBreakdownavg=ceDataBreakdownAvgDuals,
ceDataAllEndDuals=ceDataAllEndDuals, ceDataAllNameDuals=ceDataAllNameDuals, ceDataAllHrefDuals=ceDataAllHrefDuals, ceDataAllPriceDuals=ceDataAllPriceDuals,
ceActiveDataAllStartDuals=ceActiveDataAllStartDuals, ceActiveDataAllNameDuals=ceActiveDataAllNameDuals, ceActiveDataAllHrefDuals=ceActiveDataAllHrefDuals, ceActiveDataAllPriceDuals=ceActiveDataAllPriceDuals,
get_percent_change_last_sold=get_percent_change_last_sold, get_premiums=get_premiums, get_count=get_data_single_product_count_90, get_length=get_data_single_product_avg_length_90, get_depth=get_data_single_product_depth,
ceDataCumulativePriceDuals=ceDataCumulativePriceDuals[7::], ceDataCumulativeTimestampDuals=ceDataCumulativeTimestampDuals,
# begin power
ceDataAvgPower=ceDataAvgPower[3::], ceDataAvgTimestampPower=ceDataAvgTimestampPower,
ceDataLengthPower=ceDataLengthPower[3::], ceDataLengthTimestampPower=ceDataLengthTimestampPower,
ceDataCountPower=ceDataCountPower[3::], ceDataCountTimestampPower=ceDataCountTimestampPower,
ceDataAllEndPower=ceDataAllEndPower, ceDataAllNamePower=ceDataAllNamePower, ceDataAllHrefPower=ceDataAllHrefPower, ceDataAllPricePower=ceDataAllPricePower,
ceActiveDataAllStartPower=ceActiveDataAllStartPower, ceActiveDataAllNamePower=ceActiveDataAllNamePower, ceActiveDataAllHrefPower=ceActiveDataAllHrefPower, ceActiveDataAllPricePower=ceActiveDataAllPricePower,
ceDataCumulativePricePower=ceDataCumulativePricePower[3::], ceDataCumulativeTimestampPower=ceDataCumulativeTimestampPower,
get_cumulative=get_data_ce_cumulative_totals, get_active_depth=get_data_active_index_count_sum), 200, headers)
class CePowerTable(Resource):
def __init__(self):
pass
def get(self):
cePowerTable = clean_to_json(df_ceStatsPower, 'indexTable')
cePowerTableJSON = json.loads(cePowerTable)
try:
return jsonify({'results': cePowerTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class CePowerIndexAverage(Resource):
def __init__(self):
pass
def get(self):
cePowerIndexAvg = clean_to_json(df_ceAvgAllPower, 'avg')
cePowerIndexAvgJSON = json.loads(cePowerIndexAvg)
try:
return jsonify({'results': cePowerIndexAvgJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class CePowerActive(Resource):
def __init__(self):
pass
def get(self):
ceActivePower = clean_to_json(df_ceActiveAllPower, 'active')
ceActivePowerJSON = json.loads(ceActivePower)
try:
return jsonify({'results': ceActivePowerJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class CeDualsTable(Resource):
def __init__(self):
pass
def get(self):
ceDualsTable = clean_to_json(df_ceStatsDuals, 'indexTable')
ceDualsTableJSON = json.loads(ceDualsTable)
try:
return jsonify({'results': ceDualsTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class CeDualsIndividualCardCompletedStats(Resource):
def __init__(self):
pass
def get(self, name):
modName = name.replace('%20', ' ').replace('-', ' ').split(' ')
wordLength = len(modName)
if wordLength > 1:
modCardName = f'Ce {modName[0].capitalize()} {modName[1].capitalize()} MTG'
else:
modCardName = f'Ce {name.capitalize()} MTG'
df_ceIndividualCardCompletedStats = get_all_data_individual_general(modCardName)
ceDualsIndividualTable = clean_to_json(df_ceIndividualCardCompletedStats, 'table')
ceDualsIndividualTableJSON = json.loads(ceDualsIndividualTable)
try:
return jsonify({'results': ceDualsIndividualTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class CePowerIndividualCardCompletedStats(Resource):
def __init__(self):
pass
def get(self, name):
modName = name.replace('%20', ' ').replace('-', ' ').split(' ')
wordLength = len(modName)
if wordLength > 1:
if modName[0] == 'black':
modCardName = f'Ce {modName[0].capitalize()} {modName[1].capitalize()} MTG'
else:
modCardName = f'Ce {modName[0].capitalize()} {modName[1].capitalize()}'
else:
modCardName = f'Ce {name.capitalize()}'
df_ceIndividualCardCompletedStats = get_all_data_individual_general(modCardName)
cePowerIndividualTable = clean_to_json(df_ceIndividualCardCompletedStats, 'table')
cePowerIndividualTableJSON = json.loads(cePowerIndividualTable)
try:
return jsonify({'results': cePowerIndividualTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
# begin revised API endpoints
class Revised(Resource):
def __init__(self):
pass
def get(self):
revisedDataLengthLegendDuals = 'Average Length (days)'
revisedDataCountLegendDuals = 'Total Sold (listings)'
revisedDataCumulativeLegendDuals = 'Cumulative Sales (gross)'
maxHeight = round(max(revisedDataCountDuals)) + 1
minHeight = round(min(revisedDataCountDuals)) - 10
maxHeightLength = round(max(revisedDataLengthDuals)) + 2
minHeightLength = round(min(revisedDataLengthDuals)) - 10
headers = {'Content-Type': 'text/html'}
revisedDate = revisedDataLengthTimestampDuals
periodLength = len(revisedDate)
dateRange = pd.date_range(get_timezones(periodLength), periods=periodLength).tolist()
dateRange = [i.strftime('%b. %d') for i in dateRange]
return make_response(render_template('revised.html',
dualsLegend=dualsLegend, maxHeight=maxHeight, minHeight=minHeight, maxHeightLength=maxHeightLength, minHeightLength=minHeightLength,
dateRange=dateRange[2::],
revisedDataAvgDuals=revisedDataAvgDuals[2::], revisedDataAvgTimestampDuals=revisedDataAvgTimestampDuals,
revisedDataLengthDuals=revisedDataLengthDuals[2::], revisedDataLengthTimestampDuals=revisedDataLengthTimestampDuals, revisedDataLengthLegendDuals=revisedDataLengthLegendDuals,
revisedDataCountDuals=revisedDataCountDuals[2::], revisedDataCountTimestampDuals=revisedDataCountTimestampDuals, revisedDataCountLegendDuals=revisedDataCountLegendDuals,
revisedDataBreakdownNameDuals=revisedDataBreakdownNameDuals, revisedDataBreakdownavg=revisedDataBreakdownAvgDuals,
revisedDataAllEndDuals=revisedDataAllEndDuals, revisedDataAllNameDuals=revisedDataAllNameDuals, revisedDataAllHrefDuals=revisedDataAllHrefDuals, revisedDataAllPriceDuals=revisedDataAllPriceDuals,
revisedActiveDataAllStartDuals=revisedActiveDataAllStartDuals, revisedActiveDataAllNameDuals=revisedActiveDataAllNameDuals, revisedActiveDataAllHrefDuals=revisedActiveDataAllHrefDuals, revisedActiveDataAllPriceDuals=revisedActiveDataAllPriceDuals,
get_percent_change_last_sold=get_percent_change_last_sold, get_premiums=get_premiums, get_count=get_data_single_product_count_90, get_length=get_data_single_product_avg_length_90, get_depth=get_data_single_product_depth,
revisedDataCumulativePriceDuals=revisedDataCumulativePriceDuals[2::], revisedDataCumulativeTimestampDuals=revisedDataCumulativeTimestampDuals, revisedDataCumulativeLegendDuals=revisedDataCumulativeLegendDuals,
get_cumulative=get_data_revised_cumulative_totals, get_active_depth=get_data_active_index_count_sum), 200, headers)
class RevisedDualsTable(Resource):
def __init__(self):
pass
def get(self):
revisedDualsTable = clean_to_json(df_revisedStatsDuals, 'indexTable')
revisedDualsTableJSON = json.loads(revisedDualsTable)
try:
return jsonify({'results': revisedDualsTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class RevisedDualsIndexAverage(Resource):
def __init__(self):
pass
def get(self):
revisedDualsIndexAvg = clean_to_json(df_revisedAvgAllDuals, 'avg')
revisedDualsIndexAvgJSON = json.loads(revisedDualsIndexAvg)
try:
return jsonify({'results': revisedDualsIndexAvgJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class RevisedDualsActive(Resource):
def __init__(self):
pass
def get(self):
revisedActiveDuals = clean_to_json(df_revisedActiveAllDuals, 'active')
revisedActiveDualsJSON = json.loads(revisedActiveDuals)
try:
return jsonify({'results': revisedActiveDualsJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
class RevisedIndividualCardCompletedStats(Resource):
def __init__(self):
pass
def get(self, name):
modName = name.replace('%20', ' ').replace('-', ' ').split(' ')
wordLength = len(modName)
if wordLength > 1:
modCardName = f'Revised {modName[0].capitalize()} {modName[1].capitalize()} MTG'
else:
modCardName = f'Revised {name.capitalize()} MTG'
df_revisedIndividualCardCompletedStats = get_all_data_individual_general(modCardName)
revisedDualsIndividualTable = clean_to_json(df_revisedIndividualCardCompletedStats, 'table')
revisedDualsIndividualTableJSON = json.loads(revisedDualsIndividualTable)
try:
return jsonify({'results': revisedDualsIndividualTableJSON})
except Exception as e:
return jsonify({'results': 'failed'},
{'error': e})
#######################
# begin all endpoints #
#######################
# TODO: should these be declared as regular flask routes (although isn't that the same as get-requesting an api endpoint that returns only html?)
# TODO: should this be fed in from a jinja template instead of an api call -> render?
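# A plain Flask route would look roughly like the sketch below (illustrative
# only, not wired in; it assumes the same `app` object, the module-level
# `headers` dict and an 'about.html' template):
#
# @app.route('/about')
# def about_page():
# return make_response(render_template('about.html'), 200, headers)
#
# Functionally that is the same as GET-ing an api.add_resource endpoint that
# only returns HTML; one reason to keep flask-restful is that every page and
# JSON endpoint is registered through the same Api instance.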
api.add_resource(HomePage, '/')
api.add_resource(Active, '/active')
api.add_resource(About, '/about')
api.add_resource(Footer, '/footer')
api.add_resource(Alpha, '/alpha')
api.add_resource(Beta, '/beta')
api.add_resource(Unlimited, '/unlimited')
api.add_resource(Revised, '/revised')
api.add_resource(Ce, '/collectors')
# begin alpha api endpoints
api.add_resource(AlphaPowerIndexAverage, '/api/alpha/power/index/avg')
api.add_resource(AlphaPowerActive, '/api/alpha/power/active')
api.add_resource(AlphaDualsTable, '/api/alpha/duals/table')
api.add_resource(AlphaPowerTable, '/api/alpha/power/table')
api.add_resource(AlphaDualsIndividualCardCompletedStats, '/api/alpha/duals/<name>')
api.add_resource(AlphaPowerIndividualCardCompletedStats, '/api/alpha/power/<name>')
# begin beta api endpoints
api.add_resource(BetaPowerIndexAverage, '/api/beta/power/index/avg')
api.add_resource(BetaPowerActive, '/api/beta/power/active')
api.add_resource(BetaDualsTable, '/api/beta/duals/table')
api.add_resource(BetaPowerTable, '/api/beta/power/table')
api.add_resource(BetaDualsIndividualCardCompletedStats, '/api/beta/duals/<name>')
api.add_resource(BetaPowerIndividualCardCompletedStats, '/api/beta/power/<name>')
# begin unlimited api endpoints
api.add_resource(UnlimitedPowerIndexAverage, '/api/unlimited/power/index/avg')
api.add_resource(UnlimitedPowerActive, '/api/unlimited/power/active')
api.add_resource(UnlimitedDualsTable, '/api/unlimited/duals/table')
api.add_resource(UnlimitedPowerTable, '/api/unlimited/power/table')
api.add_resource(UnlimitedDualsIndividualCardCompletedStats, '/api/unlimited/duals/<name>')
api.add_resource(UnlimitedPowerIndividualCardCompletedStats, '/api/unlimited/power/<name>')
# begin ce & ice api endpoints
api.add_resource(CePowerIndexAverage, '/api/collectors/power/index/avg')
api.add_resource(CePowerActive, '/api/collectors/power/active')
api.add_resource(CeDualsTable, '/api/collectors/duals/table')
api.add_resource(CePowerTable, '/api/collectors/power/table')
api.add_resource(CeDualsIndividualCardCompletedStats, '/api/collectors/duals/<name>')
api.add_resource(CePowerIndividualCardCompletedStats, '/api/collectors/power/<name>')
# begin revised api endpoints
api.add_resource(RevisedDualsIndexAverage, '/api/revised/duals/index/avg')
api.add_resource(RevisedDualsActive, '/api/revised/duals/active')
api.add_resource(RevisedDualsTable, '/api/revised/duals/table')
api.add_resource(RevisedIndividualCardCompletedStats, '/api/revised/duals/<name>')
# begin general api endpoints
api.add_resource(GeneralIndexAverage, '/api/general/index')
if __name__ == "__main__":
app.config['TEMPLATES_AUTO_RELOAD'] = True
# app.run(host='0.0.0.0')
app.run(debug=True, port=8050, threaded=True)
| avg_line_length: 62.591725 | max_line_length: 275 | alphanum_fraction: 0.724944 |
hexsha: 00b8a754caae0a4b841a5cbf2c724b6541742ab9 | size: 7289 | ext: py | lang: Python
max_stars_repo_path: diofant/calculus/optimization.py | max_stars_repo_name: diofant/omg | max_stars_repo_head_hexsha: 72fd45f832240d1ded6f0a411e97bb9f7aa9f1d2 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: diofant/calculus/optimization.py | max_issues_repo_name: diofant/omg | max_issues_repo_head_hexsha: 72fd45f832240d1ded6f0a411e97bb9f7aa9f1d2 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: diofant/calculus/optimization.py | max_forks_repo_name: diofant/omg | max_forks_repo_head_hexsha: 72fd45f832240d1ded6f0a411e97bb9f7aa9f1d2 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from ..core import Integer, Lt, diff, nan, oo, sympify
from ..core.compatibility import is_sequence
from ..functions import Min
from ..matrices import eye, zeros
from ..series import limit
from ..sets import Interval
from ..solvers import reduce_inequalities, solve
from ..solvers.inequalities import canonicalize_inequalities
from ..utilities import ordered
from .singularities import singularities
__all__ = 'minimize', 'maximize'
def minimize(f, *v):
"""Minimizes `f` with respect to given variables `v`.
Examples
========
>>> minimize(x**2, x)
(0, {x: 0})
>>> minimize([x**2, x >= 1], x)
(1, {x: 1})
>>> minimize([-x**2, x >= -2, x <= 1], x)
(-4, {x: -2})
See Also
========
maximize
"""
f = set(map(sympify, f if is_sequence(f) else [f]))
constraints = {c for c in f if c.is_Relational}
assert len(f - constraints) == 1
obj = (f - constraints).pop()
if not v:
v = obj.free_symbols
if not v:
return obj, {}
v = list(ordered(v))
dim = len(v)
assert all(x.is_Symbol for x in v)
constraints = canonicalize_inequalities(constraints)
if dim == 1:
x = v[0]
if constraints:
constraints.extend([x - oo < 0, -oo - x < 0])
dom = reduce_inequalities(constraints, x).as_set()
else:
dom = Interval(-oo, oo, True, True)**len(v)
return minimize_univariate(obj, x, dom)
polys = [obj.as_poly(*v)] + [c.lhs.as_poly(*v) for c in constraints]
is_polynomial = all(p is not None for p in polys)
is_linear = is_polynomial and all(p.is_linear for p in polys)
if is_linear:
# Quick exit for strict forms
if any(isinstance(c, Lt) for c in constraints):
return
# Transform to the standard form: maximize cᵀx with m⋅x≤b, x≥0.
# We replace the original vector of unrestricted variables v with
# x of doubled size, so e.g. for the first component of v we
# will have v₁ = x₁⁺ - x₁⁻, where x₁⁺≥0 and x₁⁻≥0.
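# (Illustrative example: an unrestricted value v₁ = -3 is encoded as x₁⁺ = 0,
# x₁⁻ = 3, so both simplex variables stay nonnegative while their difference
# reproduces the original value.)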
c = [-polys[0].coeff_monomial(x) for x in v]
c.extend([-_ for _ in c])
m = [([+p.coeff_monomial(x) for x in v] +
[-p.coeff_monomial(x) for x in v])
for p in polys[1:]]
b = [-p.coeff_monomial(1) for p in polys[1:]]
res, sol = simplex(c, m, b)
res -= polys[0].coeff_monomial(1)
sol = map(lambda x, y: x - y, sol[:dim], sol[dim:])
return -res, dict(zip(v, sol))
raise NotImplementedError
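# maximize below relies on max f = -min(-f): the objective is negated,
# relational constraints are passed through unchanged, and the sign of the
# optimum is flipped back.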
def maximize(f, *v):
"""
Maximizes `f` with respect to given variables `v`.
See Also
========
minimize
"""
f = set(map(sympify, f if is_sequence(f) else [f]))
fv, d = minimize([e if e.is_Relational else -e for e in f], *v)
return -fv, d
def minimize_univariate(f, x, dom):
extr = {}
if dom.is_Union:
for d in dom.args:
fp, r = minimize_univariate(f, x, d)
extr[r[x]] = fp
elif dom.is_Interval:
if not dom.left_open:
extr[dom.start] = limit(f, x, dom.start)
if not dom.right_open:
extr[dom.end] = limit(f, x, dom.end, dir=1)
for s in singularities(f, x):
if s in dom:
m = Min(limit(f, x, s), limit(f, x, s, dir=1))
if m == -oo:
return -oo, dict({x: s})
else:
extr[s] = m
for p in solve(diff(f, x), x):
p = p[x]
if p in dom:
extr[p] = f.subs({x: p})
elif dom.is_FiniteSet:
for p in dom.args:
extr[p] = f.subs({x: p})
else:
raise NotImplementedError
if extr:
min, point = oo, nan
for p, fp in sorted(extr.items()):
if fp < min:
point, min = p, fp
return min, dict({x: point})
class InfeasibleProblem(Exception):
pass
def simplex(c, m, b):
"""
Simplex algorithm for linear programming.
Find a vector x with nonnegative elements that maximizes the
quantity `c^T x`, subject to the constraints `m x <= b`.
Examples
========
>>> simplex([2, 3, 4], [[3, 2, 1], [2, 5, 3]], [10, 15])
(20, (0, 0, 5))
References
==========
* Paul R. Thie, Gerard E. Keough, An Introduction to Linear
Programming and Game Theory, Third edition, 2008, Ch. 3.
"""
rows, cols = len(b), len(c)
if len(m) != rows or any(len(_) != cols for _ in m):
raise ValueError("The dimensions don't match")
m = sorted(m, key=lambda v: b[m.index(v)])
b = sorted(b)
# build full tableau
tableau = zeros(rows + 1, cols + rows + 1)
tableau[-1, :-1] = [[-_ for _ in c] + [0]*rows]
tableau[:-1, :cols] = m
tableau[:-1, cols:-1] = eye(rows)
tableau[:, -1] = b + [0]
def pivot_col(obj):
# use Bland's rule
for i in range(len(obj) - 1): # pragma: no branch
if obj[i] < 0:
return i
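# Bland's rule (always pick the first eligible column) guarantees the simplex
# iteration cannot cycle; pivot_row below is the usual minimum-ratio test,
# which keeps the right-hand side nonnegative after the pivot.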
def pivot_row(lhs, rhs):
ratio, idx = oo, 0
for i, l in enumerate(lhs):
if l > 0:
r = rhs[i]/l
if r < ratio:
ratio, idx = r, i
return idx
def solve_simplex(tableau, basis, phase1=False):
while min(tableau[-1, :-1]) < 0:
col = pivot_col(tableau[-1, :])
row = pivot_row(tableau[:-1 - phase1, col], tableau[:, -1])
if tableau[row, col] <= 0:
return 1
else:
basis[row] = col
tableau[row, :] /= tableau[row, col]
for r in range(tableau.rows):
if r != row:
tableau[r, :] -= tableau[r, col]*tableau[row, :]
return 0
# Now solve
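# Phase 1 of the two-phase method: every row with a negative right-hand side
# gets its sign flipped plus an artificial variable, and an auxiliary objective
# (the extra bottom row) drives those artificials to zero. If that objective
# cannot reach zero, the constraints are inconsistent (InfeasibleProblem).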
neg_idx = [b.index(_) for _ in b if _ < 0]
nneg = len(neg_idx)
basis = list(range(cols + nneg - 1, cols + nneg + rows - 1))
if neg_idx:
tableau = tableau.col_insert(-1, zeros(tableau.rows, nneg))
tableau = tableau.row_insert(tableau.cols, zeros(1, tableau.cols))
j = tableau.cols - nneg - 1
for i in neg_idx:
tableau[i, :] *= -1
tableau[i, j] = 1
tableau[-1, :-1 - nneg] -= tableau[i, :-1 - nneg]
tableau[-1, -1] -= tableau[i, -1]
j += 1
status = solve_simplex(tableau, basis, phase1=True)
assert status == 0
if tableau[-1, -1].is_nonzero:
raise InfeasibleProblem
del tableau[-1, :]
for i in range(nneg):
del tableau[:, -2]
for row in [_ for _ in range(rows) if basis[_] > cols + rows - 1]:
for col in range(tableau.cols - 1): # pragma: no branch
if tableau[row, col] != 0:
break
basis[row] = col
tableau[row, :] /= tableau[row, col]
for r in range(tableau.rows):
if r != row:
tableau[r, :] -= tableau[r, col]*tableau[row, :]
status = solve_simplex(tableau, basis)
if status == 1:
return oo, (oo,)*cols
ans = [Integer(0)]*cols
for c, b in enumerate(basis):
if b < cols:
ans[b] = tableau[:-1, -1][c]
return tableau[-1, -1], tuple(ans)
| avg_line_length: 27.714829 | max_line_length: 74 | alphanum_fraction: 0.516532 |
hexsha: be10d5005166bb8a864df1bff8dac18dbb5c986f | size: 68280 | ext: py | lang: Python
max_stars_repo_path: cde-root/usr/lib64/python2.4/site-packages/numpy/core/numeric.py | max_stars_repo_name: NirBenTalLab/proorigami-cde-package | max_stars_repo_head_hexsha: 273379075830a9b94d3f2884661a54f853777ff6 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2017-02-08T12:46:18.000Z | max_stars_repo_stars_event_max_datetime: 2017-02-08T12:46:18.000Z
max_issues_repo_path: cde-root/usr/lib64/python2.4/site-packages/numpy/core/numeric.py | max_issues_repo_name: NirBenTalLab/proorigami-cde-package | max_issues_repo_head_hexsha: 273379075830a9b94d3f2884661a54f853777ff6 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: cde-root/usr/lib64/python2.4/site-packages/numpy/core/numeric.py | max_forks_repo_name: NirBenTalLab/proorigami-cde-package | max_forks_repo_head_hexsha: 273379075830a9b94d3f2884661a54f853777ff6 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
__all__ = ['newaxis', 'ndarray', 'flatiter', 'ufunc',
'arange', 'array', 'zeros', 'empty', 'broadcast', 'dtype',
'fromstring', 'fromfile', 'frombuffer','newbuffer',
'getbuffer', 'int_asbuffer', 'where', 'argwhere',
'concatenate', 'fastCopyAndTranspose', 'lexsort',
'set_numeric_ops', 'can_cast',
'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray',
'isfortran', 'empty_like', 'zeros_like',
'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot',
'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot',
'array2string', 'get_printoptions', 'set_printoptions',
'array_repr', 'array_str', 'set_string_function',
'little_endian', 'require',
'fromiter', 'array_equal', 'array_equiv',
'indices', 'fromfunction',
'load', 'loads', 'isscalar', 'binary_repr', 'base_repr',
'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask',
'seterr', 'geterr', 'setbufsize', 'getbufsize',
'seterrcall', 'geterrcall', 'errstate', 'flatnonzero',
'Inf', 'inf', 'infty', 'Infinity',
'nan', 'NaN', 'False_', 'True_', 'bitwise_not',
'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS']
import sys
import warnings
import multiarray
import umath
from umath import *
import numerictypes
from numerictypes import *
bitwise_not = invert
CLIP = multiarray.CLIP
WRAP = multiarray.WRAP
RAISE = multiarray.RAISE
MAXDIMS = multiarray.MAXDIMS
ALLOW_THREADS = multiarray.ALLOW_THREADS
BUFSIZE = multiarray.BUFSIZE
ndarray = multiarray.ndarray
flatiter = multiarray.flatiter
broadcast = multiarray.broadcast
dtype = multiarray.dtype
ufunc = type(sin)
# originally from Fernando Perez's IPython
def zeros_like(a):
"""
Return an array of zeros with the same shape and type as a given array.
Equivalent to ``a.copy().fill(0)``.
Parameters
----------
a : array_like
The shape and data-type of `a` define the parameters of
the returned array.
Returns
-------
out : ndarray
Array of zeros with same shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.zeros_like(x)
array([[0, 0, 0],
[0, 0, 0]])
>>> y = np.arange(3, dtype=np.float)
>>> y
array([ 0., 1., 2.])
>>> np.zeros_like(y)
array([ 0., 0., 0.])
"""
if isinstance(a, ndarray):
res = ndarray.__new__(type(a), a.shape, a.dtype, order=a.flags.fnc)
res.fill(0)
return res
try:
wrap = a.__array_wrap__
except AttributeError:
wrap = None
a = asarray(a)
res = zeros(a.shape, a.dtype)
if wrap:
res = wrap(res)
return res
def empty_like(a):
"""
Return a new array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define the parameters of the
returned array.
Returns
-------
out : ndarray
Array of random data with the same shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than the
functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], #random
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], #random
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
"""
if isinstance(a, ndarray):
res = ndarray.__new__(type(a), a.shape, a.dtype, order=a.flags.fnc)
return res
try:
wrap = a.__array_wrap__
except AttributeError:
wrap = None
a = asarray(a)
res = empty(a.shape, a.dtype)
if wrap:
res = wrap(res)
return res
# end Fernando's utilities
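# extend_all merges another module's public names (its __all__, or every
# attribute not starting with an underscore when __all__ is missing) into this
# module's __all__, skipping duplicates.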
def extend_all(module):
adict = {}
for a in __all__:
adict[a] = 1
try:
mall = getattr(module, '__all__')
except AttributeError:
mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
for a in mall:
if a not in adict:
__all__.append(a)
extend_all(umath)
extend_all(numerictypes)
newaxis = None
arange = multiarray.arange
array = multiarray.array
zeros = multiarray.zeros
empty = multiarray.empty
fromstring = multiarray.fromstring
fromiter = multiarray.fromiter
fromfile = multiarray.fromfile
frombuffer = multiarray.frombuffer
newbuffer = multiarray.newbuffer
getbuffer = multiarray.getbuffer
int_asbuffer = multiarray.int_asbuffer
where = multiarray.where
concatenate = multiarray.concatenate
fastCopyAndTranspose = multiarray._fastCopyAndTranspose
set_numeric_ops = multiarray.set_numeric_ops
can_cast = multiarray.can_cast
lexsort = multiarray.lexsort
compare_chararrays = multiarray.compare_chararrays
putmask = multiarray.putmask
def asarray(a, dtype=None, order=None):
"""
Convert the input to an array.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('F' for FORTRAN)
memory representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
See Also
--------
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
asarray_chkfinite : Similar function which checks input for NaNs and Infs.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array:
>>> a = [1, 2]
>>> np.asarray(a)
array([1, 2])
Existing arrays are not copied:
>>> a = np.array([1, 2])
>>> np.asarray(a) is a
True
Contrary to `asanyarray`, ndarray subclasses are not passed through:
>>> issubclass(np.matrix, np.ndarray)
True
>>> a = np.matrix([[1, 2]])
>>> np.asarray(a) is a
False
>>> np.asanyarray(a) is a
True
"""
return array(a, dtype, copy=False, order=order)
def asanyarray(a, dtype=None, order=None):
"""
Convert the input to a ndarray, but pass ndarray subclasses through.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes scalars, lists, lists of tuples, tuples, tuples of tuples,
tuples of lists and ndarrays.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('F') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray or an ndarray subclass
Array interpretation of `a`. If `a` is an ndarray or a subclass
of ndarray, it is returned as-is and no copy is performed.
See Also
--------
asarray : Similar function which always returns ndarrays.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
asarray_chkfinite : Similar function which checks input for NaNs and Infs.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array:
>>> a = [1, 2]
>>> np.asanyarray(a)
array([1, 2])
Instances of `ndarray` subclasses are passed through as-is:
>>> a = np.matrix([1, 2])
>>> np.asanyarray(a) is a
True
"""
return array(a, dtype, copy=False, order=order, subok=True)
def ascontiguousarray(a, dtype=None):
"""
Return a contiguous array in memory (C order).
Parameters
----------
a : array_like
Input array.
dtype : str or dtype object, optional
Data-type of returned array.
Returns
-------
out : ndarray
Contiguous array of same shape and content as `a`, with type `dtype`
if specified.
See Also
--------
asfortranarray : Convert input to an ndarray with column-major
memory order.
require : Return an ndarray that satisfies requirements.
ndarray.flags : Information about the memory layout of the array.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> np.ascontiguousarray(x, dtype=np.float32)
array([[ 0., 1., 2.],
[ 3., 4., 5.]], dtype=float32)
>>> x.flags['C_CONTIGUOUS']
True
"""
return array(a, dtype, copy=False, order='C', ndmin=1)
def asfortranarray(a, dtype=None):
"""
Return an array laid out in Fortran order in memory.
Parameters
----------
a : array_like
Input array.
dtype : str or dtype object, optional
By default, the data-type is inferred from the input data.
Returns
-------
out : ndarray
The input `a` in Fortran, or column-major, order.
See Also
--------
ascontiguousarray : Convert input to a contiguous (C order) array.
asanyarray : Convert input to an ndarray with either row or
column-major memory order.
require : Return an ndarray that satisfies requirements.
ndarray.flags : Information about the memory layout of the array.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> y = np.asfortranarray(x)
>>> x.flags['F_CONTIGUOUS']
False
>>> y.flags['F_CONTIGUOUS']
True
"""
return array(a, dtype, copy=False, order='F', ndmin=1)
def require(a, dtype=None, requirements=None):
"""
Return an ndarray of the provided type that satisfies requirements.
This function is useful to be sure that an array with the correct flags
is returned for passing to compiled code (perhaps through ctypes).
Parameters
----------
a : array_like
The object to be converted to a type-and-requirement-satisfying array.
dtype : data-type
The required data-type; the default data-type is float64.
requirements : str or list of str
The requirements list can be any of the following
* 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
* 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
* 'ALIGNED' ('A') - ensure a data-type aligned array
* 'WRITEABLE' ('W') - ensure a writable array
* 'OWNDATA' ('O') - ensure an array that owns its own data
See Also
--------
asarray : Convert input to an ndarray.
asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfortranarray : Convert input to an ndarray with column-major
memory order.
ndarray.flags : Information about the memory layout of the array.
Notes
-----
The returned array will be guaranteed to have the listed requirements
by making a copy if needed.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : False
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
>>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
>>> y.flags
C_CONTIGUOUS : False
F_CONTIGUOUS : True
OWNDATA : True
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
"""
if requirements is None:
requirements = []
else:
requirements = [x.upper() for x in requirements]
if not requirements:
return asanyarray(a, dtype=dtype)
if 'ENSUREARRAY' in requirements or 'E' in requirements:
subok = False
else:
subok = True
arr = array(a, dtype=dtype, copy=False, subok=subok)
copychar = 'A'
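# 'A' lets ndarray.copy keep whatever layout the source already has; it is
# overridden below when the caller explicitly asked for Fortran ('F') or
# C ('C') contiguity.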
if 'FORTRAN' in requirements or \
'F_CONTIGUOUS' in requirements or \
'F' in requirements:
copychar = 'F'
elif 'CONTIGUOUS' in requirements or \
'C_CONTIGUOUS' in requirements or \
'C' in requirements:
copychar = 'C'
for prop in requirements:
if not arr.flags[prop]:
arr = arr.copy(copychar)
break
return arr
def isfortran(a):
"""
Returns True if array is arranged in Fortran-order in memory
and dimension > 1.
Parameters
----------
a : ndarray
Input array.
Examples
--------
np.array allows one to specify whether the array is written in C-contiguous
order (last index varies the fastest) or FORTRAN-contiguous order in
memory (first index varies the fastest).
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN')
>>> b
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(b)
True
The transpose of a C-ordered array is a FORTRAN-ordered array.
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = a.T
>>> b
array([[1, 4],
[2, 5],
[3, 6]])
>>> np.isfortran(b)
True
1-D arrays always evaluate as False.
>>> np.isfortran(np.array([1, 2], order='FORTRAN'))
False
"""
return a.flags.fnc
def argwhere(a):
"""
Find the indices of array elements that are non-zero, grouped by element.
Parameters
----------
a : array_like
Input data.
Returns
-------
index_array : ndarray
Indices of elements that are non-zero. Indices are grouped by element.
See Also
--------
where, nonzero
Notes
-----
``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.
The output of ``argwhere`` is not suitable for indexing arrays.
For this purpose use ``where(a)`` instead.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argwhere(x>1)
array([[0, 2],
[1, 0],
[1, 1],
[1, 2]])
"""
return transpose(asanyarray(a).nonzero())
def flatnonzero(a):
"""
Return indices that are non-zero in the flattened version of a.
This is equivalent to a.ravel().nonzero()[0].
Parameters
----------
a : ndarray
Input array.
Returns
-------
res : ndarray
Output array, containing the indices of the elements of `a.ravel()`
that are non-zero.
See Also
--------
nonzero : Return the indices of the non-zero elements of the input array.
ravel : Return a 1-D array containing the elements of the input array.
Examples
--------
>>> x = np.arange(-2, 3)
>>> x
array([-2, -1, 0, 1, 2])
>>> np.flatnonzero(x)
array([0, 1, 3, 4])
Use the indices of the non-zero elements as an index array to extract
these elements:
>>> x.ravel()[np.flatnonzero(x)]
array([-2, -1, 1, 2])
"""
return a.ravel().nonzero()[0]
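# Map the first letter of 'valid'/'same'/'full' to the integer mode codes
# understood by multiarray.correlate.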
_mode_from_name_dict = {'v': 0,
's' : 1,
'f' : 2}
def _mode_from_name(mode):
if isinstance(mode, type("")):
return _mode_from_name_dict[mode.lower()[0]]
return mode
def correlate(a,v,mode='valid',old_behavior=True):
"""
Discrete, linear correlation of two 1-dimensional sequences.
This function is equivalent to
>>> np.convolve(a, v[::-1], mode=mode)
where ``v[::-1]`` is the reverse of `v`.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `convolve` docstring. Note that the default
is `valid`, unlike `convolve`, which uses `full`.
old_behavior : bool
If True, uses the old, numeric behavior (correlate(a,v) == correlate(v,
a), and the conjugate is not taken for complex arrays). If False, uses
the conventional signal processing definition (see note).
See Also
--------
convolve : Discrete, linear convolution of two
one-dimensional sequences.
acorrelate : Discrete correlation following the usual signal processing
definition for complex arrays, and without assuming that
``correlate(a, b) == correlate(b, a)``.
Notes
-----
If `old_behavior` is False, this function computes the correlation as
generally defined in signal processing texts::
z[k] = sum_n a[n] * conj(v[n+k])
with a and v sequences being zero-padded where necessary and conj being
the conjugate.
Examples
--------
>>> np.correlate([1, 2, 3], [0, 1, 0.5])
array([ 3.5])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
array([ 2. , 3.5, 3. ])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
array([ 0.5, 2. , 3.5, 3. , 0. ])
"""
mode = _mode_from_name(mode)
if old_behavior:
warnings.warn("""
The current behavior of correlate is deprecated for 1.4.0, and will be removed
for NumPy 1.5.0.
The new behavior fits the conventional definition of correlation: inputs are
never swapped, and the second argument is conjugated for complex arrays.""",
DeprecationWarning)
return multiarray.correlate(a,v,mode)
else:
return multiarray.correlate2(a,v,mode)
def convolve(a,v,mode='full'):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
The convolution operator is often seen in signal processing, where it
models the effect of a linear time-invariant system on a signal [1]_. In
probability theory, the sum of two independent random variables is
distributed according to the convolution of their individual
distributions.
Parameters
----------
a : (N,) array_like
First one-dimensional input array.
v : (M,) array_like
Second one-dimensional input array.
mode : {'full', 'valid', 'same'}, optional
'full':
By default, mode is 'full'. This returns the convolution
at each point of overlap, with an output shape of (N+M-1,). At
the end-points of the convolution, the signals do not overlap
completely, and boundary effects may be seen.
'same':
Mode `same` returns output of length ``max(M, N)``. Boundary
effects are still visible.
'valid':
Mode `valid` returns output of length
``max(M, N) - min(M, N) + 1``. The convolution product is only given
for points where the signals overlap completely. Values outside
the signal boundary have no effect.
Returns
-------
out : ndarray
Discrete, linear convolution of `a` and `v`.
See Also
--------
scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
Transform.
scipy.linalg.toeplitz : Used to construct the convolution operator.
Notes
-----
The discrete convolution operation is defined as
.. math:: (f * g)[n] = \\sum_{m = -\\infty}^{\\infty} f[m] g[n - m]
It can be shown that a convolution :math:`x(t) * y(t)` in time/space
is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier
domain, after appropriate padding (padding is necessary to prevent
circular convolution). Since multiplication is more efficient (faster)
than convolution, the function `scipy.signal.fftconvolve` exploits the
FFT to calculate the convolution of large data-sets.
References
----------
.. [1] Wikipedia, "Convolution", http://en.wikipedia.org/wiki/Convolution.
Examples
--------
Note how the convolution operator flips the second array
before "sliding" the two across one another:
>>> np.convolve([1, 2, 3], [0, 1, 0.5])
array([ 0. , 1. , 2.5, 4. , 1.5])
Only return the middle values of the convolution.
Contains boundary effects, where zeros are taken
into account:
>>> np.convolve([1,2,3],[0,1,0.5], 'same')
array([ 1. , 2.5, 4. ])
The two arrays are of the same length, so there
is only one position where they completely overlap:
>>> np.convolve([1,2,3],[0,1,0.5], 'valid')
array([ 2.5])
"""
a,v = array(a, ndmin=1),array(v, ndmin=1)
if (len(v) > len(a)):
a, v = v, a
if len(a) == 0 :
raise ValueError('a cannot be empty')
if len(v) == 0 :
raise ValueError('v cannot be empty')
mode = _mode_from_name(mode)
return multiarray.correlate(a, v[::-1], mode)
def outer(a,b):
"""
Compute the outer product of two vectors.
Given two vectors, ``a = [a0, a1, ..., aM]`` and
``b = [b0, b1, ..., bN]``,
the outer product [1]_ is::
[[a0*b0 a0*b1 ... a0*bN ]
[a1*b0 .
[ ... .
[aM*b0 aM*bN ]]
Parameters
----------
a, b : array_like, shape (M,), (N,)
First and second input vectors. Inputs are flattened if they
are not already 1-dimensional.
Returns
-------
out : ndarray, shape (M, N)
``out[i, j] = a[i] * b[j]``
References
----------
.. [1] : G. H. Golub and C. F. van Loan, *Matrix Computations*, 3rd
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
pg. 8.
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
>>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
>>> im
array([[ 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
[ 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
[ 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
>>> grid = rl + im
>>> grid
array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
[-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],
[-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],
[-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],
[-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])
An example using a "vector" of letters:
>>> x = np.array(['a', 'b', 'c'], dtype=object)
>>> np.outer(x, [1, 2, 3])
array([[a, aa, aaa],
[b, bb, bbb],
[c, cc, ccc]], dtype=object)
"""
a = asarray(a)
b = asarray(b)
return a.ravel()[:,newaxis]*b.ravel()[newaxis,:]
# try to import blas optimized dot if available
try:
# importing this changes the dot function for basic 4 types
# to blas-optimized versions.
from _dotblas import dot, vdot, inner, alterdot, restoredot
except ImportError:
# docstrings are in add_newdocs.py
inner = multiarray.inner
dot = multiarray.dot
def vdot(a, b):
return dot(asarray(a).ravel().conj(), asarray(b).ravel())
def alterdot():
pass
def restoredot():
pass
def tensordot(a, b, axes=2):
"""
Compute tensor dot product along specified axes for arrays >= 1-D.
Given two tensors (arrays of dimension greater than or equal to one),
``a`` and ``b``, and an array_like object containing two array_like
objects, ``(a_axes, b_axes)``, sum the products of ``a``'s and ``b``'s
elements (components) over the axes specified by ``a_axes`` and
``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N``
dimensions of ``a`` and the first ``N`` dimensions of ``b`` are summed
over.
Parameters
----------
a, b : array_like, len(shape) >= 1
Tensors to "dot".
axes : variable type
* integer_like scalar
Number of axes to sum over (applies to both arrays); or
* array_like, shape = (2,), both elements array_like
Axes to be summed over, first sequence applying to ``a``, second
to ``b``.
See Also
--------
numpy.dot
Notes
-----
When there is more than one axis to sum over - and they are not the last
(first) axes of ``a`` (``b``) - the argument ``axes`` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
Examples
--------
A "traditional" example:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> # A slower but equivalent way of computing the same...
>>> d = np.zeros((5,2))
>>> for i in range(5):
... for j in range(2):
... for k in range(3):
... for n in range(4):
... d[i,j] += a[k,n,i] * b[n,k,j]
>>> c == d
array([[ True, True],
[ True, True],
[ True, True],
[ True, True],
[ True, True]], dtype=bool)
An extended example taking advantage of the overloading of + and \\*:
>>> a = np.array(range(1, 9))
>>> a.shape = (2, 2, 2)
>>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
>>> A.shape = (2, 2)
>>> a; A
array([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])
array([[a, b],
[c, d]], dtype=object)
>>> np.tensordot(a, A) # third argument default is 2
array([abbcccdddd, aaaaabbbbbbcccccccdddddddd], dtype=object)
>>> np.tensordot(a, A, 1)
array([[[acc, bdd],
[aaacccc, bbbdddd]],
[[aaaaacccccc, bbbbbdddddd],
[aaaaaaacccccccc, bbbbbbbdddddddd]]], dtype=object)
>>> np.tensordot(a, A, 0) # "Left for reader" (result too long to incl.)
>>> np.tensordot(a, A, (0, 1))
array([[[abbbbb, cddddd],
[aabbbbbb, ccdddddd]],
[[aaabbbbbbb, cccddddddd],
[aaaabbbbbbbb, ccccdddddddd]]], dtype=object)
>>> np.tensordot(a, A, (2, 1))
array([[[abb, cdd],
[aaabbbb, cccdddd]],
[[aaaaabbbbbb, cccccdddddd],
[aaaaaaabbbbbbbb, cccccccdddddddd]]], dtype=object)
>>> np.tensordot(a, A, ((0, 1), (0, 1)))
array([abbbcccccddddddd, aabbbbccccccdddddddd], dtype=object)
>>> np.tensordot(a, A, ((2, 1), (1, 0)))
array([acccbbdddd, aaaaacccccccbbbbbbdddddddd], dtype=object)
"""
try:
iter(axes)
except:
axes_a = range(-axes,0)
axes_b = range(0,axes)
else:
axes_a, axes_b = axes
try:
na = len(axes_a)
axes_a = list(axes_a)
except TypeError:
axes_a = [axes_a]
na = 1
try:
nb = len(axes_b)
axes_b = list(axes_b)
except TypeError:
axes_b = [axes_b]
nb = 1
a, b = asarray(a), asarray(b)
as_ = a.shape
nda = len(a.shape)
bs = b.shape
ndb = len(b.shape)
equal = True
if (na != nb): equal = False
else:
for k in xrange(na):
if as_[axes_a[k]] != bs[axes_b[k]]:
equal = False
break
if axes_a[k] < 0:
axes_a[k] += nda
if axes_b[k] < 0:
axes_b[k] += ndb
if not equal:
raise ValueError, "shape-mismatch for sum"
# Move the axes to sum over to the end of "a"
# and to the front of "b"
notin = [k for k in range(nda) if k not in axes_a]
newaxes_a = notin + axes_a
N2 = 1
for axis in axes_a:
N2 *= as_[axis]
newshape_a = (-1, N2)
olda = [as_[axis] for axis in notin]
notin = [k for k in range(ndb) if k not in axes_b]
newaxes_b = axes_b + notin
N2 = 1
for axis in axes_b:
N2 *= bs[axis]
newshape_b = (N2, -1)
oldb = [bs[axis] for axis in notin]
at = a.transpose(newaxes_a).reshape(newshape_a)
bt = b.transpose(newaxes_b).reshape(newshape_b)
res = dot(at, bt)
return res.reshape(olda + oldb)
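# Editor's note: an editorial illustration, not part of the original module. The
# implementation above reduces tensordot to a single 2-D dot after transposing and
# reshaping; the default axes=2 case can be checked against that reduction directly.
# >>> import numpy as np
# >>> a = np.arange(24.).reshape(2, 3, 4)
# >>> b = np.arange(12.).reshape(3, 4)
# >>> np.allclose(np.tensordot(a, b), a.reshape(2, 12).dot(b.ravel()))
# True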
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
Elements that roll beyond the last position are re-introduced at
the first.
Parameters
----------
a : array_like
Input array.
shift : int
The number of places by which elements are shifted.
axis : int, optional
The axis along which elements are shifted. By default, the array
is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : ndarray
Output array, with the same shape as `a`.
See Also
--------
rollaxis : Roll the specified axis backwards, until it lies in a
given position.
Examples
--------
>>> x = np.arange(10)
>>> np.roll(x, 2)
array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> x2 = np.reshape(x, (2,5))
>>> x2
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> np.roll(x2, 1)
array([[9, 0, 1, 2, 3],
[4, 5, 6, 7, 8]])
>>> np.roll(x2, 1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
>>> np.roll(x2, 1, axis=1)
array([[4, 0, 1, 2, 3],
[9, 5, 6, 7, 8]])
"""
a = asanyarray(a)
if axis is None:
n = a.size
reshape = True
else:
n = a.shape[axis]
reshape = False
shift %= n
indexes = concatenate((arange(n-shift,n),arange(n-shift)))
res = a.take(indexes, axis)
if reshape:
return res.reshape(a.shape)
else:
return res
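# Editor's note: an editorial illustration, not part of the original module. roll()
# above builds its result from a single take() over re-ordered indices, so a shift
# of 0 or of the full length leaves the elements unchanged.
# >>> import numpy as np
# >>> x = np.arange(5)
# >>> np.array_equal(np.roll(x, 5), x) and np.array_equal(np.roll(x, 0), x)
# True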
def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
Parameters
----------
a : ndarray
Input array.
axis : int
The axis to roll backwards. The positions of the other axes do not
change relative to one another.
start : int, optional
The axis is rolled until it lies before this position.
Returns
-------
res : ndarray
Output array.
See Also
--------
roll : Roll the elements of an array by a number of positions along a
given axis.
Examples
--------
>>> a = np.ones((3,4,5,6))
>>> np.rollaxis(a, 3, 1).shape
(3, 6, 4, 5)
>>> np.rollaxis(a, 2).shape
(5, 3, 4, 6)
>>> np.rollaxis(a, 1, 4).shape
(3, 5, 6, 4)
"""
n = a.ndim
if axis < 0:
axis += n
if start < 0:
start += n
msg = 'rollaxis: %s (%d) must be >=0 and < %d'
if not (0 <= axis < n):
        raise ValueError(msg % ('axis', axis, n))
if not (0 <= start < n+1):
        raise ValueError(msg % ('start', start, n+1))
if (axis < start): # it's been removed
start -= 1
if axis==start:
return a
axes = range(0,n)
axes.remove(axis)
axes.insert(start, axis)
return a.transpose(axes)
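# Editor's note: an editorial illustration, not part of the original module.
# rollaxis() above is a pure transpose, so it can be cross-checked against an
# explicit axis permutation.
# >>> import numpy as np
# >>> a = np.arange(24).reshape(2, 3, 4)
# >>> np.array_equal(np.rollaxis(a, 2, 0), a.transpose(2, 0, 1))
# True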
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
return rollaxis(a, axis, 0)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""
Return the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
are defined by the last axis of `a` and `b` by default, and these axes
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
2, the third component of the input vector is assumed to be zero and the
cross product calculated accordingly. In cases where both input vectors
have dimension 2, the z-component of the cross product is returned.
Parameters
----------
a : array_like
Components of the first vector(s).
b : array_like
Components of the second vector(s).
axisa : int, optional
Axis of `a` that defines the vector(s). By default, the last axis.
axisb : int, optional
Axis of `b` that defines the vector(s). By default, the last axis.
axisc : int, optional
Axis of `c` containing the cross product vector(s). By default, the
last axis.
axis : int, optional
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
Returns
-------
c : ndarray
Vector cross product(s).
Raises
------
ValueError
When the dimension of the vector(s) in `a` and/or `b` does not
equal 2 or 3.
See Also
--------
inner : Inner product
outer : Outer product.
ix_ : Construct index arrays.
Examples
--------
Vector cross-product.
>>> x = [1, 2, 3]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([-3, 6, -3])
One vector with dimension 2.
>>> x = [1, 2]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Equivalently:
>>> x = [1, 2, 0]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Both vectors with dimension 2.
>>> x = [1,2]
>>> y = [4,5]
>>> np.cross(x, y)
-3
Multiple vector cross-products. Note that the direction of the cross
product vector is defined by the `right-hand rule`.
>>> x = np.array([[1,2,3], [4,5,6]])
>>> y = np.array([[4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[-3, 6, -3],
[ 3, -6, 3]])
The orientation of `c` can be changed using the `axisc` keyword.
>>> np.cross(x, y, axisc=0)
array([[-3, 3],
[ 6, -6],
[-3, 3]])
Change the vector definition of `x` and `y` using `axisa` and `axisb`.
>>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
>>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[ -6, 12, -6],
[ 0, 0, 0],
[ 6, -12, 6]])
>>> np.cross(x, y, axisa=0, axisb=0)
array([[-24, 48, -24],
[-30, 60, -30],
[-36, 72, -36]])
"""
if axis is not None:
axisa,axisb,axisc=(axis,)*3
a = asarray(a).swapaxes(axisa, 0)
b = asarray(b).swapaxes(axisb, 0)
msg = "incompatible dimensions for cross product\n"\
"(dimension must be 2 or 3)"
if (a.shape[0] not in [2,3]) or (b.shape[0] not in [2,3]):
raise ValueError(msg)
if a.shape[0] == 2:
if (b.shape[0] == 2):
cp = a[0]*b[1] - a[1]*b[0]
if cp.ndim == 0:
return cp
else:
return cp.swapaxes(0, axisc)
else:
x = a[1]*b[2]
y = -a[0]*b[2]
z = a[0]*b[1] - a[1]*b[0]
elif a.shape[0] == 3:
if (b.shape[0] == 3):
x = a[1]*b[2] - a[2]*b[1]
y = a[2]*b[0] - a[0]*b[2]
z = a[0]*b[1] - a[1]*b[0]
else:
x = -a[2]*b[1]
y = a[2]*b[0]
z = a[0]*b[1] - a[1]*b[0]
cp = array([x,y,z])
if cp.ndim == 1:
return cp
else:
return cp.swapaxes(0,axisc)
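# Editor's note: an editorial illustration, not part of the original module. A quick
# check of the property stated in the docstring -- the 3-D cross product is
# perpendicular to both inputs.
# >>> import numpy as np
# >>> x, y = [1, 2, 3], [4, 5, 6]
# >>> c = np.cross(x, y)
# >>> int(np.dot(c, x)), int(np.dot(c, y))
# (0, 0)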
#Use numarray's printing function
from arrayprint import array2string, get_printoptions, set_printoptions
_typelessdata = [int_, float_, complex_]
if issubclass(intc, int):
_typelessdata.append(intc)
if issubclass(longlong, int):
_typelessdata.append(longlong)
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
"""
Return the string representation of an array.
Parameters
----------
arr : ndarray
Input array.
max_line_width : int, optional
The maximum number of columns the string should span. Newline
characters split the string appropriately after array elements.
precision : int, optional
Floating point precision. Default is the current printing precision
(usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent very small numbers as zero, default is False. Very small
is defined by `precision`, if the precision is 8 then
numbers smaller than 5e-9 are represented as zero.
Returns
-------
string : str
The string representation of an array.
See Also
--------
array_str, array2string, set_printoptions
Examples
--------
>>> np.array_repr(np.array([1,2]))
'array([1, 2])'
>>> np.array_repr(np.ma.array([0.]))
'MaskedArray([ 0.])'
>>> np.array_repr(np.array([], np.int32))
'array([], dtype=int32)'
>>> x = np.array([1e-6, 4e-7, 2, 3])
>>> np.array_repr(x, precision=6, suppress_small=True)
'array([ 0.000001, 0. , 2. , 3. ])'
"""
if arr.size > 0 or arr.shape==(0,):
lst = array2string(arr, max_line_width, precision, suppress_small,
', ', "array(")
else: # show zero-length shape unless it is (0,)
lst = "[], shape=%s" % (repr(arr.shape),)
typeless = arr.dtype.type in _typelessdata
if arr.__class__ is not ndarray:
cName= arr.__class__.__name__
else:
cName = "array"
if typeless and arr.size:
return cName + "(%s)" % lst
else:
typename=arr.dtype.name
lf = ''
if issubclass(arr.dtype.type, flexible):
if arr.dtype.names:
typename = "%s" % str(arr.dtype)
else:
typename = "'%s'" % str(arr.dtype)
lf = '\n'+' '*len("array(")
return cName + "(%s, %sdtype=%s)" % (lst, lf, typename)
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
"""
Return a string representation of the data in an array.
The data in the array is returned as a single string. This function
is similar to `array_repr`, the difference is that `array_repr` also
returns information on the type of array and data type.
Parameters
----------
a : ndarray
Input array.
max_line_width : int, optional
Inserts newlines if text is longer than `max_line_width`.
precision : int, optional
Floating point precision. Default is the current printing precision
(usually 8), which can be altered using set_printoptions.
suppress_small : bool, optional
Represent very small numbers as zero, default is False. Very small is
defined by precision, if the precision is 8 then numbers smaller than
5e-9 are represented as zero.
See Also
--------
array2string, array_repr, set_printoptions
Examples
--------
>>> np.array_str(np.arange(3))
    '[0 1 2]'
"""
return array2string(a, max_line_width, precision, suppress_small, ' ', "", str)
set_string_function = multiarray.set_string_function
set_string_function(array_str, 0)
set_string_function(array_repr, 1)
little_endian = (sys.byteorder == 'little')
def indices(dimensions, dtype=int):
"""
Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0,1,...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : dtype, optional
Data type of the result.
Returns
-------
grid : ndarray
The array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
See Also
--------
mgrid, meshgrid
Notes
-----
The output shape is obtained by prepending the number of dimensions
in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N,r0,...,rN-1)``.
The subarrays ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k,i0,i1,...,iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
    (2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]])
>>> grid[1] # column indices
array([[0, 1, 2],
[0, 1, 2]])
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0, 1, 2],
[4, 5, 6]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
"""
dimensions = tuple(dimensions)
N = len(dimensions)
if N == 0:
return array([],dtype=dtype)
res = empty((N,)+dimensions, dtype=dtype)
for i, dim in enumerate(dimensions):
tmp = arange(dim,dtype=dtype)
tmp.shape = (1,)*i + (dim,)+(1,)*(N-i-1)
newdim = dimensions[:i] + (1,)+ dimensions[i+1:]
val = zeros(newdim, dtype)
add(tmp, val, res[i])
return res
def fromfunction(function, shape, **kwargs):
"""
Construct an array by executing a function over each coordinate.
The resulting array therefore has a value ``fn(x, y, z)`` at
coordinate ``(x, y, z)``.
Parameters
----------
function : callable
The function is called with N parameters, each of which
represents the coordinates of the array varying along a
specific axis. For example, if `shape` were ``(2, 2)``, then
the parameters would be two arrays, ``[[0, 0], [1, 1]]`` and
``[[0, 1], [0, 1]]``. `function` must be capable of operating on
arrays, and should return a scalar value.
shape : (N,) tuple of ints
Shape of the output array, which also determines the shape of
the coordinate arrays passed to `function`.
dtype : data-type, optional
Data-type of the coordinate arrays passed to `function`.
By default, `dtype` is float.
Returns
-------
out : any
The result of the call to `function` is passed back directly.
Therefore the type and shape of `out` is completely determined by
`function`.
See Also
--------
indices, meshgrid
Notes
-----
Keywords other than `shape` and `dtype` are passed to `function`.
Examples
--------
>>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
array([[ True, False, False],
[False, True, False],
[False, False, True]], dtype=bool)
>>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4]])
"""
dtype = kwargs.pop('dtype', float)
args = indices(shape, dtype=dtype)
return function(*args,**kwargs)
def isscalar(num):
"""
Returns True if the type of `num` is a scalar type.
Parameters
----------
num : any
Input argument, can be of any type and shape.
Returns
-------
val : bool
True if `num` is a scalar type, False if it is not.
Examples
--------
>>> np.isscalar(3.1)
True
>>> np.isscalar([3.1])
False
>>> np.isscalar(False)
True
"""
if isinstance(num, generic):
return True
else:
return type(num) in ScalarType
_lkup = {
'0':'0000',
'1':'0001',
'2':'0010',
'3':'0011',
'4':'0100',
'5':'0101',
'6':'0110',
'7':'0111',
'8':'1000',
'9':'1001',
'a':'1010',
'b':'1011',
'c':'1100',
'd':'1101',
'e':'1110',
'f':'1111',
'A':'1010',
'B':'1011',
'C':'1100',
'D':'1101',
'E':'1110',
'F':'1111',
'L':''}
def binary_repr(num, width=None):
"""
Return the binary representation of the input number as a string.
For negative numbers, if width is not given, a minus sign is added to the
front. If width is given, the two's complement of the number is
returned, with respect to that width.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
num : int
Only an integer decimal number can be used.
width : int, optional
The length of the returned string if `num` is positive, the length of
the two's complement if `num` is negative.
Returns
-------
bin : str
Binary representation of `num` or two's complement of `num`.
See Also
--------
base_repr: Return a string representation of a number in the given base
system.
Notes
-----
`binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
faster.
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
>>> np.binary_repr(3)
'11'
>>> np.binary_repr(-3)
'-11'
>>> np.binary_repr(3, width=4)
'0011'
The two's complement is returned when the input number is negative and
width is specified:
>>> np.binary_repr(-3, width=4)
'1101'
"""
sign = ''
if num < 0:
if width is None:
sign = '-'
num = -num
else:
# replace num with its 2-complement
num = 2**width + num
elif num == 0:
return '0'*(width or 1)
ostr = hex(num)
bin = ''.join([_lkup[ch] for ch in ostr[2:]])
bin = bin.lstrip('0')
if width is not None:
bin = bin.zfill(width)
return sign + bin
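# Editor's note: an editorial illustration, not part of the original module. With a
# fixed width, the two's complement returned above for a negative number equals the
# plain binary form of 2**width + num.
# >>> import numpy as np
# >>> np.binary_repr(-3, width=4) == np.binary_repr(2**4 - 3)
# True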
def base_repr (number, base=2, padding=0):
"""
Return a string representation of a number in the given base system.
Parameters
----------
number : scalar
The value to convert. Only positive values are handled.
base : int, optional
Convert `number` to the `base` number system. The valid range is 2-36,
the default value is 2.
padding : int, optional
Number of zeros padded on the left. Default is 0 (no padding).
Returns
-------
out : str
String representation of `number` in `base` system.
See Also
--------
binary_repr : Faster version of `base_repr` for base 2 that also handles
negative numbers.
Examples
--------
>>> np.base_repr(5)
'101'
>>> np.base_repr(6, 5)
'11'
>>> np.base_repr(7, base=5, padding=3)
'00012'
>>> np.base_repr(10, base=16)
'A'
>>> np.base_repr(32, base=16)
'20'
"""
if number < 0:
raise ValueError("negative numbers not handled in base_repr")
if base > 36:
raise ValueError("bases greater than 36 not handled in base_repr")
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
import math
lnb = math.log(base)
res = padding*chars[0]
if number == 0:
return res + chars[0]
exponent = int (math.log (number)/lnb)
while(exponent >= 0):
term = long(base)**exponent
lead_digit = int(number / term)
res += chars[lead_digit]
number -= term*lead_digit
exponent -= 1
return res
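# Editor's note: an editorial illustration, not part of the original module. The
# string produced by base_repr() above round-trips through Python's int() when the
# same base is supplied.
# >>> import numpy as np
# >>> int(np.base_repr(255, base=16), 16)
# 255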
from cPickle import load, loads
_cload = load
_file = file
def load(file):
"""
Wrapper around cPickle.load which accepts either a file-like object or
a filename.
Note that the NumPy binary format is not based on pickle/cPickle anymore.
For details on the preferred way of loading and saving files, see `load`
and `save`.
See Also
--------
load, save
"""
if isinstance(file, type("")):
file = _file(file,"rb")
return _cload(file)
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
def _maketup(descr, val):
dt = dtype(descr)
# Place val in all scalar tuples:
fields = dt.fields
if fields is None:
return val
else:
res = [_maketup(fields[name][0],val) for name in dt.names]
return tuple(res)
def ones(shape, dtype=None, order='C'):
"""
Return a new array of given shape and type, filled with ones.
Please refer to the documentation for `zeros`.
See Also
--------
zeros
Examples
--------
>>> np.ones(5)
array([ 1., 1., 1., 1., 1.])
>>> np.ones((5,), dtype=np.int)
array([1, 1, 1, 1, 1])
>>> np.ones((2, 1))
array([[ 1.],
[ 1.]])
>>> s = (2,2)
>>> np.ones(s)
array([[ 1., 1.],
[ 1., 1.]])
"""
a = empty(shape, dtype, order)
try:
a.fill(1)
# Above is faster now after addition of fast loops.
#a = zeros(shape, dtype, order)
#a+=1
except TypeError:
obj = _maketup(dtype, 1)
a.fill(obj)
return a
def identity(n, dtype=None):
"""
Return the identity array.
The identity array is a square array with ones on
the main diagonal.
Parameters
----------
n : int
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output. Defaults to ``float``.
Returns
-------
out : ndarray
`n` x `n` array with its main diagonal set to one,
and all other elements 0.
Examples
--------
>>> np.identity(3)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""
a = zeros((n,n), dtype=dtype)
a.flat[::n+1] = 1
return a
def allclose(a, b, rtol=1.e-5, atol=1.e-8):
"""
Returns True if two arrays are element-wise equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
Returns
-------
y : bool
Returns True if the two arrays are equal within the given
tolerance; False otherwise. If either array contains NaN, then
False is returned.
See Also
--------
all, any, alltrue, sometrue
Notes
-----
If the following equation is element-wise True, then allclose returns
True.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`allclose(a, b)` might be different from `allclose(b, a)` in
some rare cases.
Examples
--------
>>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
False
>>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
True
>>> np.allclose([1e10,1e-8], [1.0001e10,1e-9])
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan])
False
"""
x = array(a, copy=False)
y = array(b, copy=False)
xinf = isinf(x)
if not all(xinf == isinf(y)):
return False
if not any(xinf):
return all(less_equal(absolute(x-y), atol + rtol * absolute(y)))
if not all(x[xinf] == y[xinf]):
return False
x = x[~xinf]
y = y[~xinf]
return all(less_equal(absolute(x-y), atol + rtol * absolute(y)))
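# Editor's note: an editorial illustration, not part of the original module. A
# direct element-wise evaluation of the tolerance formula from the Notes section of
# allclose() above.
# >>> import numpy as np
# >>> a, b = np.array([1.0, 1e-8]), np.array([1.0001, 0.0])
# >>> lhs = np.abs(a - b) <= 1e-8 + 1e-5 * np.abs(b)
# >>> bool(np.all(lhs)) == bool(np.allclose(a, b, rtol=1e-5, atol=1e-8))
# True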
def array_equal(a1, a2):
"""
True if two arrays have the same shape and elements, False otherwise.
Parameters
----------
a1, a2 : array_like
Input arrays.
Returns
-------
b : bool
Returns True if the arrays are equal.
See Also
--------
allclose: Returns True if two arrays are element-wise equal within a
tolerance.
array_equiv: Returns True if input arrays are shape consistent and all
elements equal.
Examples
--------
>>> np.array_equal([1, 2], [1, 2])
True
>>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
True
>>> np.array_equal([1, 2], [1, 2, 3])
False
>>> np.array_equal([1, 2], [1, 4])
False
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except:
return False
if a1.shape != a2.shape:
return False
return bool(logical_and.reduce(equal(a1,a2).ravel()))
def array_equiv(a1, a2):
"""
Returns True if input arrays are shape consistent and all elements equal.
Shape consistent means they are either the same shape, or one input array
can be broadcasted to create the same shape as the other one.
Parameters
----------
a1, a2 : array_like
Input arrays.
Returns
-------
out : bool
True if equivalent, False otherwise.
Examples
--------
>>> np.array_equiv([1, 2], [1, 2])
    True
>>> np.array_equiv([1, 2], [1, 3])
    False
Showing the shape equivalence:
>>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
    True
>>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
    False
>>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
    False
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except:
return False
try:
return bool(logical_and.reduce(equal(a1,a2).ravel()))
except ValueError:
return False
_errdict = {"ignore":ERR_IGNORE,
"warn":ERR_WARN,
"raise":ERR_RAISE,
"call":ERR_CALL,
"print":ERR_PRINT,
"log":ERR_LOG}
_errdict_rev = {}
for key in _errdict.keys():
_errdict_rev[_errdict[key]] = key
del key
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
"""
Set how floating-point errors are handled.
Note that operations on integer scalar types (such as `int16`) are
handled like floating point, and are affected by these settings.
Parameters
----------
all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Set treatment for all types of floating-point errors at once:
- ignore: Take no action when the exception occurs.
- warn: Print a `RuntimeWarning` (via the Python `warnings` module).
- raise: Raise a `FloatingPointError`.
- call: Call a function specified using the `seterrcall` function.
- print: Print a warning directly to ``stdout``.
- log: Record error in a Log object specified by `seterrcall`.
The default is not to change the current behavior.
divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for division by zero.
over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point overflow.
under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point underflow.
invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for invalid floating-point operation.
Returns
-------
old_settings : dict
Dictionary containing the old settings.
See also
--------
seterrcall : Set a callback function for the 'call' mode.
geterr, geterrcall
Notes
-----
The floating-point exceptions are defined in the IEEE 754 standard [1]:
- Division by zero: infinite result obtained from finite numbers.
- Overflow: result too large to be expressed.
- Underflow: result so close to zero that some precision
was lost.
- Invalid operation: result is not an expressible number, typically
indicates that a NaN was produced.
.. [1] http://en.wikipedia.org/wiki/IEEE_754
Examples
--------
>>> np.seterr(over='raise')
{'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
'under': 'ignore'}
>>> np.seterr(all='ignore') # reset to default
{'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'}
>>> np.int16(32000) * np.int16(3)
30464
>>> old_settings = np.seterr(all='warn', over='raise')
>>> np.int16(32000) * np.int16(3)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: overflow encountered in short_scalars
>>> np.seterr(all='print')
{'over': 'print', 'divide': 'print', 'invalid': 'print', 'under': 'print'}
>>> np.int16(32000) * np.int16(3)
Warning: overflow encountered in short_scalars
30464
Calling `seterr` with no arguments resets treatment for all floating-point
errors to the defaults.
>>> old_settings = np.seterr()
>>> np.geterr()
{'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
'under': 'ignore'}
"""
pyvals = umath.geterrobj()
old = geterr()
if divide is None: divide = all or old['divide']
if over is None: over = all or old['over']
if under is None: under = all or old['under']
if invalid is None: invalid = all or old['invalid']
maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
(_errdict[over] << SHIFT_OVERFLOW ) +
(_errdict[under] << SHIFT_UNDERFLOW) +
(_errdict[invalid] << SHIFT_INVALID))
pyvals[1] = maskvalue
umath.seterrobj(pyvals)
return old
def geterr():
"""
Get the current way of handling floating-point errors.
Returns
-------
res : dict
A dictionary with keys "divide", "over", "under", and "invalid",
whose values are from the strings "ignore", "print", "log", "warn",
"raise", and "call". The keys represent possible floating-point
exceptions, and the values define how these exceptions are handled.
See Also
--------
geterrcall, seterr, seterrcall
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterr() # default is all set to 'ignore'
{'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
'under': 'ignore'}
>>> np.arange(3.) / np.arange(3.)
array([ NaN, 1., 1.])
>>> oldsettings = np.seterr(all='warn', over='raise')
>>> np.geterr()
{'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'}
>>> np.arange(3.) / np.arange(3.)
__main__:1: RuntimeWarning: invalid value encountered in divide
array([ NaN, 1., 1.])
"""
maskvalue = umath.geterrobj()[1]
mask = 7
res = {}
val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
res['divide'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_OVERFLOW) & mask
res['over'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_UNDERFLOW) & mask
res['under'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_INVALID) & mask
res['invalid'] = _errdict_rev[val]
return res
def setbufsize(size):
"""
Set the size of the buffer used in ufuncs.
Parameters
----------
size : int
Size of buffer.
"""
if size > 10e6:
raise ValueError, "Buffer size, %s, is too big." % size
if size < 5:
raise ValueError, "Buffer size, %s, is too small." %size
if size % 16 != 0:
raise ValueError, "Buffer size, %s, is not a multiple of 16." %size
pyvals = umath.geterrobj()
old = getbufsize()
pyvals[0] = size
umath.seterrobj(pyvals)
return old
def getbufsize():
"""Return the size of the buffer used in ufuncs.
"""
return umath.geterrobj()[0]
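# Editor's note: an editorial illustration, not part of the original module.
# setbufsize() above returns the previous size, so the usual pattern is to save it
# and restore it afterwards.
# >>> import numpy as np
# >>> old = np.setbufsize(8192)
# >>> np.getbufsize()
# 8192
# >>> _ = np.setbufsize(old)   # restore the previous buffer size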
def seterrcall(func):
"""
Set the floating-point error callback function or log object.
There are two ways to capture floating-point error messages. The first
is to set the error-handler to 'call', using `seterr`. Then, set
the function to call using this function.
The second is to set the error-handler to 'log', using `seterr`.
Floating-point errors then trigger a call to the 'write' method of
the provided object.
Parameters
----------
func : callable f(err, flag) or object with write method
Function to call upon floating-point errors ('call'-mode) or
object whose 'write' method is used to log such message ('log'-mode).
The call function takes two arguments. The first is the
type of error (one of "divide", "over", "under", or "invalid"),
and the second is the status flag. The flag is a byte, whose
least-significant bits indicate the status::
      [0 0 0 0 divide over under invalid]
In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
If an object is provided, its write method should take one argument,
a string.
Returns
-------
h : callable, log instance or None
The old error handler.
See Also
--------
seterr, geterr, geterrcall
Examples
--------
Callback upon error:
>>> def err_handler(type, flag):
print "Floating point error (%s), with flag %s" % (type, flag)
...
>>> saved_handler = np.seterrcall(err_handler)
>>> save_err = np.seterr(all='call')
>>> np.array([1, 2, 3]) / 0.0
Floating point error (divide by zero), with flag 1
array([ Inf, Inf, Inf])
>>> np.seterrcall(saved_handler)
>>> np.seterr(**save_err)
Log error message:
>>> class Log(object):
    ...     def write(self, msg):
    ...         print "LOG: %s" % msg
    ...
>>> log = Log()
>>> saved_handler = np.seterrcall(log)
>>> save_err = np.seterr(all='log')
>>> np.array([1, 2, 3]) / 0.0
LOG: Warning: divide by zero encountered in divide
>>> np.seterrcall(saved_handler)
>>> np.seterr(**save_err)
"""
if func is not None and not callable(func):
if not hasattr(func, 'write') or not callable(func.write):
raise ValueError, "Only callable can be used as callback"
pyvals = umath.geterrobj()
old = geterrcall()
pyvals[2] = func
umath.seterrobj(pyvals)
return old
def geterrcall():
"""
Return the current callback function used on floating-point errors.
When the error handling for a floating-point error (one of "divide",
"over", "under", or "invalid") is set to 'call' or 'log', the function
that is called or the log instance that is written to is returned by
`geterrcall`. This function or log instance has been set with
`seterrcall`.
Returns
-------
errobj : callable, log instance or None
The current error handler. If no handler was set through `seterrcall`,
``None`` is returned.
See Also
--------
seterrcall, seterr, geterr
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrcall() # we did not yet set a handler, returns None
>>> oldsettings = np.seterr(all='call')
>>> def err_handler(type, flag):
... print "Floating point error (%s), with flag %s" % (type, flag)
>>> oldhandler = np.seterrcall(err_handler)
>>> np.array([1, 2, 3]) / 0.0
Floating point error (divide by zero), with flag 1
array([ Inf, Inf, Inf])
>>> cur_handler = np.geterrcall()
>>> cur_handler is err_handler
True
"""
return umath.geterrobj()[2]
class _unspecified(object):
pass
_Unspecified = _unspecified()
class errstate(object):
"""
errstate(**kwargs)
Context manager for floating-point error handling.
Using an instance of `errstate` as a context manager allows statements in
that context to execute with a known error handling behavior. Upon entering
the context the error handling is set with `seterr` and `seterrcall`, and
upon exiting it is reset to what it was before.
Parameters
----------
kwargs : {divide, over, under, invalid}
Keyword arguments. The valid keywords are the possible floating-point
exceptions. Each keyword should have a string value that defines the
treatment for the particular error. Possible values are
{'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
See Also
--------
seterr, geterr, seterrcall, geterrcall
Notes
-----
The ``with`` statement was introduced in Python 2.5, and can only be used
there by importing it: ``from __future__ import with_statement``. In
earlier Python versions the ``with`` statement is not available.
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> from __future__ import with_statement # use 'with' in Python 2.5
>>> olderr = np.seterr(all='ignore') # Set error handling to known state.
>>> np.arange(3) / 0.
array([ NaN, Inf, Inf])
>>> with np.errstate(divide='warn'):
... np.arange(3) / 0.
...
__main__:2: RuntimeWarning: divide by zero encountered in divide
array([ NaN, Inf, Inf])
>>> np.sqrt(-1)
nan
>>> with np.errstate(invalid='raise'):
... np.sqrt(-1)
...
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
FloatingPointError: invalid value encountered in sqrt
Outside the context the error handling behavior has not changed:
>>> np.geterr()
{'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
'under': 'ignore'}
"""
# Note that we don't want to run the above doctests because they will fail
# without a from __future__ import with_statement
def __init__(self, **kwargs):
self.call = kwargs.pop('call',_Unspecified)
self.kwargs = kwargs
def __enter__(self):
self.oldstate = seterr(**self.kwargs)
if self.call is not _Unspecified:
self.oldcall = seterrcall(self.call)
def __exit__(self, *exc_info):
seterr(**self.oldstate)
if self.call is not _Unspecified:
seterrcall(self.oldcall)
def _setdef():
defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT2, None]
umath.seterrobj(defval)
# set the default values
_setdef()
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)
import fromnumeric
from fromnumeric import *
extend_all(fromnumeric)
| 28.45
| 83
| 0.573828
|
fc208edfc6e7384f66d605747a5e9c22d3602282
| 298
|
py
|
Python
|
src2/utils/torch_utils.py
|
LIV4D/RetinalSurgeryRecognition
|
06621e8078b4ee60ed77ba2db0d0c209429e90b8
|
[
"MIT"
] | null | null | null |
src2/utils/torch_utils.py
|
LIV4D/RetinalSurgeryRecognition
|
06621e8078b4ee60ed77ba2db0d0c209429e90b8
|
[
"MIT"
] | null | null | null |
src2/utils/torch_utils.py
|
LIV4D/RetinalSurgeryRecognition
|
06621e8078b4ee60ed77ba2db0d0c209429e90b8
|
[
"MIT"
] | null | null | null |
from torch import nn
class DataParallel(nn.DataParallel):
"""
Allow nn.DataParallel to call model's attributes.
"""
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name)
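# Editor's note: a minimal usage sketch added for illustration; it is not part of
# the original file, and the Net class below is invented for the example. Attributes
# defined on the wrapped module (here `n_classes`) stay reachable through the
# wrapper thanks to the __getattr__ override above.
# >>> class Net(nn.Module):
# ...     def __init__(self):
# ...         super().__init__()
# ...         self.fc = nn.Linear(4, 2)
# ...         self.n_classes = 2            # custom attribute
# ...     def forward(self, x):
# ...         return self.fc(x)
# >>> model = DataParallel(Net())
# >>> model.n_classes                       # resolved via the wrapped module
# 2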
| 22.923077
| 53
| 0.627517
|
ea2428b4316bc70b25fabf26d12a83fb7d2b0f7e
| 1,535
|
py
|
Python
|
setup.py
|
zeroday0619/user_agent
|
03b0154d7019ddc8e4323a07f106a383cbd8af22
|
[
"MIT"
] | null | null | null |
setup.py
|
zeroday0619/user_agent
|
03b0154d7019ddc8e4323a07f106a383cbd8af22
|
[
"MIT"
] | null | null | null |
setup.py
|
zeroday0619/user_agent
|
03b0154d7019ddc8e4323a07f106a383cbd8af22
|
[
"MIT"
] | null | null | null |
import os
from setuptools import setup
ROOT = os.path.dirname(os.path.realpath(__file__))
setup(
# Meta data
name='user_agent',
version='0.1.9',
author="Gregory Petukhov",
author_email='lorien@lorien.name',
maintainer="Gregory Petukhov",
maintainer_email='lorien@lorien.name',
url='https://github.com/lorien/user_agent',
description='User-Agent generator',
long_description=open(os.path.join(ROOT, 'README.rst')).read(),
download_url='http://pypi.python.org/pypi/user_agent',
keywords="user agent browser navigator",
license="MIT License",
# Package files
packages=['user_agent'],
include_package_data=True,
install_requires=['six', 'requests'],
entry_points={
'console_scripts': [
'ua = user_agent.cli:script_ua',
],
},
# Topics
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'License :: OSI Approved :: MIT License',
#'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: WWW/HTTP',
],
)
| 31.979167
| 71
| 0.621498
|
629602a088ec1ac93de8071a6317ea21260e5298
| 1,305
|
py
|
Python
|
src/globus_cli/commands/task/generate_submission_id.py
|
sirosen/temp-cli-test
|
416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6
|
[
"Apache-2.0"
] | 47
|
2016-04-21T19:51:17.000Z
|
2022-02-25T14:13:30.000Z
|
src/globus_cli/commands/task/generate_submission_id.py
|
sirosen/temp-cli-test
|
416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6
|
[
"Apache-2.0"
] | 421
|
2016-04-20T18:45:24.000Z
|
2022-03-14T14:50:41.000Z
|
src/globus_cli/commands/task/generate_submission_id.py
|
sirosen/temp-cli-test
|
416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6
|
[
"Apache-2.0"
] | 20
|
2016-09-10T20:25:27.000Z
|
2021-10-06T16:02:47.000Z
|
from globus_cli.login_manager import LoginManager
from globus_cli.parsing import command
from globus_cli.termio import FORMAT_TEXT_RAW, formatted_print
@command(
"generate-submission-id",
short_help="Get a task submission ID",
adoc_output=(
"When text output is requested, the generated 'UUID' is the only output."
),
adoc_examples="""Submit a transfer, using a submission ID generated by this command:
[source,bash]
----
$ sub_id="$(globus task generate-submission-id)"
$ globus transfer --submission-id "$sub_id" ...
----
""",
)
@LoginManager.requires_login(LoginManager.TRANSFER_RS)
def generate_submission_id(*, login_manager: LoginManager):
"""
Generate a new task submission ID for use in `globus transfer` and `globus delete`.
Submission IDs allow you to safely retry submission of a task in the presence of
network errors. No matter how many times you submit a task with a given ID, it will
only be accepted and executed once. The response status may change between
submissions.
\b
Important Note: Submission IDs are not the same as Task IDs.
"""
transfer_client = login_manager.get_transfer_client()
res = transfer_client.get_submission_id()
formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="value")
| 35.27027
| 88
| 0.739464
|
aec7b94b2a09918ac03c0e5a036f63f3b7c85783
| 9,527
|
py
|
Python
|
flashexpander/src/flashexpander.py
|
Mariusz1970/enigma2-plugins-1
|
126d31d075c156f32b09d4321ebe1a17f93a5bd6
|
[
"OLDAP-2.3"
] | 2
|
2020-09-02T18:25:39.000Z
|
2020-09-02T18:39:07.000Z
|
flashexpander/src/flashexpander.py
|
Mariusz1970/enigma2-plugins-1
|
126d31d075c156f32b09d4321ebe1a17f93a5bd6
|
[
"OLDAP-2.3"
] | null | null | null |
flashexpander/src/flashexpander.py
|
Mariusz1970/enigma2-plugins-1
|
126d31d075c156f32b09d4321ebe1a17f93a5bd6
|
[
"OLDAP-2.3"
] | 11
|
2015-02-26T20:59:14.000Z
|
2021-09-20T08:23:03.000Z
|
# -*- coding: utf-8 -*-
# code by GeminiTeam
from Screens.Screen import Screen
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Components.ActionMap import ActionMap
from Components.MenuList import MenuList
from Components.Harddisk import harddiskmanager, Harddisk
from Components.Console import Console
from Tools.Directories import createDir
from Tools.BoundFunction import boundFunction
from locale import _
from enigma import quitMainloop
from os import system, listdir, path, statvfs, remove, popen as os_popen
import re
#Topfi begin
from subprocess import Popen, PIPE
#Topfi end
#from Plugins.Bp.geminimain.gTools import cleanexit
def getMountP():
try:
mounts = open("/proc/mounts")
except IOError:
return []
lines = mounts.readlines()
mounts.close()
return lines
def ismounted(dev,mp):
for x in getMountP():
parts = x.strip().split(" ")
#realpath = path.realpath(parts[0])
if len(parts)>1:
if parts[0] == dev or parts[1] == mp:
return parts[1]
return False
def getFreeSize(mp):
try:
stat = statvfs(mp)
return stat.f_bfree/1000 * stat.f_bsize/1000
except:
return 0
#-----------------------------------------------------------------------------
class FlashExpander(Screen):
skin = """<screen position="center,center" size="580,50" title="FlashExpander v0.33">
<widget name="list" position="5,5" size="570,40" />
</screen>"""
def __init__(self, session):
Screen.__init__(self, session)
self["actions"] = ActionMap(["OkCancelActions"],
{
"cancel": self.Exit,
"ok": self.Ok
}, -1)
if ismounted("","/usr"):
self.__foundFE=True
list = [(_("... is used, %dMB free") % getFreeSize("/usr"))]
else:
self.__foundFE=False
list = [(_("FlashExpander is not installed, create? Press Key OK."))]
self["list"] = MenuList(list=list)
def Ok(self):
if self.__foundFE==False:
self.session.openWithCallback(self.__confCallback,FEconf)
def __confCallback(self,data):
if data==False:
self.Exit()
else:
self.close()
quitMainloop(2)
def Exit(self):
self.close()
#cleanexit(__name__)
#-----------------------------------------------------------------------------
class FEconf(Screen):
skin = """<screen position="center,center" size="640,160" title="%s">
<widget name="list" position="5,5" size="630,150" />
</screen>""" %(_("choose device to FlashExpander"))
def __init__(self, session):
Screen.__init__(self, session)
self["actions"] = ActionMap(["OkCancelActions"],
{
"cancel": self.Exit,
"ok": self.Ok
}, -1)
#Blocklaufwerke
list = []
for x in listdir("/sys/block"):
if x[0:2] == 'sd' or x[0:2] == 'hd':
print "[FlashExpander] device",x
devices = Harddisk(x)
for y in range(devices.numPartitions()):
fstype = self.__getPartitionType(devices.partitionPath(str(y+1)))
if fstype==False:
fstype = self.__getPartitionType(devices.partitionPath(str(y+1)))
try:
bustype = devices.bus_type()
except:
bustype = _("unknown")
if fstype in ("ext2","ext3","ext4","xfs"):
list.append(("%s (%s) - Partition %d (%s)" %(devices.model(), bustype, y+1, fstype),(devices, y+1, fstype)))
#Netzlaufwerke
try:
for x in getMountP():
entry = x.split(' ')
if len(entry)>3 and entry[2]=="nfs":
server = entry[0].split(':')
if len(server)==2:
print "[FlashExpander] server",server
list.append(("Server (%s) - Path (%s)" %(server[0], server[1]),server))
except:
print "[FlashExpander] <getMountPoints>"
if len(list)==0:
list.append((_("No HDD-, SSD- or USB-Device found. Please first initialized."),None))
self["list"] = MenuList(list=list)
self.Console = Console()
def Ok(self):
sel = self["list"].getCurrent()
if sel and sel[1]:
if len(sel[1])==3:#Device
tstr = _("Are you sure want to create FlashExpander on\n%s\nPartition %d") % (sel[1][0].model(), sel[1][1])
self.session.openWithCallback(boundFunction(self.__startFE_device,sel[1]), MessageBox, tstr)
if len(sel[1])==2:#Server
tstr = _("Are you sure want to create FlashExpander on \nServer: %s\nPath: %s") % (sel[1][0], sel[1][1])
self.session.openWithCallback(boundFunction(self.__startFE_server,sel[1]), MessageBox, tstr)
def __getPartitionType(self,device):
fstype = None
try:
if path.exists("/lib/udev/vol_id"):
val = os_popen("/lib/udev/vol_id --type " + device)
fstype = val.read().strip()
elif path.exists("/sbin/blkid"):
for line in os_popen("/sbin/blkid " + device).read().split('\n'):
if not line.startswith(device):
continue
fstobj = re.search(r' TYPE="((?:[^"\\]|\\.)*)"', line)
if fstobj:
fstype = fstobj.group(1)
except:
print "[FlashExpander] <error get fstype>"
return False
return fstype
def __getPartitionUUID(self,device):
try:
if path.exists("/dev/disk/by-uuid"):
for uuid in listdir("/dev/disk/by-uuid/"):
if not path.exists("/dev/disk/by-uuid/" + uuid):
return None
if path.realpath("/dev/disk/by-uuid/" + uuid) == device:
return "/dev/disk/by-uuid/" + uuid
else:
#Topfi begin (use more reliable UUID mount on boxes without /dev/disk/by-uuid)
p = Popen(["blkid", "-o", "udev", device], stdout=PIPE, stderr=PIPE, stdin=PIPE)
txtUUID = p.stdout.read()
start = txtUUID.find("ID_FS_UUID=")
if start > -1:
txtUUID=txtUUID[start+11:]
end = txtUUID.find("\n")
if end > -1:
txtUUID=txtUUID[:end]
return "UUID="+txtUUID
#Topfi end
return device
except:
print "[FlashExpander] <error get UUID>"
return None
def __startFE_device(self, val, result):
if result:
partitionPath = val[0].partitionPath(str(val[1]))
uuidPath = self.__getPartitionUUID(partitionPath)
fstype=val[2]
print "[FlashExpander]",partitionPath,uuidPath,fstype
if uuidPath == None:
self.session.open(MessageBox, _("read UUID"), MessageBox.TYPE_ERROR, timeout=5)
return
mountpoint = ismounted(uuidPath,"")
if mountpoint == False:
mountpoint = ismounted(partitionPath,"")
if mountpoint == False:
if self.__mount(uuidPath,"/media/FEtmp")==0:
mountpoint = "/media/FEtmp"
self.__copyFlash(mountpoint,(partitionPath,uuidPath,fstype))
#if self.__checkMountPoint(mountpoint):
# cmd = "rm -rf %s/* && cp -a /usr/* %s/" % (mountpoint, mountpoint)
# self.Console.ePopen(cmd, self.__CopyFinished)
# self.__message = self.session.openWithCallback(boundFunction(self.__EndCB,(partitionPath,uuidPath,fstype)), MessageBox, _("Please wait, Flash memory will be copied."), MessageBox.TYPE_INFO,enable_input=False)
def __startFE_server(self, val, result):
if result:
server = val[0]
path = val[1]
print "[FlashExpander]",server,path
mountpoint = ismounted("%s:%s" %(server,path),"")
self.__copyFlash(mountpoint,("%s:%s" %(server,path),None,"nfs"))
#if self.__checkMountPoint(mountpoint):
# cmd = "rm -rf %s/* && cp -a /usr/* %s/" % (mountpoint, mountpoint)
# self.Console.ePopen(cmd, self.__CopyFinished)
# self.__message = self.session.openWithCallback(boundFunction(self.__EndCB,("%s:%s" %(server,path),None,"nfs")), MessageBox, _("Please wait, Flash memory will be copied."), MessageBox.TYPE_INFO,enable_input=False)
def __copyFlash(self,mp,data):
if self.__checkMountPoint(mp):
cmd = "cp -af /usr/* %s/" % (mp)
self.Console.ePopen(cmd, self.__CopyFinished)
self.__message = self.session.openWithCallback(boundFunction(self.__EndCB,data), MessageBox, _("Please wait, Flash memory will be copied."), MessageBox.TYPE_INFO,enable_input=False)
def __mount(self,dev,mp):
if path.exists(mp)==False:
createDir(mp,True)
cmd = "mount " + dev + " " + mp
#print "[FlashExpander]",cmd
res = system(cmd)
return (res >> 8)
def __checkMountPoint(self,mp):
if mp == False:
self.session.open(MessageBox, _("Mount failed (%s)") %fstype, MessageBox.TYPE_ERROR, timeout=5)
return False
if getFreeSize(mp)<180:
self.session.open(MessageBox, _("Too little free space < 180MB or wrong Filesystem!"), MessageBox.TYPE_ERROR, timeout=5)
return False
return True
def __CopyFinished(self, result, retval, extra_args = None):
if retval==0:
self.__message.close(True)
else:
self.__message.close(False)
def __EndCB(self,val,retval):
if retval==True:
try:
devPath = val[0]
uuidPath = val[1]
fstype = val[2]
#fstab editieren
mounts = file('/etc/fstab').read().split('\n')
newlines = []
for x in mounts:
if x.startswith(devPath) or x.startswith("/dev/hdc1"):#/dev/hdc1 wegen 7025+
continue
if uuidPath and x.startswith(uuidPath):
continue
if len(x)>1 and x[0]!='#':
newlines.append(x)
if fstype=="nfs":
newlines.append("%s\t/usr\t%s\trw,nolock,timeo=14,intr\t0 0" %(devPath, fstype))
else:
newlines.append("%s\t/usr\tauto\tdefaults\t0 0" %(uuidPath))
fp = file("/etc/fstab", 'w')
fp.write("#automatically edited by FlashExpander\n")
for x in newlines:
fp.write(x + "\n")
fp.close()
print "[FlashExpander] write new /etc/fstab"
self.session.openWithCallback(self.Exit, MessageBox, _("Do you want to reboot your STB_BOX?"))
except:
self.session.open(MessageBox, _("error adding fstab entry for: %s") % (devPath), MessageBox.TYPE_ERROR, timeout=5)
return
else:
self.session.open(MessageBox, _("error copy flash memory"), MessageBox.TYPE_ERROR, timeout=10)
def Exit(self,data=False):
self.close(data)
| 31.546358
| 217
| 0.653826
|
e62a0759b9120de8114c5bec45f44eeea2c41e3c
| 5,518
|
py
|
Python
|
Tests/test_SCOP_Scop.py
|
erpeg/biopython
|
296b6b451ce7161fdace2fd36d0817722491d733
|
[
"BSD-3-Clause"
] | 2
|
2020-06-25T12:52:03.000Z
|
2020-07-11T09:47:34.000Z
|
Tests/test_SCOP_Scop.py
|
cosign070128/biopython
|
2f02e34ba76306e9c27eec9e051809bec2cece9b
|
[
"BSD-3-Clause"
] | 9
|
2020-05-05T00:54:23.000Z
|
2020-06-09T17:10:45.000Z
|
Tests/test_SCOP_Scop.py
|
cosign070128/biopython
|
2f02e34ba76306e9c27eec9e051809bec2cece9b
|
[
"BSD-3-Clause"
] | 3
|
2020-06-29T13:07:46.000Z
|
2021-06-14T20:11:55.000Z
|
# Copyright 2001 by Gavin E. Crooks. All rights reserved.
# Modifications Copyright 2010 Jeffrey Finkelstein. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unit test for Scop."""
import unittest
from io import StringIO
from Bio.SCOP import Scop, cmp_sccs, parse_domain
class ScopTests(unittest.TestCase):
def _compare_cla_lines(self, cla_line_1, cla_line_2):
"""Compare the two specified Cla lines for equality.
The order of the key-value pairs in the sixth field of the lines does
not matter. For more information, see
http://scop.mrc-lmb.cam.ac.uk/scop/release-notes.html.
"""
fields1 = cla_line_1.rstrip().split("\t")
fields2 = cla_line_2.rstrip().split("\t")
print(fields1)
print(fields2)
# compare the first five fields in a Cla line, which should be exactly
# the same
if fields1[:5] != fields2[:5]:
return False
# compare the hierarchy key-value pairs, which are unordered
if set(fields1[5].split(",")) != set(fields2[5].split(",")):
return False
return True
def testParse(self):
with open("./SCOP/dir.cla.scop.txt_test") as f:
cla = f.read()
with open("./SCOP/dir.des.scop.txt_test") as f:
des = f.read()
with open("./SCOP/dir.hie.scop.txt_test") as f:
hie = f.read()
scop = Scop(StringIO(cla), StringIO(des), StringIO(hie))
cla_out = StringIO()
scop.write_cla(cla_out)
lines = zip(cla.rstrip().split("\n"), cla_out.getvalue().rstrip().split("\n"))
for expected_line, line in lines:
self.assertTrue(self._compare_cla_lines(expected_line, line))
des_out = StringIO()
scop.write_des(des_out)
self.assertEqual(des_out.getvalue(), des)
hie_out = StringIO()
scop.write_hie(hie_out)
self.assertEqual(hie_out.getvalue(), hie)
domain = scop.getDomainBySid("d1hbia_")
self.assertEqual(domain.sunid, 14996)
domains = scop.getDomains()
self.assertEqual(len(domains), 14)
self.assertEqual(domains[4].sunid, 14988)
dom = scop.getNodeBySunid(-111)
self.assertEqual(dom, None)
dom = scop.getDomainBySid("no such domain")
self.assertEqual(dom, None)
def testSccsOrder(self):
self.assertEqual(cmp_sccs("a.1.1.1", "a.1.1.1"), 0)
self.assertEqual(cmp_sccs("a.1.1.2", "a.1.1.1"), 1)
self.assertEqual(cmp_sccs("a.1.1.2", "a.1.1.11"), -1)
self.assertEqual(cmp_sccs("a.1.2.2", "a.1.1.11"), 1)
self.assertEqual(cmp_sccs("a.1.2.2", "a.5.1.11"), -1)
self.assertEqual(cmp_sccs("b.1.2.2", "a.5.1.11"), 1)
self.assertEqual(cmp_sccs("b.1.2.2", "b.1.2"), 1)
def testParseDomain(self):
s = ">d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {Escherichia coli}"
dom = parse_domain(s)
self.assertEqual(dom.sid, "d1tpt_1")
self.assertEqual(dom.sccs, "a.46.2.1")
self.assertEqual(dom.residues.pdbid, "1tpt")
self.assertEqual(dom.description, "Thymidine phosphorylase {Escherichia coli}")
s2 = "d1tpt_1 a.46.2.1 (1tpt 1-70) Thymidine phosphorylase {E. coli}"
self.assertEqual(s2, str(parse_domain(s2)))
# Genetic domains (See Astral release notes)
s3 = "g1cph.1 g.1.1.1 (1cph B:,A:) Insulin {Cow (Bos taurus)}"
self.assertEqual(s3, str(parse_domain(s3)))
s4 = "e1cph.1a g.1.1.1 (1cph A:) Insulin {Cow (Bos taurus)}"
self.assertEqual(s4, str(parse_domain(s4)))
# Raw Astral header
s5 = ">e1cph.1a g.1.1.1 (A:) Insulin {Cow (Bos taurus)}"
self.assertEqual(s4, str(parse_domain(s5)))
self.assertRaises(ValueError, parse_domain, "Totally wrong")
def testConstructFromDirectory(self):
scop = Scop(dir_path="SCOP", version="test")
self.assertIsInstance(scop, Scop)
domain = scop.getDomainBySid("d1hbia_")
self.assertEqual(domain.sunid, 14996)
def testGetAscendent(self):
scop = Scop(dir_path="SCOP", version="test")
domain = scop.getDomainBySid("d1hbia_")
# get the fold
fold = domain.getAscendent("cf")
self.assertEqual(fold.sunid, 46457)
# get the superfamily
sf = domain.getAscendent("superfamily")
self.assertEqual(sf.sunid, 46458)
# px has no px ascendent
px = domain.getAscendent("px")
self.assertEqual(px, None)
# an sf has no px ascendent
px2 = sf.getAscendent("px")
self.assertEqual(px2, None)
def test_get_descendents(self):
"""Test getDescendents method."""
scop = Scop(dir_path="SCOP", version="test")
fold = scop.getNodeBySunid(46457)
# get px descendents
domains = fold.getDescendents("px")
self.assertEqual(len(domains), 14)
for d in domains:
self.assertEqual(d.type, "px")
sfs = fold.getDescendents("superfamily")
self.assertEqual(len(sfs), 1)
for d in sfs:
self.assertEqual(d.type, "sf")
# cl has no cl descendent
cl = fold.getDescendents("cl")
self.assertEqual(cl, [])
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| 34.704403
| 87
| 0.615259
|
cacc5f492b569659f7f73e4b9ea1de827e9b0134
| 1,721
|
py
|
Python
|
tests/test_qft.py
|
AGaliciaMartinez/qutip-qip
|
73411ae884d117f05ff0ecb920ca055940fc76dd
|
[
"BSD-3-Clause"
] | 36
|
2020-05-22T10:51:13.000Z
|
2022-03-07T05:41:08.000Z
|
tests/test_qft.py
|
AGaliciaMartinez/qutip-qip
|
73411ae884d117f05ff0ecb920ca055940fc76dd
|
[
"BSD-3-Clause"
] | 73
|
2020-07-14T07:26:48.000Z
|
2022-03-25T08:00:43.000Z
|
tests/test_qft.py
|
AGaliciaMartinez/qutip-qip
|
73411ae884d117f05ff0ecb920ca055940fc76dd
|
[
"BSD-3-Clause"
] | 24
|
2020-06-18T22:59:20.000Z
|
2022-03-12T05:11:59.000Z
|
from numpy.testing import assert_, assert_equal, assert_string_equal, run_module_suite
from qutip_qip.algorithms.qft import qft, qft_steps, qft_gate_sequence
from qutip_qip.operations import gate_sequence_product
class TestQFT:
"""
A test class for the QuTiP functions for QFT
"""
def testQFTComparison(self):
"""
qft: compare qft and product of qft steps
"""
for N in range(1, 5):
U1 = qft(N)
U2 = gate_sequence_product(qft_steps(N))
assert_((U1 - U2).norm() < 1e-12)
def testQFTGateSequenceNoSwapping(self):
"""
qft: Inspect key properties of gate sequences of length N,
with swapping disabled.
"""
for N in range(1, 6):
circuit = qft_gate_sequence(N, swapping=False)
assert_equal(circuit.N, N)
totsize = N * (N + 1) / 2
assert_equal(len(circuit.gates), totsize)
snots = sum(g.name == "SNOT" for g in circuit.gates)
assert_equal(snots, N)
phases = sum(g.name == "CPHASE" for g in circuit.gates)
assert_equal(phases, N * (N - 1) / 2)
def testQFTGateSequenceWithSwapping(self):
"""
qft: Inspect swap gates added to gate sequences if
swapping is enabled.
"""
for N in range(1, 6):
circuit = qft_gate_sequence(N, swapping=True)
phases = int(N * (N + 1) / 2)
swaps = int(N // 2)
assert_equal(len(circuit.gates), phases + swaps)
for i in range(phases, phases + swaps):
assert_string_equal(circuit.gates[i].name, "SWAP")
if __name__ == "__main__":
run_module_suite()
| 31.290909
| 86
| 0.583963
|
29a2fe16a4e4e250675dc31fa497a6e6dcfa7422
| 1,352
|
py
|
Python
|
data/p2DJ/New/program/pyquil/startPyquil383.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/program/pyquil/startPyquil383.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/program/pyquil/startPyquil383.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=2
# total number=20
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += H(1) # number=8
prog += CZ(0,1) # number=9
prog += H(1) # number=10
prog += H(1) # number=11
prog += CZ(0,1) # number=12
prog += H(1) # number=13
prog += CNOT(0,1) # number=7
prog += CNOT(1,0) # number=14
prog += CNOT(1,0) # number=17
prog += X(0) # number=18
prog += CNOT(1,0) # number=19
prog += CNOT(1,0) # number=16
prog += Y(1) # number=6
prog += X(0) # number=4
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('1q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil383.csv","w")
    print(summarise_results(bitstrings), file=writefile)
writefile.close()
| 23.310345
| 64
| 0.590976
|
5f128179b1655b7d231a72c72acfd828e5e20904
| 893
|
py
|
Python
|
src/syft/ast/util.py
|
chinmayshah99/PySyft
|
c26c7c9478df37da7d0327a67a5987c2dfd91cbe
|
[
"MIT"
] | 1
|
2020-12-22T17:22:13.000Z
|
2020-12-22T17:22:13.000Z
|
src/syft/ast/util.py
|
chinmayshah99/PySyft
|
c26c7c9478df37da7d0327a67a5987c2dfd91cbe
|
[
"MIT"
] | null | null | null |
src/syft/ast/util.py
|
chinmayshah99/PySyft
|
c26c7c9478df37da7d0327a67a5987c2dfd91cbe
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
This is a skeleton file that can serve as a starting point for a Python
console script. To run this script uncomment the following lines in the
[options.entry_points] section in setup.cfg:
console_scripts =
fibonacci = syft.skeleton:run
Then run `python setup.py install` which will install the command `fibonacci`
inside your current environment.
Besides console scripts, the header (i.e. until _logger...) of this file can
also be used as template for Python modules.
Note: This skeleton file can be safely removed if not needed!
"""
# stdlib
from typing import List as TypeList
# third party
import torch
module_type = type(torch)
func_type = type(lambda x: x)
builtin_func_type = type(torch.ones)
class_type = type(func_type)
def unsplit(list_of_things: TypeList[str], separator: str = ".") -> str:
return separator.join(list_of_things)
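# --- Illustrative usage (not part of the original module): a minimal sketch of
# --- `unsplit`, runnable when this file is executed directly.
if __name__ == "__main__":
    parts = ["syft", "ast", "util"]
    assert unsplit(parts) == "syft.ast.util"
    assert unsplit(parts, separator="/") == "syft/ast/util"
    print("unsplit examples passed")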
| 28.806452
| 77
| 0.74692
|
28e95173f9ce666869a5ebc79ce293ebf114d125
| 13,669
|
py
|
Python
|
py3status/modules/lm_sensors.py
|
cstruck/py3status
|
102ad92d54226ad874e8792827b6f91f7346f9f1
|
[
"BSD-3-Clause"
] | null | null | null |
py3status/modules/lm_sensors.py
|
cstruck/py3status
|
102ad92d54226ad874e8792827b6f91f7346f9f1
|
[
"BSD-3-Clause"
] | null | null | null |
py3status/modules/lm_sensors.py
|
cstruck/py3status
|
102ad92d54226ad874e8792827b6f91f7346f9f1
|
[
"BSD-3-Clause"
] | null | null | null |
r"""
Display temperatures, voltages, fans, and more from hardware sensors.
Configuration parameters:
cache_timeout: refresh interval for this module (default 10)
chips: specify a list of chips to use (default [])
format: display format for this module (default '{format_chip}')
format_chip: display format for chips (default '{name} {format_sensor}')
format_chip_separator: show separator if more than one (default ' ')
format_sensor: display format for sensors
(default '[\?color=darkgray {name}] [\?color=auto.input&show {input}]')
format_sensor_separator: show separator if more than one (default ' ')
sensors: specify a list of sensors to use (default [])
thresholds: specify color thresholds to use (default {'auto.input': True})
Format placeholders:
{format_chip} format for chips
Format_chip placeholders:
{name} chip name, eg coretemp-isa-0000, nouveau-pci-0500
{adapter} adapter type, eg ISA adapter, PCI adapter
{format_sensor} format for sensors
Format_sensor placeholders:
{name} sensor name, eg core_0, gpu_core, temp1, fan1
See `sensors -u` for a full list of placeholders for `format_chip` and
`format_sensor` (without the prefixes), as well as for the `chips` and `sensors` options.
See https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface
for more information on the sensor placeholders.
Color options for `auto.input` threshold:
color_zero: zero value or less (color red)
color_min: minimum value (color lightgreen)
color_excl_input: input value excluded from threshold (color None)
color_input: input value (color lime)
color_near_max: input value near maximum value (color yellow)
color_max: maximum value (color orange)
color_near_crit: input value near critical value (color lightcoral)
color_crit: critical value (color red)
Color thresholds:
format_sensor:
xxx: print a color based on the value of `xxx` placeholder
auto.input: print a color based on the value of `input` placeholder
against a customized threshold
Requires:
lm_sensors: a tool to read temperature/voltage/fan sensors
sensors-detect: see `man sensors-detect # --auto` to read about
using defaults or to compile a list of kernel modules
Examples:
```
# identify possible chips, sensors, placeholders, etc
[user@py3status ~] $ sensors -u
----------------------------- # ──────────────────────────────────────
coretemp-isa-0000 # chip {name} # chip: coretemp*
Adapter: ISA adapter # ├── {adapter} type
---- # │------------------------------------
Core 0: # ├── sensor {name} # sensor: core_0
temp2_input: 48.000 # │ ├── {input}
temp2_max: 81.000 # │ ├── {max}
temp2_crit: 91.000 # │ ├── {crit}
temp2_crit_alarm: 0.000 # │ └── {crit_alarm}
Core 1: # └── sensor {name} # sensor: core_1
temp3_input: 48.000 # ├── {input}
temp3_max: 81.000 # ├── {max}
temp3_crit: 91.000 # ├── {crit}
temp3_crit_alarm: 0.000 # └── {crit_alarm}
# ──────────────────────────────────────
k10temp-pci-00c3 # chip {name} # chip: k10temp*
Adapter: PCI adapter # ├── {adapter} type
---- # │------------------------------------
temp1: # ├── sensor {name} # sensor: temp1
temp1_input: 30.000 # │ ├── {input}
temp1_max: -71.000 # │ ├── {max}
temp1_min: -15.000 # │ ├── {min}
temp1_alarm: 1.000 # │ ├── {alarm}
temp1_offset: 0.000 # │ ├── {offset}
temp1_beep: 0.000 # │ └── {beep}
intrusion0: # └── sensor {name} # sensor: intrusion0
intrusion0_alarm: 0.000 # └── {alarm}
Solid lines denote chips. Dashed lines denote sensors.
Sensor names are lowercased and their spaces replaced with underscores.
The numbered prefixes, eg `temp1_*`, are removed to keep names clean.
# specify chips to use
lm_sensors {
chips = ['coretemp-isa-0000'] # full
OR
chips = ['coretemp*'] # fnmatch
}
# specify sensors to use
lm_sensors {
sensors = ['core_0', 'core_1', 'core_2', 'core_3'] # full
OR
sensors = ['core_*'] # fnmatch
}
# show name per chip, eg CPU 35°C 36°C 37°C 39°C GPU 52°C
lm_sensors {
format_chip = '[\?if=name=coretemp-isa-0000 CPU ]'
format_chip += '[\?if=name=nouveau-pci-0500 GPU ]'
format_chip += '{format_sensor}'
format_sensor = '\?color=auto.input {input}°C'
sensors = ['core*', 'temp*']
}
# show name per sensor, eg CPU1 35°C CPU2 36°C CPU3 37°C CPU4 39°C GPU 52°C
lm_sensors {
format_chip = '{format_sensor}'
format_sensor = '[\?if=name=core_0 CPU1 ]'
format_sensor += '[\?if=name=core_1 CPU2 ]'
format_sensor += '[\?if=name=core_2 CPU3 ]'
format_sensor += '[\?if=name=core_3 CPU4 ]'
format_sensor += '[\?if=name=gpu_core GPU ]'
format_sensor += '[\?color=auto.input {input}°C]'
sensors = ['core*', 'temp*']
}
```
@author lasers
SAMPLE OUTPUT
[
{'full_text': 'coretemp-isa-000 '},
{'full_text': 'core_0 ', 'color': '#a9a9a9'},
{'full_text': '39 ', 'color': '#00ff00'},
{'full_text': 'core_1 ', 'color': '#a9a9a9'},
{'full_text': '40', 'color': '#00ff00'},
]
chip_names
[
{'full_text': 'CPU '},
{'full_text': '62°C ', 'color': '#00ff00'},
{'full_text': '76°C ', 'color': '#ffff00'},
{'full_text': '83°C ', 'color': '#ffa500'},
{'full_text': '92°C ', 'color': '#ff0000'},
{'full_text': 'GPU '},
{'full_text': '52°C', 'color': '#00ff00'},
]
sensor_names
[
{'full_text': 'CPU1 '},
{'full_text': '62°C ', 'color': '#00ff00'},
{'full_text': 'CPU2 '},
{'full_text': '76°C ', 'color': '#ffff00'},
{'full_text': 'TEMP1 '},
{'full_text': '30 ', 'color': '#ffa500'},
{'full_text': 'TEMP2 '},
{'full_text': '27 ', 'color': '#ffa500'},
{'full_text': 'GPU '},
{'full_text': '52°C', 'color': '#00ff00'},
]
"""
from fnmatch import fnmatch
from collections import OrderedDict
STRING_NOT_INSTALLED = "not installed"
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 10
chips = []
format = "{format_chip}"
format_chip = "{name} {format_sensor}"
format_chip_separator = " "
format_sensor = r"[\?color=darkgray {name}] [\?color=auto.input&show {input}]"
format_sensor_separator = " "
sensors = []
thresholds = {"auto.input": True}
def post_config_hook(self):
if not self.py3.check_commands("sensors"):
raise Exception(STRING_NOT_INSTALLED)
placeholders = self.py3.get_placeholders_list(self.format_sensor)
format_sensor = {x: ":g" for x in placeholders if x != "name"}
self.sensor_placeholders = [x for x in placeholders if x != "name"]
self.format_sensor = self.py3.update_placeholder_formats(
self.format_sensor, format_sensor
)
self.first_run = True
self.lm_sensors_command = "sensors -u"
if not self.py3.format_contains(self.format_chip, "adapter"):
self.lm_sensors_command += "A" # don't print adapters
if self.chips:
lm_sensors_data = self._get_lm_sensors_data()
chips = []
for _filter in self.chips:
for chunk in lm_sensors_data.split("\n\n")[:-1]:
for line in chunk.splitlines():
if fnmatch(line, _filter):
chips.append(line)
break
self.lm_sensors_command += " {}".format(" ".join(chips))
self.sensors = {"list": [], "name": {}, "sensors": self.sensors}
self.thresholds_auto = False
self.thresholds_man = self.py3.get_color_names_list(self.format_sensor)
if (
all("auto.input" in x for x in [self.thresholds, self.thresholds_man])
and "input" in placeholders
):
self.color_zero = self.py3.COLOR_ZERO or "red"
self.color_input = self.py3.COLOR_INPUT or "lime"
self.color_min = self.py3.COLOR_MIN or "lightgreen"
self.color_excl_input = self.py3.COLOR_EXCL_INPUT or None
self.color_near_max = self.py3.COLOR_NEAR_MAX or "yellow"
self.color_max = self.py3.COLOR_MAX or "orange"
self.color_near_crit = self.py3.COLOR_NEAR_CRIT or "lightcoral"
self.color_crit = self.py3.COLOR_CRIT or "red"
self.thresholds_auto = self.thresholds["auto.input"]
del self.thresholds["auto.input"]
if "auto.input" in self.thresholds_man:
self.thresholds_man.remove("auto.input")
def _get_lm_sensors_data(self):
return self.py3.command_output(self.lm_sensors_command)
def lm_sensors(self):
lm_sensors_data = self._get_lm_sensors_data()
new_chip = []
for chunk in lm_sensors_data.split("\n\n")[:-1]:
chip = {"sensors": OrderedDict()}
first_line = True
sensor_name = None
new_sensor = []
for line in chunk.splitlines():
if line.startswith(" "):
if not sensor_name:
continue
key, value = line.split(": ")
prefix, key = key.split("_", 1)
chip["sensors"][sensor_name][key] = value
elif first_line:
chip["name"] = line
first_line = False
elif "Adapter:" in line:
chip["adapter"] = line[9:]
else:
try:
sensor_name = self.sensors["name"][line]
except KeyError:
sensor_name = line[:-1].lower().replace(" ", "_")
self.sensors["name"][line] = sensor_name
if self.sensors["sensors"]:
if self.first_run:
for _filter in self.sensors["sensors"]:
if fnmatch(sensor_name, _filter):
self.sensors["list"].append(sensor_name)
if sensor_name not in self.sensors["list"]:
sensor_name = None
continue
chip["sensors"][sensor_name] = {}
for name, sensor in chip["sensors"].items():
sensor["name"] = name
for x in self.thresholds_man:
if x in sensor:
self.py3.threshold_get_color(sensor[x], x)
if self.thresholds_auto:
auto_input = []
_input = sensor.get("input")
if self.first_run and _input is not None:
_input = float(_input)
_min = float(sensor.get("min", 0))
_max = float(sensor.get("max", 0))
_crit = float(sensor.get("crit", 0))
auto_input.append((0, self.color_zero))
if _min or _max or _crit:
_color_input = self.color_input
else:
_color_input = self.color_excl_input
auto_input.append((0.001, _color_input))
if _min >= _input:
auto_input.append((_min, self.color_min))
if _max:
_near_max = _max - _max / 100 * 10
auto_input.append((_near_max, self.color_near_max))
auto_input.append((_max, self.color_max))
if _crit:
_near_crit = _crit - _crit / 100 * 10
auto_input.append((_near_crit, self.color_near_crit))
auto_input.append((_crit, self.color_crit))
key = "{}/{}".format(chip["name"], sensor["name"])
self.py3.threshold_get_color(
_input, ("auto.input", key, auto_input)
)
for x in self.sensor_placeholders:
if x not in sensor:
sensor[x] = None
new_sensor.append(self.py3.safe_format(self.format_sensor, sensor))
format_sensor_separator = self.py3.safe_format(self.format_sensor_separator)
format_sensor = self.py3.composite_join(format_sensor_separator, new_sensor)
chip["format_sensor"] = format_sensor
del chip["sensors"]
new_chip.append(self.py3.safe_format(self.format_chip, chip))
format_chip_separator = self.py3.safe_format(self.format_chip_separator)
format_chip = self.py3.composite_join(format_chip_separator, new_chip)
self.first_run = False
return {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(
self.format, {"format_chip": format_chip}
),
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| 39.62029
| 88
| 0.543639
|
2c29c84a7fa18ade1c9b21879610b016c84aaaea
| 575
|
py
|
Python
|
apps/predict/src/dsp.py
|
kikei/btc-bot-ai
|
cb118fa1809ebef472a2025be697c9050e948009
|
[
"Apache-2.0"
] | 1
|
2020-02-02T13:53:21.000Z
|
2020-02-02T13:53:21.000Z
|
apps/predict/src/dsp.py
|
kikei/btc-bot-ai
|
cb118fa1809ebef472a2025be697c9050e948009
|
[
"Apache-2.0"
] | null | null | null |
apps/predict/src/dsp.py
|
kikei/btc-bot-ai
|
cb118fa1809ebef472a2025be697c9050e948009
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
def lpfilter(size):
return np.full(size, 1.0 / size)
def crosszero(v, thres=0., ud=+1.0, du=-1.0):
w = np.zeros(v.shape)
iud = idu = None
for i in range(1, len(v)):
if v[i-1] > 0. > v[i]:
if -thres > v[i]:
w[i] = du
else:
idu = i
elif v[i-1] > -thres > v[i] and idu is not None:
w[idu] = du
idu = None
elif v[i-1] < 0. < v[i]:
if thres < v[i]:
w[i] = ud
else:
iud = i
elif v[i-1] < thres < v[i] and iud is not None:
w[iud] = ud
iud = None
return w
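# --- Illustrative usage (not part of the original module): smooth a noisy sine
# --- with `lpfilter` and mark its thresholded zero crossings with `crosszero`.
if __name__ == "__main__":
    t = np.linspace(0.0, 4.0 * np.pi, 200)
    noisy = np.sin(t) + 0.1 * np.random.randn(t.size)
    smoothed = np.convolve(noisy, lpfilter(5), mode="same")
    marks = crosszero(smoothed, thres=0.05)
    print("up crossings:", int(np.sum(marks > 0)))
    print("down crossings:", int(np.sum(marks < 0)))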
| 21.296296
| 52
| 0.469565
|
b5b35b653a2ace33dcf5b3e05131d4f3a6af1531
| 1,011
|
py
|
Python
|
betty/cropper/storage.py
|
theonion/betty-cropper
|
bb0e570c1eb0ddb2f39d109f996edd1d417d1fe4
|
[
"MIT"
] | 14
|
2015-01-13T21:24:30.000Z
|
2020-12-18T23:39:35.000Z
|
betty/cropper/storage.py
|
theonion/betty-cropper
|
bb0e570c1eb0ddb2f39d109f996edd1d417d1fe4
|
[
"MIT"
] | 39
|
2015-06-08T20:56:35.000Z
|
2017-06-09T03:20:08.000Z
|
betty/cropper/storage.py
|
theonion/betty-cropper
|
bb0e570c1eb0ddb2f39d109f996edd1d417d1fe4
|
[
"MIT"
] | 5
|
2015-12-04T20:22:08.000Z
|
2018-03-05T19:17:43.000Z
|
from storages.backends.s3boto import S3BotoStorage
from betty.conf.app import settings
logger = __import__('logging').getLogger(__name__)
class MigratedS3BotoStorage(S3BotoStorage):
"""Workaround for allowing using 2 different storage systems in parallel during migration to S3
storage.
Use this storage intead of S3BotoStorage to allow easy re-wiring of path locations from
filesystem to S3-based.
Required Settings:
BETTY_STORAGE_MIGRATION_OLD_ROOT - Old localfilesystem root directory
BETTY_STORAGE_MIGRATION_NEW_ROOT - S3 key root
"""
def _clean_name(self, name):
if name.startswith(settings.BETTY_STORAGE_MIGRATION_OLD_ROOT):
old_name = name
name = (settings.BETTY_STORAGE_MIGRATION_NEW_ROOT +
name[len(settings.BETTY_STORAGE_MIGRATION_OLD_ROOT):])
logger.info('Remap name: %s --> %s', old_name, name)
return super(MigratedS3BotoStorage, self)._clean_name(name)
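# --- Illustrative settings sketch (not part of the original module). The two
# --- setting names come from the docstring above; the concrete values here are
# --- assumptions made only for the example:
#
#   BETTY_STORAGE_MIGRATION_OLD_ROOT = "/var/betty-cropper/images"
#   BETTY_STORAGE_MIGRATION_NEW_ROOT = "images"
#
# With these settings, a legacy path such as "/var/betty-cropper/images/1234/original"
# is remapped to the S3 key "images/1234/original" before S3BotoStorage cleans it.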
| 36.107143
| 99
| 0.709199
|
41c3b5c6d3a6b00d82718308dbf5ba1d4904e8d9
| 4,794
|
py
|
Python
|
dnnv/nn/operations/patterns.py
|
samysweb/dnnv
|
58fb95b7300914d9da28eed86c39eca473b1aaef
|
[
"MIT"
] | 5
|
2022-01-28T20:30:34.000Z
|
2022-03-17T09:26:52.000Z
|
dnnv/nn/operations/patterns.py
|
samysweb/dnnv
|
58fb95b7300914d9da28eed86c39eca473b1aaef
|
[
"MIT"
] | 9
|
2022-01-27T03:50:28.000Z
|
2022-02-08T18:42:17.000Z
|
dnnv/nn/operations/patterns.py
|
samysweb/dnnv
|
58fb95b7300914d9da28eed86c39eca473b1aaef
|
[
"MIT"
] | 2
|
2022-02-03T17:32:43.000Z
|
2022-03-24T16:38:49.000Z
|
from abc import ABC, abstractmethod
from typing import Optional, Sequence, Union, TYPE_CHECKING
if TYPE_CHECKING:
from .base import Op, Operation
OpPatternType = Union["Op", "OperationPattern"]
class OperationPattern(ABC):
@abstractmethod
def match(self, operations: Sequence["Operation"]):
raise NotImplementedError()
def __and__(self, other: Optional[OpPatternType]) -> "Parallel":
return Parallel(self, other)
def __rand__(self, other: Optional[OpPatternType]) -> "Parallel":
return Parallel(other, self)
def __or__(self, other: Optional[OpPatternType]) -> "Or":
return Or(self, other)
def __ror__(self, other: Optional[OpPatternType]) -> "Or":
return Or(other, self)
def __rshift__(self, other: OpPatternType) -> "Sequential":
return Sequential(self, other)
def __rrshift__(self, other: OpPatternType) -> "Sequential":
return Sequential(other, self)
class Or(OperationPattern):
def __init__(self, *patterns: Optional[OpPatternType]):
self.patterns = set(patterns)
def __str__(self):
result_str = " | ".join(str(p) for p in self.patterns)
return f"({result_str})"
def __or__(self, other):
if other is not None and not isinstance(other, OperationPattern):
return NotImplemented
if isinstance(other, Or):
return Or(*self.patterns.union(other.patterns))
return Or(*self.patterns.union([other]))
def __ror__(self, other):
if other is not None and not isinstance(other, OperationPattern):
return NotImplemented
return Or(*self.patterns.union([other]))
def match(self, operations):
optional = False
for pattern in self.patterns:
if pattern is None:
optional = True
continue
for match in pattern.match(operations):
yield match
if optional:
yield operations
class Parallel(OperationPattern):
def __init__(self, *patterns: Optional[OpPatternType]):
self.patterns = patterns
def __str__(self):
result_str = " & ".join(str(p) for p in self.patterns)
return f"({result_str})"
def __and__(self, other):
if other is not None and not isinstance(other, OperationPattern):
return NotImplemented
if isinstance(other, Parallel):
return Parallel(*(self.patterns + other.patterns))
return Parallel(*(self.patterns + (other,)))
def __rand__(self, other):
if other is not None and not isinstance(other, OperationPattern):
return NotImplemented
return Parallel(*((other,) + self.patterns))
def match(self, operations):
if len(operations) != len(self.patterns):
return
matches = [[]]
for pattern, operation in zip(self.patterns, operations):
if pattern is None:
for match in matches:
match.append(operation)
continue
new_matches = []
for new_match in pattern.match([operation]):
for match in matches:
new_matches.append(match + new_match)
matches = new_matches
for match in matches:
match_set = set(match)
if len(match_set) == 1:
yield list(match_set)
elif len(match_set) == len(match):
yield match
else:
raise AssertionError(
"Unexpected error: Parallel match was not length 1 or N"
) # impossible?
class Sequential(OperationPattern):
def __init__(self, *patterns: OpPatternType):
self.patterns = patterns
def __str__(self):
result_str = " >> ".join(str(p) for p in self.patterns)
return f"({result_str})"
def __rshift__(self, other):
if not isinstance(other, OperationPattern):
return NotImplemented
if isinstance(other, Sequential):
return Sequential(*(self.patterns + other.patterns))
return Sequential(*(self.patterns + (other,)))
def __rrshift__(self, other):
if not isinstance(other, OperationPattern):
return NotImplemented
return Sequential(*((other,) + self.patterns))
def match(self, operations):
next_operations = [operations]
for pattern in reversed(self.patterns):
matches = []
for ops in next_operations:
for match in pattern.match(ops):
matches.append(match)
next_operations = matches
for match in next_operations:
yield match
__all__ = ["OperationPattern", "Or", "Parallel", "Sequential"]
| 33.291667
| 76
| 0.605966
|
25ff8e3528609950271e7509c2f7821beadf4143
| 31,917
|
py
|
Python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2018_03_01/operations/_policy_set_definitions_operations.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 1
|
2021-06-02T08:01:35.000Z
|
2021-06-02T08:01:35.000Z
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2018_03_01/operations/_policy_set_definitions_operations.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 226
|
2019-07-24T07:57:21.000Z
|
2019-10-15T01:07:24.000Z
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2018_03_01/operations/_policy_set_definitions_operations.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class PolicySetDefinitionsOperations(object):
"""PolicySetDefinitionsOperations operations.
You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version to use for the operation. Constant value: "2018-03-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-03-01"
self.config = config
def create_or_update(
self, policy_set_definition_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a policy set definition.
This operation creates or updates a policy set definition in the given
subscription with the given name.
:param policy_set_definition_name: The name of the policy set
definition to create.
:type policy_set_definition_name: str
:param parameters: The policy set definition properties.
:type parameters:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicySetDefinition
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PolicySetDefinition or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicySetDefinition or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.resource.policy.v2018_03_01.models.ErrorResponseException>`
"""
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'policySetDefinitionName': self._serialize.url("policy_set_definition_name", policy_set_definition_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PolicySetDefinition')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicySetDefinition', response)
if response.status_code == 201:
deserialized = self._deserialize('PolicySetDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policySetDefinitions/{policySetDefinitionName}'}
def delete(
self, policy_set_definition_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a policy set definition.
This operation deletes the policy set definition in the given
subscription with the given name.
:param policy_set_definition_name: The name of the policy set
definition to delete.
:type policy_set_definition_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.resource.policy.v2018_03_01.models.ErrorResponseException>`
"""
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'policySetDefinitionName': self._serialize.url("policy_set_definition_name", policy_set_definition_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise models.ErrorResponseException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policySetDefinitions/{policySetDefinitionName}'}
def get(
self, policy_set_definition_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves a policy set definition.
This operation retrieves the policy set definition in the given
subscription with the given name.
:param policy_set_definition_name: The name of the policy set
definition to get.
:type policy_set_definition_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PolicySetDefinition or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicySetDefinition or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.resource.policy.v2018_03_01.models.ErrorResponseException>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'policySetDefinitionName': self._serialize.url("policy_set_definition_name", policy_set_definition_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicySetDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policySetDefinitions/{policySetDefinitionName}'}
def get_built_in(
self, policy_set_definition_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves a built in policy set definition.
This operation retrieves the built-in policy set definition with the
given name.
:param policy_set_definition_name: The name of the policy set
definition to get.
:type policy_set_definition_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PolicySetDefinition or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicySetDefinition or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.resource.policy.v2018_03_01.models.ErrorResponseException>`
"""
# Construct URL
url = self.get_built_in.metadata['url']
path_format_arguments = {
'policySetDefinitionName': self._serialize.url("policy_set_definition_name", policy_set_definition_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicySetDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_built_in.metadata = {'url': '/providers/Microsoft.Authorization/policySetDefinitions/{policySetDefinitionName}'}
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Retrieves the policy set definitions for a subscription.
This operation retrieves a list of all the policy set definitions in
the given subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PolicySetDefinition
:rtype:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicySetDefinitionPaged[~azure.mgmt.resource.policy.v2018_03_01.models.PolicySetDefinition]
:raises:
:class:`ErrorResponseException<azure.mgmt.resource.policy.v2018_03_01.models.ErrorResponseException>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.PolicySetDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policySetDefinitions'}
def list_built_in(
self, custom_headers=None, raw=False, **operation_config):
"""Retrieves built-in policy set definitions.
This operation retrieves a list of all the built-in policy set
definitions.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PolicySetDefinition
:rtype:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicySetDefinitionPaged[~azure.mgmt.resource.policy.v2018_03_01.models.PolicySetDefinition]
:raises:
:class:`ErrorResponseException<azure.mgmt.resource.policy.v2018_03_01.models.ErrorResponseException>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_built_in.metadata['url']
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.PolicySetDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_built_in.metadata = {'url': '/providers/Microsoft.Authorization/policySetDefinitions'}
def create_or_update_at_management_group(
self, policy_set_definition_name, parameters, management_group_id, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a policy set definition.
This operation creates or updates a policy set definition in the given
management group with the given name.
:param policy_set_definition_name: The name of the policy set
definition to create.
:type policy_set_definition_name: str
:param parameters: The policy set definition properties.
:type parameters:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicySetDefinition
:param management_group_id: The ID of the management group.
:type management_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PolicySetDefinition or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicySetDefinition or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.resource.policy.v2018_03_01.models.ErrorResponseException>`
"""
# Construct URL
url = self.create_or_update_at_management_group.metadata['url']
path_format_arguments = {
'policySetDefinitionName': self._serialize.url("policy_set_definition_name", policy_set_definition_name, 'str'),
'managementGroupId': self._serialize.url("management_group_id", management_group_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PolicySetDefinition')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicySetDefinition', response)
if response.status_code == 201:
deserialized = self._deserialize('PolicySetDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_or_update_at_management_group.metadata = {'url': '/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policySetDefinitions/{policySetDefinitionName}'}
def delete_at_management_group(
self, policy_set_definition_name, management_group_id, custom_headers=None, raw=False, **operation_config):
"""Deletes a policy set definition.
This operation deletes the policy set definition in the given
management group with the given name.
:param policy_set_definition_name: The name of the policy set
definition to delete.
:type policy_set_definition_name: str
:param management_group_id: The ID of the management group.
:type management_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.resource.policy.v2018_03_01.models.ErrorResponseException>`
"""
# Construct URL
url = self.delete_at_management_group.metadata['url']
path_format_arguments = {
'policySetDefinitionName': self._serialize.url("policy_set_definition_name", policy_set_definition_name, 'str'),
'managementGroupId': self._serialize.url("management_group_id", management_group_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise models.ErrorResponseException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete_at_management_group.metadata = {'url': '/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policySetDefinitions/{policySetDefinitionName}'}
def get_at_management_group(
self, policy_set_definition_name, management_group_id, custom_headers=None, raw=False, **operation_config):
"""Retrieves a policy set definition.
This operation retrieves the policy set definition in the given
management group with the given name.
:param policy_set_definition_name: The name of the policy set
definition to get.
:type policy_set_definition_name: str
:param management_group_id: The ID of the management group.
:type management_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PolicySetDefinition or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicySetDefinition or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.resource.policy.v2018_03_01.models.ErrorResponseException>`
"""
# Construct URL
url = self.get_at_management_group.metadata['url']
path_format_arguments = {
'policySetDefinitionName': self._serialize.url("policy_set_definition_name", policy_set_definition_name, 'str'),
'managementGroupId': self._serialize.url("management_group_id", management_group_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicySetDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_at_management_group.metadata = {'url': '/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policySetDefinitions/{policySetDefinitionName}'}
def list_by_management_group(
self, management_group_id, custom_headers=None, raw=False, **operation_config):
"""Retrieves all policy set definitions in management group.
This operation retrieves a list of all the a policy set definition in
the given management group.
:param management_group_id: The ID of the management group.
:type management_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PolicySetDefinition
:rtype:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicySetDefinitionPaged[~azure.mgmt.resource.policy.v2018_03_01.models.PolicySetDefinition]
:raises:
:class:`ErrorResponseException<azure.mgmt.resource.policy.v2018_03_01.models.ErrorResponseException>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_by_management_group.metadata['url']
path_format_arguments = {
'managementGroupId': self._serialize.url("management_group_id", management_group_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.PolicySetDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_by_management_group.metadata = {'url': '/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policySetDefinitions'}
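# --- Illustrative usage sketch (not part of the generated code): how these
# --- operations are typically reached through the SDK's PolicyClient. The
# --- import paths and credential helper below are assumptions, and real Azure
# --- credentials are required for this to run.
if __name__ == "__main__":
    from azure.common.credentials import get_azure_cli_credentials  # assumed helper
    from azure.mgmt.resource.policy import PolicyClient  # assumed import path
    credentials, subscription_id = get_azure_cli_credentials()
    client = PolicyClient(credentials, subscription_id, api_version="2018-03-01")
    for policy_set in client.policy_set_definitions.list_built_in():
        print(policy_set.name)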
| 46.867841
| 212
| 0.680578
|
b7fa1c8d58fd027320dd0b9eaf71e218106cea09
| 98
|
py
|
Python
|
flask_app/twitoff/__init__.py
|
1aaronscott/DS-Unit-3-Sprint-3-Productization-and-Cloud
|
fcd06800f25b277195a4ec5117faa750afba96cb
|
[
"MIT"
] | null | null | null |
flask_app/twitoff/__init__.py
|
1aaronscott/DS-Unit-3-Sprint-3-Productization-and-Cloud
|
fcd06800f25b277195a4ec5117faa750afba96cb
|
[
"MIT"
] | null | null | null |
flask_app/twitoff/__init__.py
|
1aaronscott/DS-Unit-3-Sprint-3-Productization-and-Cloud
|
fcd06800f25b277195a4ec5117faa750afba96cb
|
[
"MIT"
] | null | null | null |
"""
Entry point for TwitOff flask application
"""
from .app import create_app
APP = create_app()
| 14
| 41
| 0.734694
|
017dfcecbe7812d7fca1e51abb1906e5594ec13b
| 1,200
|
py
|
Python
|
cmp_changefile.py
|
YnkDK/dynAlg14
|
120a49cb385246afb92aea89920200d3b3db2835
|
[
"MIT"
] | null | null | null |
cmp_changefile.py
|
YnkDK/dynAlg14
|
120a49cb385246afb92aea89920200d3b3db2835
|
[
"MIT"
] | null | null | null |
cmp_changefile.py
|
YnkDK/dynAlg14
|
120a49cb385246afb92aea89920200d3b3db2835
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python2
# -*- coding: utf-8 -*-
from os.path import abspath, join
from filecmp import cmp as diff
from itertools import combinations
from math import ceil
from utils import get_test_cases, get_test_case_and_alg
# Define path to output folder
OUTPUT_PATH = abspath(join(".", "output"))
# Holds the test cases, e.g. test_cases['changefile3'] = ('Lazy DFS', PATH)
test_cases = get_test_cases(OUTPUT_PATH, '.out', get_test_case_and_alg)
print ''
# Categorize the test cases into:
# 1. changefile name
# 2. algorithm name
# 3. path to test file
# Check if the files are the same for each changefile
for key in sorted(test_cases.keys(), key=lambda x: int(x[10:])):
body = []
for x, y in combinations(test_cases[key], 2):
if not diff(x[1], y[1]):
body.append("{:s} and {:s} mismatches".format(x[0], y[0]))
if len(body) > 0:
width = len(max(body, key=len))
# Prepare header to be printed
hwidth = (width-11-len(key))/2.0
left = "="*int(hwidth)
right = "="*int(ceil(hwidth))
print '{:s} ERROR IN {:s} {:s}'.format(left, key, right)
# Print body
print "\n".join(body)
# Prepare footer
print "="*width, "\n"
print "All output files have been compared!"
| 27.272727
| 75
| 0.670833
|
effce1c6c2425fd38b0da11cf4338f3619377a97
| 1,919
|
py
|
Python
|
shop/migrations/0001_initial.py
|
TerminalBen/shop
|
dcdae0ea80c767d4ddbaff493724bf49484dbeef
|
[
"MIT"
] | 1
|
2022-02-11T18:09:58.000Z
|
2022-02-11T18:09:58.000Z
|
shop/migrations/0001_initial.py
|
TerminalBen/shop
|
dcdae0ea80c767d4ddbaff493724bf49484dbeef
|
[
"MIT"
] | null | null | null |
shop/migrations/0001_initial.py
|
TerminalBen/shop
|
dcdae0ea80c767d4ddbaff493724bf49484dbeef
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-09-10 19:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200)),
('slug', models.SlugField(max_length=200, unique=True)),
],
options={
'verbose_name': 'category',
'verbose_name_plural': 'categories',
'ordering': ['-name'],
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200)),
('image', models.ImageField(blank=True, upload_to='products/%Y/%m/%d')),
('slug', models.SlugField(max_length=200)),
('description', models.TextField(blank=True, max_length=500)),
('price', models.DecimalField(decimal_places=1, max_digits=10)),
('stock', models.IntegerField(db_index=True)),
('available', models.BooleanField(default=True)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='shop.category')),
],
options={
'ordering': ['name'],
'index_together': {('id', 'slug')},
},
),
]
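# --- Illustrative sketch (not part of the original migration): a models.py that
# --- would generate a migration like the one above. Field options mirror the
# --- migration; the module layout and anything not shown above are assumptions.
#
#   from django.db import models
#
#   class Category(models.Model):
#       name = models.CharField(max_length=200, db_index=True)
#       slug = models.SlugField(max_length=200, unique=True)
#       class Meta:
#           ordering = ['-name']
#           verbose_name = 'category'
#           verbose_name_plural = 'categories'
#
#   class Product(models.Model):
#       category = models.ForeignKey(Category, related_name='products', on_delete=models.CASCADE)
#       name = models.CharField(max_length=200, db_index=True)
#       image = models.ImageField(upload_to='products/%Y/%m/%d', blank=True)
#       slug = models.SlugField(max_length=200)
#       description = models.TextField(max_length=500, blank=True)
#       price = models.DecimalField(max_digits=10, decimal_places=1)
#       stock = models.IntegerField(db_index=True)
#       available = models.BooleanField(default=True)
#       created = models.DateTimeField(auto_now_add=True)
#       updated = models.DateTimeField(auto_now=True)
#       class Meta:
#           ordering = ['name']
#           index_together = [('id', 'slug')]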
| 39.163265
| 138
| 0.550808
|
60354ea64469cc423690332aa3267c100e97b5db
| 3,481
|
py
|
Python
|
ponzi/color/space.py
|
pyramidscheme/ponzi
|
5fe1651c22974b91370b21ac50f2b09a67397b91
|
[
"MIT"
] | null | null | null |
ponzi/color/space.py
|
pyramidscheme/ponzi
|
5fe1651c22974b91370b21ac50f2b09a67397b91
|
[
"MIT"
] | null | null | null |
ponzi/color/space.py
|
pyramidscheme/ponzi
|
5fe1651c22974b91370b21ac50f2b09a67397b91
|
[
"MIT"
] | null | null | null |
from math import atan2, cos, degrees, sin, sqrt, radians
from typing import Callable, NamedTuple
from . import linear
class RGB(NamedTuple):
"""
RGB is an sRGB color.
"""
r: float # [0-1]
g: float # [0-1]
b: float # [0-1]
@property
def valid(self) -> bool:
return 0.0 <= self.r <= 1.0 and 0.0 <= self.g <= 1.0 and 0.0 <= self.b <= 1.0
def clamp(self) -> "RGB":
def c(v: float) -> float:
return max(0.0, min(v, 1.0))
return RGB(c(self.r), c(self.g), c(self.b))
class Lab(NamedTuple):
"""
Lab is a color in the CIE L*a*b* perceptually-uniform color space.
"""
l: float
a: float
b: float
class HCL(NamedTuple):
"""
HCL is a color in the CIE L*C*h° color space, a polar projection of L*a*b*.
It's basically a superior HSV.
"""
h: float # hue [0-360)
c: float # chroma [0-1]
l: float # luminance [0-1]
class XYZ(NamedTuple):
"""
XYZ is a color in CIE's standard color space.
"""
x: float
y: float
z: float
class LinearRGB(NamedTuple):
"""
RGB is a linear color.
"""
r: float # [0-1]
g: float # [0-1]
b: float # [0-1]
# Reference white points
D50 = XYZ(0.96422, 1.00000, 0.82521)
D65 = XYZ(0.95047, 1.00000, 1.08883)
def hcl_to_lab(hcl: HCL) -> Lab:
h_rad = radians(hcl.h)
a = hcl.c * cos(h_rad)
b = hcl.c * sin(h_rad)
return Lab(hcl.l, a, b)
def lab_to_hcl(lab: Lab) -> HCL:
t = 1.0e-4
h = (
degrees(atan2(lab.b, lab.a)) % 360.0
if abs(lab.b - lab.a) > t and abs(lab.a) > t
else 0.0
)
c = sqrt(lab.a ** 2 + lab.b ** 2)
l = lab.l
return HCL(h, c, l)
def lab_to_xyz(lab: Lab, white_ref: XYZ = D65) -> XYZ:
def finv(t: float) -> float:
return (
t ** 3
if t > 6.0 / 29.0
else 3.0 * 6.0 / 29.0 * 6.0 / 29.0 * (t - 4.0 / 29.0)
)
l2 = (lab.l + 0.16) / 1.16
return XYZ(
white_ref.x * finv(l2 + lab.a / 5.0),
white_ref.y * finv(l2),
white_ref.z * finv(l2 - lab.b / 2.0),
)
def xyz_to_lab(xyz: XYZ, white_ref: XYZ = D65) -> Lab:
def f(t: float) -> float:
return (
t ** (1 / 3)
if t > 6.0 / 29.0 * 6.0 / 29.0 * 6.0 / 29.0
else t / 3.0 * 29.0 / 6.0 * 29.0 / 6.0 + 4.0 / 29.0
)
fy = f(xyz.y / white_ref.y)
return Lab(
1.16 * fy - 0.16,
5.0 * (f(xyz.x / white_ref.x) - fy),
2.0 * (fy - f(xyz.z / white_ref.z)),
)
def xyz_to_linear_rgb(xyz: XYZ) -> LinearRGB:
return LinearRGB(
3.2404542 * xyz.x - 1.5371385 * xyz.y - 0.4985314 * xyz.z,
-0.9692660 * xyz.x + 1.8760108 * xyz.y + 0.0415560 * xyz.z,
0.0556434 * xyz.x - 0.2040259 * xyz.y + 1.0572252 * xyz.z,
)
def linear_rgb_to_xyz(rgb: LinearRGB) -> XYZ:
return XYZ(
0.4124564 * rgb.r + 0.3575761 * rgb.g + 0.1804375 * rgb.b,
0.2126729 * rgb.r + 0.7151522 * rgb.g + 0.0721750 * rgb.b,
0.0193339 * rgb.r + 0.1191920 * rgb.g + 0.9503041 * rgb.b,
)
def linear_rgb_to_srgb(
rgb: LinearRGB, delinearize: Callable[[float], float] = linear.delinearize
) -> RGB:
return RGB(delinearize(rgb.r), delinearize(rgb.g), delinearize(rgb.b))
def srgb_to_linear_rgb(
rgb: RGB, linearize: Callable[[float], float] = linear.linearize
) -> LinearRGB:
return LinearRGB(linearize(rgb.r), linearize(rgb.g), linearize(rgb.b))
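# --- Illustrative usage (not part of the original module): round-trip an sRGB
# --- color through Lab and HCL. The explicit transfer functions below are
# --- assumptions standing in for the sibling `linear` module.
if __name__ == "__main__":
    def srgb_linearize(v: float) -> float:
        return v / 12.92 if v <= 0.04045 else ((v + 0.055) / 1.055) ** 2.4
    def srgb_delinearize(v: float) -> float:
        return 12.92 * v if v <= 0.0031308 else 1.055 * v ** (1.0 / 2.4) - 0.055
    srgb = RGB(0.2, 0.4, 0.6)
    hcl = lab_to_hcl(xyz_to_lab(linear_rgb_to_xyz(srgb_to_linear_rgb(srgb, srgb_linearize))))
    back = linear_rgb_to_srgb(xyz_to_linear_rgb(lab_to_xyz(hcl_to_lab(hcl))), srgb_delinearize).clamp()
    print(hcl)   # the same color expressed as hue/chroma/luminance
    print(back)  # should be very close to RGB(0.2, 0.4, 0.6)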
| 22.901316
| 85
| 0.527722
|
7f39caec5a0c7898f2b7957b4d619c24149d01fe
| 1,992
|
py
|
Python
|
ltpylib/timing.py
|
lancethomps/lt-pylib
|
fb1f3b0b62e70d950db84119facbdf481e3b5e4d
|
[
"MIT"
] | null | null | null |
ltpylib/timing.py
|
lancethomps/lt-pylib
|
fb1f3b0b62e70d950db84119facbdf481e3b5e4d
|
[
"MIT"
] | null | null | null |
ltpylib/timing.py
|
lancethomps/lt-pylib
|
fb1f3b0b62e70d950db84119facbdf481e3b5e4d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import logging
import time
from typing import Callable, SupportsFloat
def format_seconds(frac_seconds):
hours, rem = divmod(float(frac_seconds), 3600)
minutes, seconds = divmod(rem, 60)
return "{0:0>2}:{1:0>2}:{2:06.3f}".format(int(hours), int(minutes), seconds)
def format_millis(millis):
return format_seconds(float(millis) / float(1000))
def get_time_elapsed_msg(start_time):
return format_seconds(time.time() - float(start_time))
def get_time_remaining_formatted_seconds(start_time, count, total):
frac_seconds = time.time() - float(start_time)
if float(count) <= 0:
return "N/A"
estimated_total = frac_seconds * (float(total) / float(count))
return format_seconds(estimated_total - frac_seconds)
def get_time_remaining_msg(start_time, count, total):
frac_seconds = time.time() - float(start_time)
if float(count) <= 0:
return "Elapsed: {0: >12} Remaining: {1: >12}".format(format_seconds(frac_seconds), 'N/A')
estimated_total = frac_seconds * (float(total) / float(count))
return "Elapsed: {0: >12} Remaining: {1: >12}".format(format_seconds(frac_seconds), format_seconds(estimated_total - frac_seconds))
def sleep_and_log(seconds: int, log_level: int = logging.INFO):
logging.log(log_level, "Sleeping %s seconds...", seconds)
time.sleep(seconds)
def has_time_remaining(
start_time: float,
max_seconds: SupportsFloat,
log_time_ran_out: bool = False,
time_ran_out_func: Callable[[float, SupportsFloat, float], None] = None,
) -> bool:
curr_time = time.time()
check_result = (curr_time - start_time) < float(max_seconds)
if not check_result:
if log_time_ran_out:
logging.warning(f"Timed out waiting after {max_seconds:,} seconds")
if time_ran_out_func is not None:
time_ran_out_func(start_time, max_seconds, curr_time)
return check_result
if __name__ == "__main__":
import sys
result = globals()[sys.argv[1]](*sys.argv[2:])
if result is not None:
print(result)
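# --- Illustrative usage (not part of the original module): reporting progress in
# --- a long-running loop with `get_time_remaining_msg`. `process_item` is a
# --- hypothetical stand-in for real work.
#
#   start = time.time()
#   total = 250
#   for count in range(1, total + 1):
#       process_item(count)
#       if count % 50 == 0:
#           print(get_time_remaining_msg(start, count, total))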
| 28.869565
| 133
| 0.724398
|
de6f6005195abaaf117c10c81673baa72d8ee55e
| 224
|
py
|
Python
|
src/config.py
|
vieenrose/Gender-Recognition-by-Voice
|
d7c57a0229e8fc8fcfe48712f19bf0b57e464e0f
|
[
"MIT"
] | 11
|
2020-08-03T22:05:20.000Z
|
2022-02-28T07:50:21.000Z
|
src/config.py
|
vieenrose/Gender-Recognition-by-Voice
|
d7c57a0229e8fc8fcfe48712f19bf0b57e464e0f
|
[
"MIT"
] | null | null | null |
src/config.py
|
vieenrose/Gender-Recognition-by-Voice
|
d7c57a0229e8fc8fcfe48712f19bf0b57e464e0f
|
[
"MIT"
] | 5
|
2020-09-26T03:28:13.000Z
|
2021-07-09T06:21:10.000Z
|
EPOCHS = 20
TRAIN_BATCH_SIZE = 20
VALID_BATCH_SIZE = 20
DATA_PATH = '../dataset/cv-valid-train.csv'
TEST_DATA_PATH = '../dataset/cv-valid-test.csv'
SCALAR_PATH = 'results/scaler.pkl'
MODEL_PATH = 'results/final_model.model'
| 28
| 47
| 0.758929
|