max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
py/kubeflow/testing/auto_deploy/server.py | ChanYiLin/testing | 0 | 12763451 | <filename>py/kubeflow/testing/auto_deploy/server.py
"""A flask app for auto deploying Kubeflow.
TODO(jlewi): Rather than use the multiprocessing package it might
make sense just to run the server and reconciler in separate containers.
They are already communicating through the filesystem so using multi-processing
might just be complicating things. One problem we have right now
is that exceptions in the reconciler aren't propagated to the server.
"""
import datetime
from dateutil import parser as date_parser
import fire
import glob
import logging
import os
import yaml
from kubeflow.testing import gcp_util
from kubeflow.testing import kf_logging
from kubeflow.testing.auto_deploy import blueprint_reconciler
from kubeflow.testing.auto_deploy import util
import flask
# Module-level state shared between the Flask handlers and
# AutoDeployServer.serve. (Bug fix: these two assignments were duplicated,
# which created and discarded an extra Flask application instance.)
_deployments_dir = None
app = flask.Flask(__name__)
def _get_deployments():
    """Return a list of row dicts describing Deployment Manager deployments.

    Reads the newest ``deployments.*`` snapshot file from the module-level
    ``_deployments_dir`` and flattens it into rows for the index.html
    template. Returns an empty list if no snapshot files exist.
    """
    match = os.path.join(_deployments_dir, "deployments.*")
    files = glob.glob(match)

    items = []
    if not files:
        logging.info(f"No matching files for {match}")
    else:
        # Snapshot filenames sort lexicographically by timestamp; take the newest.
        files = sorted(files)
        latest = files[-1]

        logging.info(f"Reading from {latest}")
        with open(latest) as hf:
            # Bug fix: yaml.load without an explicit Loader is deprecated and
            # allows arbitrary object construction; safe_load is sufficient here.
            deployments = yaml.safe_load(hf)

        for v, deployments_list in deployments.items():
            for d in deployments_list:
                create_time = date_parser.parse(d["create_time"])
                age = datetime.datetime.now(tz=create_time.tzinfo) - create_time
                manifests_commit = d["labels"].get(util.MANIFESTS_COMMIT_LABEL, "")
                row = {
                    # Blueprint-only columns; blank for DM deployments.
                    "pipeline_run": "",
                    "pipeline_run_url": "",
                    "version": v,
                    "deployment_name": d["deployment_name"],
                    "creation_time": d.get("create_time", ""),
                    "age": f"{age}",
                    "manifests_git": manifests_commit,
                    "manifests_url": (f"https://github.com/kubeflow/manifests/tree/"
                                      f"{manifests_commit}"),
                    "kfctl_git": d["labels"].get("kfctl-git", ""),
                    "endpoint": f"https://{d['deployment_name']}.endpoints."
                                f"kubeflow-ci-deployment.cloud.goog",
                    # TODO(jlewi): We are hardcoding the project and zone.
                    "gcloud_command": (f"gcloud --project=kubeflow-ci-deployment "
                                       f"container clusters get-credentials "
                                       f"--zone={d['zone']} "
                                       f"{d['deployment_name']}")
                }
                labels = []
                for label_key, label_value in d["labels"].items():
                    labels.append(f"{label_key}={label_value}")
                row["labels"] = ", ".join(labels)
                items.append(row)
    return items
def _get_blueprints():
    """Return a list of row dicts describing auto-deployed blueprints.

    Reads the newest ``clusters.*`` snapshot file from the module-level
    ``_deployments_dir`` and flattens it into rows matching the shape
    produced by _get_deployments. Returns an empty list when no snapshot
    files exist.
    """
    match = os.path.join(_deployments_dir, "clusters.*")
    files = glob.glob(match)

    items = []
    if not files:
        logging.info(f"No files matched {match}")
        return items

    # Snapshot filenames sort lexicographically by timestamp; take the newest.
    files = sorted(files)
    latest = files[-1]

    logging.info(f"Reading from {latest}")
    with open(latest) as hf:
        # Bug fix: yaml.load without an explicit Loader is deprecated/unsafe.
        deployments = yaml.safe_load(hf)

    for _, clusters in deployments.items():
        for c in clusters:
            create_time = date_parser.parse(c["metadata"]["creationTimestamp"])
            age = datetime.datetime.now(tz=create_time.tzinfo) - create_time
            commit = c["metadata"]["labels"].get(
                blueprint_reconciler.BLUEPRINT_COMMIT_LABEL, "")
            pipeline_run = c["metadata"]["labels"].get("tekton.dev/pipelineRun", "")
            group = c["metadata"]["labels"].get(
                blueprint_reconciler.GROUP_LABEL, blueprint_reconciler.UNKNOWN_GROUP)
            name = c["metadata"]["name"]
            location = c["spec"]["location"]
            location_flag = gcp_util.location_to_type(location)
            row = {
                "version": group,
                "deployment_name": name,
                "creation_time": create_time,
                "age": f"{age}",
                "manifests_git": commit,
                # TODO(jlewi): We shouldn't hardcode the url; we should add it
                # as an annotation.
                "manifests_url": (f"https://github.com/kubeflow/gcp-blueprints/tree/"
                                  f"{commit}"),
                "kfctl_git": "",
                "pipeline_run": pipeline_run,
                # Bug fix: this key was misspelled "pipline_run_url", which did
                # not match the "pipeline_run_url" key emitted by
                # _get_deployments (and presumably read by the template).
                # TODO(jlewi): We shouldn't hardcode the endpoint.
                "pipeline_run_url": (f"https://kf-ci-v1.endpoints.kubeflow-ci.cloud.goog/"
                                     f"tekton/#/namespaces/auto-deploy/pipelineruns/"
                                     f"{pipeline_run}"),
                # TODO(jlewi): Don't hard code the project
                "endpoint": (f"https://{name}.endpoints."
                             f"kubeflow-ci-deployment.cloud.goog"),
                # TODO(jlewi): We are hardcoding the project and zone.
                "gcloud_command": (f"gcloud --project=kubeflow-ci-deployment "
                                   f"container clusters get-credentials "
                                   f"--{location_flag}={location} "
                                   f"{name}")
            }
            labels = []
            for label_key, label_value in c["metadata"]["labels"].items():
                labels.append(f"{label_key}={label_value}")
            row["labels"] = ", ".join(labels)
            items.append(row)
    return items
@app.route("/")
def auto_deploy_status():
"""Return the status of the auto deployments."""
logging.info("Handle auto_deploy_status")
try:
logging.info("Get deployments")
items = _get_deployments()
logging.info("Get blueprints")
blueprints = _get_blueprints()
items.extend(blueprints)
# Define a key function for the sort.
# We want to sort by version and age
def key_func(i):
# We want unknown version to appear last
# so we ad a prefix
if i["version"] == "unknown":
prefix = "z"
else:
prefix = "a"
return f"{prefix}-{i['version']}-{i['age']}"
items = sorted(items, key=key_func)
# Return the HTML
logging.info("Render template")
result = flask.render_template("index.html", title="Kubeflow Auto Deployments",
items=items)
# It looks like when flask debug mode is off the Flask provides unhelpful log
# messages in the logs. In debug mode the actual exception is returned in
# the html response.
except Exception as e:
logging.error(f"Exception occured: {e}")
raise
return result
class AutoDeployServer:
    """Entry point that configures and runs the auto-deploy Flask server."""

    def __init__(self):
        self._deployments_queue = None
        self._deployments_dir = None

    def serve(self, template_folder, deployments_dir=None, port=None):
        """Start the Flask server.

        Args:
          template_folder: Directory containing the Jinja templates.
          deployments_dir: Directory holding deployment/blueprint snapshots.
          port: Port to listen on.
        """
        global _deployments_dir  # pylint: disable=global-statement
        app.template_folder = template_folder

        # make sure things reload
        FLASK_DEBUG = os.getenv("FLASK_DEBUG", "false").lower()
        # Convert the environment string to a boolean.
        FLASK_DEBUG = FLASK_DEBUG in ["true", "t"]

        logging.info(f"FLASK_DEBUG={FLASK_DEBUG}")
        if FLASK_DEBUG:
            app.jinja_env.auto_reload = True
            app.config['TEMPLATES_AUTO_RELOAD'] = True

        # Keep the instance attribute and the module-level global in sync; the
        # request handlers read the global.
        self._deployments_dir = deployments_dir
        _deployments_dir = deployments_dir
        # Bug fix: this previously logged self._deployments_dir before it was
        # ever assigned, so it always printed None.
        logging.info(f"Deployments will be read from {deployments_dir}")
        app.run(debug=FLASK_DEBUG, host='0.0.0.0', port=port)
if __name__ == '__main__':
    # Emit logs in json format. This way we can do structured logging
    # and we can query extra fields easily in stackdriver and bigquery.
    json_handler = logging.StreamHandler()
    json_handler.setFormatter(kf_logging.CustomisedJSONFormatter())

    logger = logging.getLogger()
    logger.addHandler(json_handler)
    logger.setLevel(logging.INFO)

    # Expose AutoDeployServer's methods (e.g. `serve`) as a CLI via python-fire.
    fire.Fire(AutoDeployServer)
| 2.453125 | 2 |
exp/12_fake_ppo.py | GuRongfei/BPG-DBS | 0 | 12763452 | <filename>exp/12_fake_ppo.py
import warnings
warnings.filterwarnings("ignore")
import os
# Silence TensorFlow C++ logging (3 = errors only).
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import gym
import fake_oscillator
from stable_baselines.ppo2 import PPO2
from stable_baselines.common.policies import MlpPolicy
import numpy as np

# Evaluate a pre-trained PPO2 policy on the fake oscillator environment.
env_id = "fake_oscillator-v0"
env = gym.make(env_id)
model = PPO2(MlpPolicy, env, learning_rate=1e-5, verbose=1)#, tensorboard_log="MLP/")
#model.learn(int(1e5))
#model.save('../result/model/ppo_fake_1e5.pkl')
# NOTE(review): PPO2.load is a classmethod, so the freshly constructed model
# above is discarded and replaced by the checkpoint loaded here — confirm the
# constructor call is still wanted.
model = model.load('../result/model/ppo_fake_1e5.pkl')

env = gym.make(env_id)
obs = env.reset()
# Warm-up phase: drive the environment with uniform random actions in [-1, 1].
for _ in range(500):
    action = np.array([(np.random.rand()-0.5)*2])
    obs, rewards, dones, info = env.step(action)
    print('obs: ', obs)
    print("act: ", action)
    print('reward: ', rewards)
# Evaluation phase: let the trained policy pick actions.
for _ in range(1000):
    action, _states = model.predict(obs)
    obs, rewards, dones, info = env.step(action)
    print('obs: ', obs)
    print("act: ", action)
    print('reward: ', rewards)
env.show('tst')
env.close()
| 1.890625 | 2 |
MyTools/ODE/ode_simple_derivative.py | fovtran/PyGame_samples | 0 | 12763453 | import numpy as np
from scipy.integrate import odeint
from matplotlib import pyplot as plt
def calc_derivative(ypos, time):
    """Right-hand side of the ODE dy/dt = -2*y (the `time` argument is
    required by odeint's callback signature but unused)."""
    return ypos * -2
# Integrate dy/dt = -2*y with y(0) = 1 over t in [0, 4].
time_vec = np.linspace(0, 4, 40)
yvec = odeint(calc_derivative, 1, time_vec)

# Plot the resulting exponential decay.
plt.figure(figsize=(4, 3))
plt.plot(time_vec, yvec)
plt.xlabel('t: Time')
plt.ylabel('y: Position')
plt.tight_layout()
# NOTE(review): there is no plt.show()/savefig — presumably run under an
# interactive backend; confirm whether a figure is expected when run as a script.
| 3.234375 | 3 |
sphinxcontrib/needs/api/__init__.py | gregegg/sphinxcontrib-needs | 1 | 12763454 | from .configuration import get_need_types, add_need_type, add_extra_option, add_dynamic_function
from .need import add_need, make_hashed_id
| 1.257813 | 1 |
tavastiaevents/apps.py | dymesolutions/HameEvents | 0 | 12763455 | <reponame>dymesolutions/HameEvents
from django.apps import AppConfig
class TavastiaeventsConfig(AppConfig):
    """Django application configuration for the `tavastiaevents` app."""
    name = 'tavastiaevents'
| 1.054688 | 1 |
openaddr/ci/webdotmap.py | MiniCodeMonkey/machine | 101 | 12763456 | import apsw
import boto3
import os
import json
from flask import Blueprint, Response, abort, current_app, render_template, url_for
from . import setup_logger
from .webcommon import log_application_errors, flask_log_level
from .webhooks import get_memcache_client
dots = Blueprint('dots', __name__)
# https://stackoverflow.com/questions/56776974/sqlite3-connect-to-a-database-in-cloud-s3
class S3VFS(apsw.VFS):
    """APSW virtual file system that reads SQLite databases directly from S3.

    Registers itself with SQLite under `vfsname`; `cache` is an optional
    memcache-style client shared with every file this VFS opens.
    """
    def __init__(self, vfsname="s3", basevfs="", cache=None):
        self.vfsname = vfsname
        self.basevfs = basevfs  # VFS to inherit from ("" = platform default)
        self.cache = cache
        apsw.VFS.__init__(self, self.vfsname, self.basevfs)

    def xOpen(self, name, flags):
        # SQLite open callback: hand back our S3-backed file object.
        return S3VFSFile(self.basevfs, name, flags, self.cache)
class S3VFSFile():
    """File object for S3VFS: serves SQLite byte-range reads out of an S3
    object, memoizing ranges and the object size in an optional cache."""
    def __init__(self, inheritfromvfsname, filename, flags, cache):
        # `filename` is an apsw URIFilename: the bucket comes from the URI
        # query string and the key is the URI path minus the leading "/".
        self.s3 = boto3.client('s3')
        self.cache = cache
        self.bucket = filename.uri_parameter("bucket")
        self.key = filename.filename().lstrip("/")

    def _cache_key(self, amount, offset):
        # Cache key identifying one byte-range read of this object.
        return '{bucket}/{key}/{amount}/{offset}'.format(
            bucket=self.bucket,
            key=self.key,
            amount=amount,
            offset=offset,
        )

    def xRead(self, amount, offset):
        # SQLite read callback: fetch `amount` bytes at `offset` with an S3
        # ranged GET, consulting the cache first when one is configured.
        # NOTE(review): HTTP byte ranges are inclusive, so this Range header
        # requests amount+1 bytes — verify whether the extra byte matters.
        data = None
        if self.cache:
            cache_key = self._cache_key(amount, offset)
            data = self.cache.get(cache_key)
        if data is None:
            response = self.s3.get_object(Bucket=self.bucket, Key=self.key, Range='bytes={}-{}'.format(offset, offset + amount))
            data = response['Body'].read()
            if self.cache:
                self.cache.set(cache_key, data)
        return data

    def xFileSize(self):
        # SQLite size callback: HEAD the object for ContentLength, cached
        # under a per-object "size" key when a cache is configured.
        length = None
        if self.cache:
            cache_key = '{bucket}/{key}/size'.format(bucket=self.bucket, key=self.key)
            length = self.cache.get(cache_key)
        if length is None:
            response = self.s3.head_object(Bucket=self.bucket, Key=self.key)
            length = response['ContentLength']
            if self.cache:
                self.cache.set(cache_key, length)
        return length

    def xClose(self):
        # Nothing to release; the boto3 client is garbage collected.
        pass

    def xFileControl(self, op, ptr):
        # No custom file-control operations are supported.
        return False
def get_mbtiles_connection(bucket, key, cache):
    '''Open a read-only SQLite connection to an MBTiles file stored in S3.'''
    s3vfs = S3VFS(cache=cache)

    # immutable=1 tells SQLite the file never changes, enabling lock-free
    # reads; the bucket is forwarded to S3VFSFile through the URI parameter.
    return apsw.Connection(
        'file:/{key}?bucket={bucket}&immutable=1'.format(bucket=bucket, key=key),
        flags=apsw.SQLITE_OPEN_READONLY | apsw.SQLITE_OPEN_URI,
        vfs=s3vfs.vfsname,
    )
def get_mbtiles_metadata(bucket, key, cache):
    '''Return (zoom, lat, lon, fields) metadata for an MBTiles file in S3.

    Results are memoized in `cache` (when provided) under a per-object key.
    '''
    if cache:
        cache_key = '{bucket}/{key}/metadata'.format(bucket=bucket, key=key)
        cached = cache.get(cache_key)
        if cached:
            return cached

    connection = get_mbtiles_connection(bucket, key, cache)
    cur = connection.cursor()
    res = cur.execute('''SELECT name, value FROM metadata
        WHERE name IN ('center', 'json')''')
    data = dict(res.fetchall())
    lon, lat, zoom = map(float, data.get('center', '0,0,0').split(','))
    more = json.loads(data.get('json', '{}'))
    # Bug fix: previously indexed [0] directly, which raised IndexError when
    # the metadata had no (or an empty) vector_layers entry.
    vector_layers = more.get('vector_layers', [])
    if vector_layers:
        fields = list(vector_layers[0].get('fields', {}).keys())
    else:
        fields = []
    cur.close()

    metadata_tuple = (zoom, lat, lon, fields)

    if cache:
        cache.set(cache_key, metadata_tuple)

    return metadata_tuple
def get_mbtiles_tile(bucket, key, row, col, zoom, cache):
    '''Return the raw tile blob at (zoom, col, row), or None when absent.

    `row` is in XYZ (top-left origin) convention; MBTiles stores TMS
    (bottom-left origin) rows, hence the flip below. Results are memoized in
    `cache` when one is provided.
    '''
    if cache:
        cache_key = '{bucket}/{key}/{zoom}/{row}/{col}'.format(bucket=bucket, key=key, zoom=zoom, row=row, col=col)
        cached = cache.get(cache_key)
        if cached:
            return cached

    connection = get_mbtiles_connection(bucket, key, cache)
    cur = connection.cursor()
    # Convert XYZ row to TMS row: TMS counts rows from the bottom of the grid.
    flipped_row = (2**zoom) - 1 - row
    res = cur.execute('''SELECT tile_data FROM tiles
        WHERE zoom_level=? AND tile_column=? AND tile_row=?''', (zoom, col, flipped_row))
    data = res.fetchone()
    cur.close()

    if cache:
        cache.set(cache_key, data)

    return data
@dots.route('/runs/<int:run_id>/dotmap/index.html')
@log_application_errors
def dotmap_preview(run_id):
    '''Render the slippy-map preview page for one run's dot map.'''
    if not run_id:
        return 'invalid run_id', 404

    try:
        bucket = "data.openaddresses.io"
        key = "runs/{run_id}/slippymap.mbtiles".format(run_id=run_id)
        mc = get_memcache_client(current_app.config)
        zoom, lat, lon, fields = get_mbtiles_metadata(bucket, key, mc)
    except ValueError:
        # Raised when the MBTiles metadata cannot be unpacked as expected.
        abort(500)

    return render_template(
        'dotmap-index.html',
        run_id=run_id,
        zoom=zoom,
        lat=lat,
        lon=lon,
        fields=fields,
        scene_url=url_for('dots.get_scene', run_id=run_id)
    )
@dots.route('/runs/<run_id>/dotmap/scene.yaml')
@log_application_errors
def get_scene(run_id):
    '''Serve the Tangram scene YAML for one run, pointing at its tile URL.'''
    if not run_id:
        return 'invalid run_id', 404

    # Build a tile URL template: generate a concrete URL with placeholder
    # coordinates, then substitute the {z}/{x}/{y} tokens the client expects.
    placeholder_url = url_for('dots.get_one_tile', run_id=run_id,
                              zoom=123, col=456, row=789)
    tile_url = placeholder_url.replace('123/456/789', '{z}/{x}/{y}')

    body = render_template('dotmap-scene.yaml', tile_url=tile_url)
    return Response(body, headers={'Content-Type': 'application/x-yaml'})
@dots.route('/runs/<run_id>/dotmap/tiles/<int:zoom>/<int:col>/<int:row>.mvt')
@log_application_errors
def get_one_tile(run_id, zoom, col, row):
    '''Serve one gzipped Mapbox vector tile for a run.'''
    if not run_id:
        return 'invalid run_id', 404

    bucket = "data.openaddresses.io"
    key = "runs/{run_id}/slippymap.mbtiles".format(run_id=run_id)
    mc = get_memcache_client(current_app.config)
    body = get_mbtiles_tile(bucket, key, row, col, zoom, mc)

    if not body:
        return 'tile not found', 404

    # MBTiles stores tiles gzip-compressed; pass them through unchanged and
    # let the client decompress via the Content-Encoding header.
    headers = {
        'Content-Type': 'application/vnd.mapbox-vector-tile',
        'Content-Encoding': 'gzip',
    }

    return Response(body, headers=headers)
def apply_dotmap_blueprint(app):
    '''Register the dots blueprint plus its caching and logging hooks on `app`.'''
    @dots.after_request
    def cache_everything(response):
        # A run's tiles and scene are immutable, so cache aggressively.
        response.cache_control.max_age = 31556952 # 1 year
        response.cache_control.public = True
        return response

    app.register_blueprint(dots)

    @app.before_first_request
    def app_prepare():
        setup_logger(os.environ.get('AWS_SNS_ARN'), None, flask_log_level(app.config))
| 2.203125 | 2 |
test/tests/stack_limits.py | aisk/pyston | 0 | 12763457 | # Make sure we can recurse at least 900 times on the three different types
# of stacks that we have:
def recurse(n):
    """Recurse n times, printing the countdown; always returns 0 for n >= 0.

    Uses print() with a single argument so the function behaves identically
    under Python 2 (where the rest of this script's print statements live)
    and Python 3.
    """
    print(n)
    if n > 0:
        return recurse(n - 1)
    return n
print "Recursing on main thread..."
recurse(900)
print "Recursing in a generator..."
def gen():
yield recurse(900)
print list(gen())
print "Recursing in a thread..."
from thread import start_new_thread
import time
done = 0
def thread_target():
global done
recurse(900)
done = 1
start_new_thread(thread_target, ())
while not done:
time.sleep(0.001)
| 3.625 | 4 |
plotskypath.py | agabrown/astrometric-sky-path | 3 | 12763458 | <filename>plotskypath.py
"""
Plot the astrometric path of stars on the sky. That is plot the coordinate direction including the proper
motion and parallax effect as a function of time.
Use the C-program skypath.c to draw astrometric paths of stars on the sky. Requires the SOFA library:
http://www.iausofa.org/ (ANSI-C version).
<NAME> 2011-2018
"""
import subprocess
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import argparse
# Configure matplotlib: LaTeX text rendering, serif fonts, and enlarged
# ticks/line widths for publication-quality figures.
rc('text', usetex=True)
rc('font', family='serif', size=18)
rc('xtick.major', size='10')
rc('xtick.minor', size='5')
#rc('xtick', direction='out')
rc('ytick.major', size='10')
rc('ytick.minor', size='5')
#rc('ytick', direction='out')
rc('lines', linewidth=2)
rc('axes', linewidth=2)
def make_plot(args):
    """
    Calculate the astrometric path on the sky and plot the result.

    Invokes the external ./skypath binary (built against the SOFA library),
    parses its whitespace-separated output (time, alpha, delta, xi, eta),
    and plots the path in local plane coordinates. With --parEllipse a
    second, proper-motion-free run is made to overlay the parallax ellipse.

    Parameters
    ----------
    args - Command line arguments (dict from parseCommandLineArguments).

    Returns
    -------
    Nothing
    """
    skyPathCommand = ['./skypath']
    # Bug fix: np.float was deprecated and removed in NumPy >= 1.24; the
    # builtin float is what was meant.
    refEpoch = float(args['refEpochString'])
    startEpoch = float(args['startEpochString'])
    npoints = args['npoints']
    if (args['parEllipse']):
        skyPathCommandParallaxOnly = ['./skypath']
    if (args['astrometryString'] != None):
        skyPathCommand.append('-astrometry')
        skyPathCommand.append(args['astrometryString'])
    if (args['phaseSpaceString'] != None):
        skyPathCommand.append('-phaseSpace')
        skyPathCommand.append(args['phaseSpaceString'])
    if (args['refEpochString'] != None):
        skyPathCommand.append('-refepoch')
        skyPathCommand.append('{0}'.format(args['refEpochString']))
    if (args['startEpochString'] != None):
        skyPathCommand.append('-start')
        skyPathCommand.append('{0}'.format(args['startEpochString']))
    if (args['timeIntervalString'] != None):
        skyPathCommand.append('-interval')
        skyPathCommand.append('{0}'.format(args['timeIntervalString']))
    if (args['npoints'] != None):
        skyPathCommand.append('-npoints')
        skyPathCommand.append('{0}'.format(args['npoints']))

    if (args['parEllipse']):
        # TODO Better defaults handling in this case.
        if (args['astrometryString'] == None and args['phaseSpaceString'] == None):
            args['astrometryString'] = "30,40,100,40,30,30"
        if (args['astrometryString'] != None):
            skyPathCommandParallaxOnly = ['./skypath']
            skyPathCommandParallaxOnly.append('-astrometry')
            alpha, delta, parlx, mualpha, mudelta, vrad = args['astrometryString'].split(",")
            # Zero proper motion and radial velocity: parallax-only path.
            parOnlyString = '{0},{1},{2},0,0,0'.format(alpha, delta, parlx)
            skyPathCommandParallaxOnly.append(parOnlyString)
            skyPathCommandParallaxOnly.append('-interval')
            skyPathCommandParallaxOnly.append('1.0')
            if (args['refEpochString'] != None):
                skyPathCommandParallaxOnly.append('-refepoch')
                skyPathCommandParallaxOnly.append('{0}'.format(args['refEpochString']))
        if (args['phaseSpaceString'] != None):
            # Bug fix: this branch previously appended '-astrometry', split
            # args['astrometryString'] (which is None here), and built a
            # malformed '{2}.0.0' parameter string. Zero the velocities of
            # the phase-space input instead.
            skyPathCommandParallaxOnly.append('-phaseSpace')
            x, y, z, vx, vy, vz = args['phaseSpaceString'].split(",")
            parOnlyString = '{0},{1},{2},0.0,0.0,0.0'.format(x, y, z)
            skyPathCommandParallaxOnly.append(parOnlyString)
            skyPathCommandParallaxOnly.append('-interval')
            skyPathCommandParallaxOnly.append('1.0')
        if (args['startEpochString'] != None):
            skyPathCommandParallaxOnly.append('-start')
            skyPathCommandParallaxOnly.append('{0}'.format(args['startEpochString']))

    result = subprocess.run(skyPathCommand, stdout=subprocess.PIPE)
    skyPath = result.stdout.splitlines()

    # Parse the per-epoch rows into parallel arrays.
    times = np.empty(len(skyPath))
    alpha = np.empty(len(skyPath))
    delta = np.empty(len(skyPath))
    xi = np.empty(len(skyPath))
    eta = np.empty(len(skyPath))
    for i in range(len(skyPath)):
        times[i], alpha[i], delta[i], xi[i], eta[i] = skyPath[i].split()

    if (args['parEllipse']):
        resultB = subprocess.run(skyPathCommandParallaxOnly, stdout=subprocess.PIPE)
        skyPathB = resultB.stdout.splitlines()
        timesB = np.empty(len(skyPathB))
        alphaParOnly = np.empty(len(skyPathB))
        deltaParOnly = np.empty(len(skyPathB))
        xiParOnly = np.empty(len(skyPathB))
        etaParOnly = np.empty(len(skyPathB))
        for i in range(len(skyPathB)):
            timesB[i], alphaParOnly[i], deltaParOnly[i], xiParOnly[i], etaParOnly[i] = skyPathB[i].split()

    fig = plt.figure(figsize=(9.0, 8))
    if (args['parEllipse']):
        plt.plot(xiParOnly, etaParOnly, 'k--', alpha=0.5)
    if args['plotDots']:
        plt.plot(xi, eta, 'o')
    else:
        plt.plot(xi, eta)
    # Mark the start (circle), reference epoch (plus) and end (triangle).
    plt.scatter(xi[0], eta[0], c='r', marker='o', s=50)
    indref = np.searchsorted(times, refEpoch, side='right') - 1
    plt.scatter(xi[indref], eta[indref], c='r', marker='+', s=50)
    plt.scatter(xi[-1], eta[-1], c='r', marker='^', s=50)
    plt.xlabel("$\\xi$ [mas]")
    plt.ylabel("$\\eta$ [mas]")
    plt.grid()
    if (args['axisLimits'] != None):
        plt.xlim(args['axisLimits'])
        plt.ylim(args['axisLimits'])

    basename = 'skyPathIcrs'
    if args['pdfOutput']:
        plt.savefig(basename + '.pdf')
    elif args['pngOutput']:
        plt.savefig(basename + '.png')
    else:
        plt.show()
def parseCommandLineArguments():
    """
    Set up command line parsing and return the parsed arguments as a dict.
    """
    # Bug fix: the first positional argument of ArgumentParser is `prog`,
    # not the help text; pass it as description= so --help shows it properly.
    parser = argparse.ArgumentParser(
        description="Draw astrometric paths of stars on the sky.")
    parser.add_argument("--astrometry", dest="astrometryString",
                        help="""Comma-separated list of astrometric parameters
                        (alpha, delta, parallax, mu_alpha*cos(delta), mu_delta, Vrad)
                        [deg, deg, mas, mas/yr, mas/yr, km/s]""")
    parser.add_argument("--phaseSpace", dest="phaseSpaceString",
                        help="""Comma-separated list of phase space coordinates
                        (X, Y, Z, Vx, Vy, Vz) [pc, pc, pc, km/s, km/s, km/s]""")
    parser.add_argument("--ref", dest="refEpochString",
                        help="Reference epoch (Julian years)", default=2017.0)
    parser.add_argument("--start", dest="startEpochString",
                        help="Start epoch (Julian years)", default=2014.5)
    parser.add_argument("--interval", dest="timeIntervalString",
                        help="time interval (Julian years)", default=5.0)
    parser.add_argument("--plotLimits", dest="axisLimits", type=float, nargs=2,
                        help="list of plot axis limits (low high) for both x and y")
    parser.add_argument("--parEllipse", action="store_true", dest="parEllipse",
                        help="Show parallax ellipse")
    parser.add_argument("--npoints", type=int, default=1001,
                        help="""Number of points to calculate between start and end
                        epoch""")
    parser.add_argument("-d", action="store_true", dest="plotDots",
                        help="""Plot individual points instead of a continuous line.""")
    parser.add_argument("-p", action="store_true", dest="pdfOutput", help="Make PDF plot")
    parser.add_argument("-b", action="store_true", dest="pngOutput", help="Make PNG plot")
    parser.add_argument("-c", action="store_true", dest="colourFigure", help="Make colour plot")
    parser.add_argument("-t", action="store_true", dest="forTalk",
                        help="make version for presentations")
    args = vars(parser.parse_args())
    return args
# Bug fix: the original `if __name__ in ('__main__')` tested substring
# membership in a plain string — ('__main__') is not a tuple. It happened to
# work, but equality is the intended and idiomatic check.
if __name__ == '__main__':
    args = parseCommandLineArguments()
    make_plot(args)
| 3.625 | 4 |
day06.py | FailedCode/adventofcode-2017 | 3 | 12763459 | #!/usr/bin/python
# AoC 2017 day 6 example: banks "0 2 7 0" stabilize after 5 redistributions.
tests = [
    {'in': '0 2 7 0', 'out': 5},
]

with open('input/day06.txt', 'r') as file:
    file_text = file.read()
# NOTE(review): this script is Python 2 (print statements below), where map
# returns a real list that is consumed twice further down; under Python 3 the
# iterator would be exhausted after part 1.
inputs = map(lambda v: int(v), file_text.split())

print ""
print "part 1: count steps"
def redistribute_count(banks):
    """Redistribute the fullest memory bank repeatedly; return how many
    cycles pass before a previously seen configuration reappears.

    Mutates `banks` in place.
    """
    def index_of_max(values):
        # Index of the largest value; ties resolved to the lowest index.
        best = 0
        for idx in range(1, len(values)):
            if values[idx] > values[best]:
                best = idx
        return best

    size = len(banks)
    seen = set()
    steps = 0
    state = '-'.join(str(v) for v in banks)
    while state not in seen:
        seen.add(state)
        # Empty the fullest bank and deal its blocks one at a time to the
        # following banks, wrapping around.
        idx = index_of_max(banks)
        blocks = banks[idx]
        banks[idx] = 0
        while blocks:
            idx = (idx + 1) % size
            banks[idx] += 1
            blocks -= 1
        steps += 1
        state = '-'.join(str(v) for v in banks)
    return steps
# Verify the part-1 examples, then run the puzzle input.
for test in tests:
    in_value = test.get('in')
    out_value = test.get('out')
    out_result = redistribute_count(map(lambda v: int(v), in_value.split()))
    if out_value == out_result:
        print "{} => {} - OK".format(in_value, out_result)
    else:
        print "{} => {} - FAIL ({})".format(in_value, out_result, out_value)

# list(inputs) copies the banks so the shared input is not mutated in place.
print "puzzle: {}".format(redistribute_count(list(inputs)))

print ""
print "part 2: count loop"
# Part-2 example: the cycle "2 4 1 2" -> ... -> "2 4 1 2" has length 4.
tests = [
    {'in': '0 2 7 0', 'out': 4},
]
def redistribute_loop_count(banks):
def highestValueIndex(items):
highest_index = 0
highest_value = items[0]
for index, value in enumerate(items):
if value > highest_value:
highest_value = value
highest_index = index
return highest_index
"""
instead of the count, we calculate the
"""
bank_length = len(banks)
count = 0
configuration = '-'.join(map(lambda v: str(v), banks))
configurations = set()
configuration_list = list()
while configuration not in configurations:
configurations.add(configuration)
configuration_list.append(configuration)
pos = highestValueIndex(banks)
blocks = banks[pos]
banks[pos] = 0
while blocks:
pos = (pos + 1) % bank_length
banks[pos] += 1
blocks -= 1
count += 1
configuration = '-'.join(map(lambda v: str(v), banks))
return len(configuration_list) - configuration_list.index(configuration)
# Verify the part-2 example, then run the puzzle input.
for test in tests:
    in_value = test.get('in')
    out_value = test.get('out')
    out_result = redistribute_loop_count(map(lambda v: int(v), in_value.split()))
    if out_value == out_result:
        print "{} => {} - OK".format(in_value, out_result)
    else:
        print "{} => {} - FAIL ({})".format(in_value, out_result, out_value)

# NOTE(review): this second list(inputs) relies on Python 2 map returning a
# real list; under Python 3 the iterator was already exhausted in part 1.
print "puzzle: {}".format(redistribute_loop_count(list(inputs)))
| 3.78125 | 4 |
python/code_challenges/Merge_Sort/Merge_Sort.py | Noura-Alquran/data-structures-and-algorithms | 1 | 12763460 | <filename>python/code_challenges/Merge_Sort/Merge_Sort.py
def Merge_Sort(list):
    """Recursively merge-sort `list` in place and return it."""
    size = len(list)
    if size > 1:
        # Split in half, sort each half recursively, then merge the sorted
        # halves back into `list`.
        middle = size // 2
        left = list[:middle]
        right = list[middle:]
        Merge_Sort(left)
        Merge_Sort(right)
        Merge(left, right, list)
    return list
def Merge(left, right, list):
    """Merge two sorted lists `left` and `right` into `list` in place.

    `list` must already have len(left) + len(right) slots.
    """
    i = j = k = 0

    # Take the smaller head element (ties favour `left`, keeping the sort
    # stable) until one side is exhausted.
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            list[k] = left[i]
            i += 1
        else:
            list[k] = right[j]
            j += 1
        k += 1

    # Copy whatever remains of either side (at most one slice is non-empty).
    for value in left[i:]:
        list[k] = value
        k += 1
    for value in right[j:]:
        list[k] = value
        k += 1
if __name__=="__main__":
list1=[8,4,23,42,16,15]
print(Merge_Sort(list1))
list_2=[20,18,12,8,5,-2] #Reverse-sorted
print(Merge_Sort(list_2)) #[-2, 5, 8, 12, 18, 20]
list_3=[5,12,7,5,5,7] #Few uniques
print(Merge_Sort(list_3)) #[5, 5, 5, 7, 7, 12]
list_4=[2,3,5,7,13,11] #Nearly-sorted
print(Merge_Sort(list_4)) #[2, 3, 5, 7, 11, 13]
| 4.125 | 4 |
vel/modules/layers.py | tigerwlin/vel | 51 | 12763461 | """
Code (partially) based on:
https://github.com/fastai/fastai/blob/master/fastai/layers.py
"""
import torch
import torch.nn as nn
from vel.util.tensor_util import one_hot_encoding
class AdaptiveConcatPool2d(nn.Module):
    """Concat pooling: adaptive max pool and adaptive average pool of the
    same output size, concatenated along the channel dimension."""
    def __init__(self, sz=None):
        super().__init__()
        target = sz or (1, 1)
        self.ap = nn.AdaptiveAvgPool2d(target)
        self.mp = nn.AdaptiveMaxPool2d(target)

    def forward(self, x):
        # Max-pooled features first, then average-pooled.
        return torch.cat([self.mp(x), self.ap(x)], 1)
class Lambda(nn.Module):
    """Wrap an arbitrary callable as a torch ``nn.Module``.

    Useful for dropping simple tensor transforms into ``nn.Sequential``.
    """
    def __init__(self, f):
        super().__init__()
        # The wrapped callable; applied verbatim in forward().
        self.f = f

    def forward(self, x):
        return self.f(x)
class Flatten(nn.Module):
    """Flatten every dimension after the batch dimension into one."""
    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Preserve the batch size; collapse everything else.
        return x.view(x.size(0), -1)
class Identity(nn.Module):
    """No-op module: returns its input unchanged."""
    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x
class OneHotEncode(nn.Module):
    """One-hot encoding layer: maps integer class indices to one-hot vectors
    with `num_classes` entries (delegates to vel's one_hot_encoding)."""
    def __init__(self, num_classes):
        super().__init__()
        # Width of the produced one-hot vectors.
        self.num_classes = num_classes

    def forward(self, x):
        return one_hot_encoding(x, self.num_classes)
| 3.265625 | 3 |
courses/__init__.py | ubccapico/python-canvasapi-base | 4 | 12763462 | <gh_stars>1-10
from CanvasAPI.util import callhelper
from CanvasAPI.courses import settings, users
from CanvasAPI import instance
__all__ = ["get", "get_sections", "put", "delete", "settings", "users"]
def get(course_id, *args):
    '''Gets a course'''
    # Extra positional args become query parameters via args_to_params.
    endpoint = "courses/{}{}".format(course_id, callhelper.args_to_params(*args))
    return instance.call_api(endpoint)
def get_sections(course_id, *args):
    '''Gets all sections of a course, following pagination.'''
    url_str = "courses/{}/sections{}".format(course_id, callhelper.args_to_params(*args))
    return instance.all_pages(url_str)
def put(course_id, post_fields, *args):
    '''Updates a course (PUT /courses/:id); returns the API response.

    Keyword Arguments:
    course_id - id of course
    post_fields: - fields to post
    course[account_id] : integer
    course[name] : string
    course[course_code] : string
    course[start_at] : DateTime
    course[end_at] : DateTime
    course[license] : string
    course[is_public] : boolean
    course[is_public_to_auth_users] : boolean
    course[public_syllabus] : boolean
    course[public_syllabus_to_auth] : boolean
    course[public_description] : string
    course[allow_student_wiki_edits] : boolean
    course[allow_wiki_comments] : boolean
    course[allow_student_forum_attachments] : boolean
    course[open_enrollment] : boolean
    course[self_enrollment] : boolean
    course[restrict_enrollments_to_course_dates] : boolean
    course[term_id] : integer
    course[sis_course_id] : string
    course[integration_id] : string
    course[hide_final_grades] : boolean
    course[time_zone] : string
    course[apply_assignment_group_weights] : boolean
    course[storage_quota_mb] : integer
    offer : boolean
    course[event] : string
    course[default_view] : string
    course[syllabus_body] : string
    course[grading_standard_id] : integer
    course[course_format] : string
    course[image_id] : integer
    course[image_url] : string
    course[remove_image] : boolean
    course[blueprint] : boolean
    course[blueprint_restrictions] : BlueprintRestriction
    course[use_blueprint_restrictions_by_object_type] : boolean
    course[blueprint_restrictions_by_object_type] : multiple BlueprintRestrictions
    '''
    url_str = "courses/{}{}".format(course_id, callhelper.args_to_params(*args))
    return instance.call_api(url_str, method="PUT", post_fields=post_fields)
def post(account_id, post_fields):
    '''Create a new course under an account (POST /accounts/:id/courses);
    returns the API response.

    Keyword Arguments:
    account_id - id of the account to create the course under
    post_fields: - fields to post
    course[name] : string
    course[course_code] : string
    course[start_at] : DateTime
    course[end_at] : DateTime
    course[license] : string
    course[is_public] : boolean
    course[is_public_to_auth_users] : boolean
    course[public_syllabus] : boolean
    course[public_syllabus_to_auth] : boolean
    course[public_description] : string
    course[allow_student_wiki_edits] : boolean
    course[allow_wiki_comments] : boolean
    course[allow_student_forum_attachments] : boolean
    course[open_enrollment] : boolean
    course[self_enrollment] : boolean
    course[restrict_enrollments_to_course_dates] : boolean
    course[term_id] : integer
    course[sis_course_id] : string
    course[integration_id] : string
    course[hide_final_grades] : boolean
    course[time_zone] : string
    course[apply_assignment_group_weights] : boolean
    course[storage_quota_mb] : integer
    offer : boolean
    course[event] : string
    course[default_view] : string
    course[syllabus_body] : string
    course[grading_standard_id] : integer
    course[course_format] : string
    course[image_id] : integer
    course[image_url] : string
    course[remove_image] : boolean
    course[blueprint] : boolean
    course[blueprint_restrictions] : BlueprintRestriction
    course[use_blueprint_restrictions_by_object_type] : boolean
    course[blueprint_restrictions_by_object_type] : multiple BlueprintRestrictions
    '''
    url_str = "accounts/{}/courses".format(account_id)
    return instance.call_api(url_str, method="POST", post_fields=post_fields)
def delete(course_id):
    '''Deletes a course'''
    return instance.call_api("courses/{}".format(course_id), method="DELETE")
| 2.296875 | 2 |
stytra/bouter/extraction.py | mark-dawn/stytra | 0 | 12763463 | <filename>stytra/bouter/extraction.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def find_bouts_timeseries(
time, tail_sum, vigour_duration=0.050, vigour_threshold=0.3, diagnostic_axis=None
):
"""
Parameters
----------
time :
tail_sum :
vigour_duration :
(Default value = 0.050)
vigour_threshold :
(Default value = 0.3)
diagnostic_axis :
(Default value = None)
Returns
-------
"""
dt = np.mean(np.diff(time[:10]))
n_vigour_std = int(round(vigour_duration / dt))
vigour = pd.Series(tail_sum).rolling(n_vigour_std).std()
bouting = vigour > vigour_threshold
if diagnostic_axis is not None:
diagnostic_axis.plot(time, tail_sum / np.max(tail_sum), lw=0.5)
mv = np.max(vigour)
diagnostic_axis.axhline(vigour_threshold / mv)
diagnostic_axis.plot(time, vigour / mv, lw=0.5)
diagnostic_axis.plot(time, bouting, lw=0.5)
bout_starts = np.where(np.diff(bouting * 1.) > 0)[0]
bout_ends = np.where(np.diff(bouting * 1.) < 0)[0]
return bout_starts, bout_ends
| 2.375 | 2 |
Code/sorting_recursive.py | caocmai/cs-2.1-trees-sorting | 0 | 12763464 | #!python
def merge(items1, items2):
    """Merge two already-sorted lists into one new sorted list.

    Running time: O(n + m) — every element of both inputs is visited once.
    Memory usage: O(n + m) for the merged output list.
    """
    merged = []
    i = j = 0

    # Repeatedly take the smaller front element (ties favour items2, matching
    # the strict-less-than comparison).
    while i < len(items1) and j < len(items2):
        if items1[i] < items2[j]:
            merged.append(items1[i])
            i += 1
        else:
            merged.append(items2[j])
            j += 1

    # At most one of these slices is non-empty; it holds the sorted remainder.
    merged.extend(items1[i:])
    merged.extend(items2[j:])
    return merged
def merge_sort(items):
    """Sort `items` in place by splitting it into two approximately equal
    halves, sorting each recursively, and merging the results.

    Running time: O(n log n) -- log n levels of splitting with O(n) merge
    work per level (divide and conquer).
    Memory usage: O(n) for the half-list copies and the merged output.
    """
    # Base case: empty and single-element lists are already sorted. The
    # empty check is essential -- splitting [] would otherwise recurse
    # forever. (The original tested `items == []` and `len(items) == 1`
    # separately; a single `<= 1` comparison covers both.)
    if len(items) <= 1:
        return items

    mid = len(items) // 2
    left = items[:mid]
    right = items[mid:]

    # Slice-assign so the caller's list object is mutated in place rather
    # than replaced, then return a copy as the original did.
    items[:] = merge(merge_sort(left), merge_sort(right))
    return items[:]
def partition(items, low, high):
    """Return index `p` after in-place partitioning of items[low...high].

    Uses the Lomuto scheme with items[high] (the last element of the
    range) as the pivot: after the call the pivot sits at index `p`,
    items less than the pivot occupy [low...p-1], and items greater than
    or equal to it occupy [p+1...high].

    Running time: O(high - low) -- a single pass over the range.
    Memory usage: O(1) -- swaps happen in place.
    """
    pivot = items[high]
    p = low  # next slot for an element smaller than the pivot
    for i in range(low, high):
        if items[i] < pivot:
            items[i], items[p] = items[p], items[i]
            p += 1
    # Move the pivot into its final sorted position.
    items[p], items[high] = items[high], items[p]
    return p
def quick_sort(arr, low=None, high=None):
    """Sort `arr` in place and return a sorted copy of it.

    `low` and `high` are accepted for interface compatibility with an
    in-place partition variant but are not used: this implementation
    builds sublists around a pivot and sorts them recursively.

    Best/average case running time: O(n log n).
    Worst case running time: O(n^2) when every pivot is an extreme value.
    Memory usage: O(n) for the per-call sublists and the recursion stack.
    """
    # Base case: zero or one element is already sorted.
    if len(arr) <= 1:
        return arr

    # Middle element as pivot avoids the guaranteed worst case on
    # already-sorted input that a first-element pivot hits; it also avoids
    # the O(n) element shift the original paid for arr.pop(0).
    pivot_value = arr[len(arr) // 2]

    # Three-way split keeps duplicates of the pivot out of the recursion.
    smaller = [num for num in arr if num < pivot_value]
    equal = [num for num in arr if num == pivot_value]
    larger = [num for num in arr if num > pivot_value]

    # Slice-assign so the caller's list is mutated in place (required by
    # sorting_test.py), then return a copy matching the original contract.
    arr[:] = quick_sort(smaller) + equal + quick_sort(larger)
    return arr[:]
| 4.375 | 4 |
ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/setup_atlas_sqoop.py | cas-packone/ambari-chs | 3 | 12763465 | <gh_stars>1-10
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.resources.properties_file import PropertiesFile
from resource_management.core.resources.packaging import Package
from resource_management.core.resources.system import Link
from resource_management.libraries.functions.format import format
from ambari_commons import OSCheck
import os
def setup_atlas_sqoop():
  """Wire the Apache Atlas hook into Sqoop when Atlas is deployed.

  Installs the Atlas plugin package (unless the host was sys-prepped),
  writes the Atlas client properties into Sqoop's conf dir, and symlinks
  every Atlas sqoop-hook jar into Sqoop's lib dir so it lands on the
  classpath. No-op when the cluster has no Atlas.
  """
  import params  # Ambari generates this module per command; import lazily

  if params.has_atlas:
    # Sys-prepped hosts are expected to have the plugin package baked in.
    if not params.host_sys_prepped:
      Package(params.atlas_ubuntu_plugin_package if OSCheck.is_ubuntu_family() else params.atlas_plugin_package,
              retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability, retry_count=params.agent_stack_retry_count)

    # Atlas client configuration, readable by the sqoop user.
    # NOTE: 0644 is a Python 2 octal literal -- this script targets py2.
    PropertiesFile(format('{sqoop_conf_dir}/{atlas_conf_file}'),
                   properties = params.atlas_props,
                   owner = params.sqoop_user,
                   group = params.user_group,
                   mode = 0644)

    atlas_sqoop_hook_dir = os.path.join(params.atlas_home_dir, "hook", "sqoop")
    if os.path.exists(atlas_sqoop_hook_dir):
      # Link each hook jar from the Atlas install into Sqoop's lib dir.
      src_files = os.listdir(atlas_sqoop_hook_dir)
      for file_name in src_files:
        atlas_sqoop_hook_file_name = os.path.join(atlas_sqoop_hook_dir, file_name)
        sqoop_lib_file_name = os.path.join(params.sqoop_lib, file_name)
        if (os.path.isfile(atlas_sqoop_hook_file_name)):
          Link(sqoop_lib_file_name, to = atlas_sqoop_hook_file_name)
| 1.609375 | 2 |
open_peyote.py | huba/OpenPeyote | 2 | 12763466 | #!/usr/bin/env python3
"""
OpenPeyote. A program for designing patterns to be made with the
Peyote beading technique.
Author: <NAME> (<EMAIL>)
Date: 10.11.2014
"""
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import json
from design_widget import *
from catalog_widget import *
from wizards_and_dialogs import *
from util import *
class MainWindow(QMainWindow):
    """The main application window.

    Hosts a tabbed MDI area of open designs, the bead catalog dock, the
    menu / tool / status bars, and the file open/save plumbing.
    """

    def __init__(self):
        """Init function for the main application."""
        super(MainWindow, self).__init__(None)
        # Fallback bead used until the user picks one from the catalog.
        self.default_bead = BeadType('blank', 'n/a', QBrush(QColor(230, 230, 228)), '#000000', '#000000', 10)

        # Calls the functions to prepare each area of the main window
        self.create_central_widget()
        self.create_menu_bar()
        self.create_tool_bar()
        self.create_docked_widgets()
        self.create_status_bar()

    def create_central_widget(self):
        """Sets up the multiple document interface (one tab per design)."""
        self.mdi_widget = QMdiArea()
        self.mdi_widget.setViewMode(QMdiArea.TabbedView)
        self.setCentralWidget(self.mdi_widget)

    def create_menu_bar(self):
        """Adds all the menus and actions to the menu bar."""
        # the file menu...
        file_menu = self.menuBar().addMenu('File')

        new_action = file_menu.addAction('New Design')
        new_action.setShortcuts(QKeySequence.New)
        new_action.triggered.connect(self.new_design)

        open_action = file_menu.addAction('Open Design')
        open_action.setShortcuts(QKeySequence.Open)
        open_action.triggered.connect(self.open_design)

        save_action = file_menu.addAction('Save Design')
        save_action.setShortcuts(QKeySequence.Save)
        save_action.triggered.connect(self.save_design)

        save_as_action = file_menu.addAction('Save Design As')
        # TODO: look into why QKeySequence.SaveAs is not recognized in pyqt5
        # save_as_action.setShortcuts(QKeySequence.SaveAs)
        save_as_action.triggered.connect(self.save_as)

        # the edit menu...
        # TODO: all the menus and etc

    def create_tool_bar(self):
        """Adds the tools to the toolbar."""
        edit_tool_bar = self.addToolBar('Edit')

        # Mutually exclusive editing tools: exactly one may be checked.
        tool_group = QActionGroup(self)
        tool_group.setExclusive(True)

        self.bead_tool_action = edit_tool_bar.addAction('Bead Tool')
        self.bead_tool_action.setCheckable(True)
        tool_group.addAction(self.bead_tool_action)

        self.remove_tool_action = edit_tool_bar.addAction('Clear Tool')
        self.remove_tool_action.setCheckable(True)
        tool_group.addAction(self.remove_tool_action)

    def create_status_bar(self):
        """Prepares the status bar."""
        self.setStatusBar(QStatusBar(self))
        self.statusBar().showMessage('Test', 2000)

    def create_docked_widgets(self):
        """Adds the bead catalog as a dock widget on the left side."""
        self.catalog = Catalog()
        self.working_bead = self.default_bead

        catalog_dock = QDockWidget()
        catalog_dock.setWidget(self.catalog)
        catalog_dock.setFeatures(QDockWidget.DockWidgetVerticalTitleBar)
        catalog_dock.setWindowTitle('Catalog')
        self.addDockWidget(Qt.LeftDockWidgetArea, catalog_dock)

        self.catalog.currentItemChanged.connect(self.select_type)

    def new_design(self):
        """Slot for creating a new design. This function summons Gandalf who will help
        the user with creating their design."""
        # TODO: hmmm this generates a weird message, might need to look at that...
        # Also there are some performance problems...
        wizard = NewWizard(self)
        wizard.exec_()

    def open_design(self):
        """Slot that opens (a) design(s) in a new tab."""
        (paths, flt) = QFileDialog.getOpenFileNames(parent=self, caption='Open Design',
                                                    filter='Peyote Design (*{})'.format(design_extension))
        if flt == '':
            # it means they clicked cancel...
            return

        for path in paths:
            self._open_design(path)

    def _open_design(self, path):
        """Load one saved design file and add it as a new MDI sub-window."""
        with open(path, 'r') as file:
            rdict = json.load(file)

        info = rdict['__info__']
        grid = rdict['__beads__']
        design = DesignScene(self,
                             bgrid=grid,
                             name=info['__name__'],
                             track_width=info['__track_width__'],
                             tracks=info['__tracks__'],
                             height=info['__height__'])
        area = PatternArea(design=design)
        area.filepath = path
        self.mdi_widget.addSubWindow(area)

    def save_design(self):
        """Slot for saving the design in the active tab."""
        # TODO: handle no tabs being open, it would be a good idea if
        # the button was disabled when there are no tabs for example.
        design = self.mdi_widget.activeSubWindow().widget().scene()

        if not self.mdi_widget.activeSubWindow().widget().filepath:
            # First save of this design: ask the user where to put it.
            name_s = '_'.join(design.name.lower().split(' '))
            (path, flt) = QFileDialog.getSaveFileName(self, 'Save Design',
                                                      './{}{}'.format(name_s, design_extension),
                                                      'Peyote Design (*{})'.format(design_extension))
            if flt == '':
                # it means they clicked cancel...
                return
            self.mdi_widget.activeSubWindow().widget().filepath = path
        else:
            path = self.mdi_widget.activeSubWindow().widget().filepath

        with open(path, 'w') as file:
            json.dump(design.to_dict(), file)

    def save_as(self):
        """Save and force the user to select a new path."""
        design = self.mdi_widget.activeSubWindow().widget().scene()
        # Fixed a duplicated assignment here (was `name_s = name_s = ...`).
        name_s = '_'.join(design.name.lower().split(' '))
        (path, flt) = QFileDialog.getSaveFileName(self, 'Save Design',
                                                  './{}{}'.format(name_s, design_extension),
                                                  'Peyote Design (*{})'.format(design_extension))
        if flt == '':
            # it means they clicked cancel...
            return

        self.mdi_widget.activeSubWindow().widget().filepath = path
        with open(path, 'w') as file:
            json.dump(design.to_dict(), file)

    def select_type(self, new_selection, prev_selection):
        """Slot used to select a type of bead from the catalog."""
        # Item type 1001 marks a BeadType entry in the catalog widget.
        if new_selection.type() == 1001:
            self.working_bead = new_selection
if __name__ == '__main__':
    import sys

    app = QApplication(sys.argv)
    mw = MainWindow()
    # NOTE(review): MainWindow defines no create(); this resolves to the
    # low-level QWidget.create(), which forces native window creation.
    # Presumably intentional before showMaximized() -- confirm.
    mw.create()
    mw.showMaximized()
    sys.exit(app.exec_())
| 2.71875 | 3 |
Secao 17 - Algoritmo apriori/mercado2.py | flaviofontes29/Machine-Learning-e-Data-Science-com-Python | 0 | 12763467 | <filename>Secao 17 - Algoritmo apriori/mercado2.py<gh_stars>0
import pandas as pd

# Market-basket data: one transaction per row, no header, up to 20 items.
dados = pd.read_csv('mercado2.csv', header = None)
transacoes = []
# apyori expects a list of transactions, each being a list of item strings.
for i in range(0, 7501):
    transacoes.append([str(dados.values[i,j]) for j in range(0, 20)])

from apyori import apriori
# Mine association rules with support >= 0.003, confidence >= 20% and
# lift >= 2 (rules must be at least twice as likely as chance).
regras = apriori(transacoes, min_support = 0.003, min_confidence = 0.2, min_lift = 2.0, min_length = 2)
resultados = list(regras)
resultados

# Flatten each RelationRecord into a plain list for easier inspection.
resultados2 = [list(x) for x in resultados]
resultados2

# Unpack the ordered-statistics entry of the first three rules.
resultadoFormatado = []
for j in range(0, 3):
    resultadoFormatado.append([list(x) for x in resultados2[j][2]])
resultadoFormatado
| 2.96875 | 3 |
template/code/bazel/cc_opts.bzl | thill/cc-template | 1 | 12763468 | <gh_stars>1-10
def default_copts(ignored = []):
    """Return the default C++ compiler options minus any listed in `ignored`.

    Args:
        ignored: list of option strings to exclude from the defaults.

    Returns:
        The default copts with every ignored option filtered out.
    """
    opts = [
        "-std=c++20",
        "-Wall",
        "-Werror",
        "-Wextra",
        "-Wno-ignored-qualifiers",
        "-Wvla",
    ]

    # Plain membership test; the intermediate identity dict the original
    # built added nothing for a handful of options.
    return [opt for opt in opts if opt not in ignored]
| 2.078125 | 2 |
app/models.py | francismuk/blog | 0 | 12763469 | <filename>app/models.py<gh_stars>0
from . import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from . import login_manager
from sqlalchemy.sql import func
@login_manager.user_loader
def load_user(user_id):
    '''
    Callback used by Flask-Login to reload a user from the session.

    The session stores user_id as a string, so it is cast to int before
    the primary-key lookup. Returns the User instance or None.
    '''
    return User.query.get(int(user_id))
class User(UserMixin, db.Model):
    '''
    Application user account. UserMixin supplies the Flask-Login API
    (is_authenticated, get_id, ...).
    '''
    # Name of the table
    __tablename__ = 'users'
    # Primary key
    id = db.Column(db.Integer, primary_key=True)
    # Display name (not unique)
    username = db.Column(db.String(255))
    # Email address; unique and indexed since it is used for login lookups
    email = db.Column(db.String(255), unique=True, index=True)
    # Werkzeug password hash; the plain password is never stored
    password_hash = db.Column(db.String(255))
    # Blog posts authored by this user (one-to-many)
    blogs = db.relationship('Blog', backref='user', lazy='dynamic')
    # Comments written by this user (one-to-many)
    comments = db.relationship('Comment', backref='user', lazy='dynamic')

    @property
    def password(self):
        # Write-only attribute: reading the plain password is forbidden.
        raise AttributeError('You cannot read the password attribute')

    @password.setter
    def password(self, password):
        # Store only a salted hash of the supplied password.
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        '''Return True if `password` matches the stored hash.'''
        return check_password_hash(self.password_hash, password)

    def __repr__(self):
        return f'User {self.username}'
class Category(db.Model):
    '''
    Blog category; each category groups many Blog posts.
    '''
    __tablename__ = 'categories'

    # Primary key
    id = db.Column(db.Integer, primary_key=True)
    # Human-readable category name
    name= db.Column(db.String(255))
    # Blog posts filed under this category (one-to-many)
    blogs= db.relationship('Blog', backref= 'blog', lazy='dynamic')

    def save_category(self):
        '''
        Persist this category to the categories table.
        '''
        db.session.add(self)
        db.session.commit()

    def delete_category(self):
        '''Remove this category from the database.'''
        db.session.delete(self)
        db.session.commit()

    @classmethod
    def get_categories(cls):
        '''
        Return every row of the categories table.

        Returns:
            categories : list of all Category instances
        '''
        categories = Category.query.all()
        return categories
class Blog(db.Model):
    '''
    A blog post written by a user and filed under a category.
    '''
    # Name of the table
    __tablename__ = 'blogs'

    # Primary key
    id = db.Column(db.Integer, primary_key = True)
    # Body text of the post (max 200 characters)
    blog_content = db.Column(db.String(200))
    # Category this post belongs to
    category_id = db.Column(db.Integer, db.ForeignKey("categories.id"))
    # Author of the post
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    # Comments left on this post (one-to-many)
    comments = db.relationship('Comment', backref='line', lazy='dynamic')

    def save_blog(self):
        '''
        Persist this blog post to the blogs table.
        '''
        db.session.add(self)
        db.session.commit()

    # def delete_blog(self):
    #     db.session.delete(self)
    #     db.session.commit()

    @classmethod
    def get_blogs(cls,category_id):
        '''
        Return the blog posts in one category, newest first.

        Args:
            category_id : id of the category to filter by

        Returns:
            blogs : posts with that category id, ordered by descending id
        '''
        blogs = Blog.query.order_by(Blog.id.desc()).filter_by(category_id=category_id).all()
        return blogs
class Comment(db.Model):
    '''
    Feedback left by a user on a blog post.
    '''
    # Name of the table
    __tablename__ = 'comments'

    # Primary key
    id = db.Column(db.Integer, primary_key = True)
    # Body text of the comment
    comment_content = db.Column(db.String)
    # Blog post this comment belongs to
    blog_id = db.Column(db.Integer, db.ForeignKey("blogs.id") )
    # Author of the comment
    user_id = db.Column(db.Integer, db.ForeignKey("users.id") )

    def save_comment(self):
        '''
        Persist this comment to the comments table.
        '''
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_comments(cls,blog_id):
        '''
        Return every comment on one blog post.

        Args:
            blog_id : id of the blog post

        Returns:
            comments : all Comment instances with that blog id
        '''
        comments = Comment.query.filter_by(blog_id=blog_id).all()
        return comments
examples/e2e/conftest.py | anneschuth/pact-python | 0 | 12763470 |
from testcontainers.compose import DockerCompose
import pytest
def pytest_addoption(parser):
    """Register the command-line options this test suite understands."""
    parser.addoption(
        "--publish-pact", type=str, action="store",
        help="Upload generated pact file to pact broker with version"
    )
    parser.addoption(
        "--provider-url", type=str, action="store",
        help="The url to our provider."
    )
# This fixture is to simulate a managed Pact Broker or Pactflow account
# Do not do this yourself but setup one of the above
# https://github.com/pact-foundation/pact_broker
@pytest.fixture(scope='session', autouse=True)
def broker(request):
    """Session fixture: run a local Pact Broker via docker-compose, but
    only when the suite was asked to publish pacts (--publish-pact)."""
    version = request.config.getoption('--publish-pact')
    publish = bool(version)
    if not publish:
        # Nothing to publish: yield immediately so tests run without docker.
        yield
        return

    print('Starting broker')
    with DockerCompose("../broker",
                       compose_file_name=["docker-compose.yml"],
                       pull=True) as compose:
        stdout, stderr = compose.get_logs()
        if stderr:
            # Fixed format string: the original "Errors\\n:{}" printed a
            # literal backslash-n instead of a newline.
            print("Errors:\n{}".format(stderr))
        print(stdout)
        yield
| 2.203125 | 2 |
akshay-kamath-individual-project/Code/01_DataManipulation.py | purvithakor/convolutional-neural-network-on-svhn | 0 | 12763471 | import warnings
warnings.filterwarnings('ignore')
import numpy as np
import scipy.io as sio
import seaborn as sns
import matplotlib.pyplot as plt
import wget
import os
from random import shuffle
import cv2
from PIL import Image
sns.set_style("white")
# ----------------------Downloading DATA--------------------------
folder_name = 'svhn_data'
filename_list = ['train_32x32.mat', 'test_32x32.mat', 'extra_32x32.mat']
print('\n')
print('Checking if ' + folder_name + ' directory exists')
print('\n')
if not os.path.exists(folder_name):
print('Directory does not exist. Creating ' + folder_name + ' directory now')
print('\n')
os.mkdir(folder_name)
print('Directory ' + folder_name + ' created')
else:
print('Directory ' + folder_name + ' already exists.')
print('\n')
print('Downloading svhn data files...')
print('\n')
for filename in filename_list:
filepath = './svhn_data/' + filename
if not os.path.exists(filepath):
print('Downloading ' + filename + ' file')
print('\n')
url = 'http://ufldl.stanford.edu/housenumbers/' + filename
wget.download(url, filepath)
else:
print('File ' + filename + ' already exists.')
print('\n')
print(20*"+")
print('Downloading done')
# ------------------------------------------------------------------------
def image_compare(img,lab,fig_name):
    """Show a 3x3 grid of sample images (indices 1-9) from `img`, each
    titled with its label, in a figure window named `fig_name`.

    img: image stack of shape (H, W, C, N); lab: matching label array.
    """
    plt.figure(str(fig_name))
    for i in range(1, 10):
        plt.subplot(3, 3, i)
        plt.imshow(img[:,:,:,i])
        plt.title('Num ' + str(lab[i]))
        # Hide the axis ticks. The original called plt.xticks()/plt.yticks()
        # with no arguments, which only *queries* the current ticks and
        # changes nothing; passing [] actually clears them.
        plt.xticks([])
        plt.yticks([])
    plt.tight_layout()
    plt.show(block=False)
    return
# ---------------------------LOADING SVHN DATA----------------------------
# These file contains dictionaries.
# The dictionaries keys are: dict_keys(['y', 'X', '__version__', '__header__', '__globals__'])
# We are only concerned with the 'y' and 'X'.
# The 'y' key contains the labels (What the number is in the image)
# The 'X' key contains the actual images.
train_data = sio.loadmat('svhn_data/train_32x32.mat')
test_data = sio.loadmat('svhn_data/test_32x32.mat')
extra_data = sio.loadmat('svhn_data/extra_32x32.mat')

# Combining X from train, test & extra & stacking them one above the other.
# Images are stored as (32, 32, 3, N): the sample axis is the last one.
x_train = np.array(train_data['X'])
x_test = np.array(test_data['X'])
x_extra = np.array(extra_data['X'])
x = np.concatenate((x_train,x_test,x_extra),axis=-1)
print(20*"+")
print("Combined all image matrices!")

# Combining y from train, test & extra & converting label 10 to 0 across the entire target variable
y_train = train_data['y']
y_test = test_data['y']
y_extra = extra_data['y']
y = np.concatenate((y_train,y_test,y_extra))
y[y == 10] = 0 # label 10 has been converted to 0 (SVHN encodes digit "0" as 10)
print(20*"+")
print("Combined all labels!")

# Shuffle by permuting one shared index list so images and labels stay
# aligned.
ind_list = [i for i in range(len(x[1,1,1,:]))]
shuffle(ind_list)
x_s = x[:,:,:,ind_list]
y_s = y[ind_list,]
print(20*"+")
print("Data Shuffled!")

# Splitting into train & test (80% / 20%)
train_pct_index = int(0.8 * (len(x[1,1,1,:])))
X_train, X_test = x_s[:,:,:,:train_pct_index], x_s[:,:,:,train_pct_index:]
y_train, y_test = y_s[:train_pct_index], y_s[train_pct_index:]

#####################################################################
# Side-by-side bar plots of the label distribution in each split.
unique1, train_counts = np.unique(y_train, return_counts=True)
train_counts = np.asarray( (unique1, train_counts) ).T
unique2, test_counts = np.unique(y_test, return_counts=True)
test_counts = np.asarray( (unique2, test_counts) ).T

ax1 = plt.subplot(121)
ax1.grid(False)
sns.set_style("white")
sns.barplot(np.arange(0,len(train_counts)),train_counts[:,-1])
plt.xlabel("Categories")
plt.ylabel("Counts")
plt.title("Labels distribution in Train Dataset")

ax2 = plt.subplot(122,sharey=ax1)
ax2.grid(False)
sns.set_style("white")
sns.barplot(np.arange(0,len(test_counts)),test_counts[:,-1])
plt.xlabel("Categories")
plt.ylabel("Counts")
plt.title("Labels distribution in Test Dataset")
plt.show()
#####################################################################
print(20*"+")
print("Data Splitting Completed!")

# PLOTTING IMAGES
# Normalizing images: target size for transform_img below.
IMAGE_WIDTH = 32
IMAGE_HEIGHT = 32
def transform_img(img, img_width, img_height):
    """Equalize each colour channel's histogram and resize to the target
    size, returning the resized image.

    NOTE(review): the input is modified in place by the per-channel
    equalization before the resized copy is returned; assumes a 3-channel
    uint8 image (OpenCV BGR order) -- confirm with callers.
    """
    #Histogram Equalization, applied independently per channel
    img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
    img[:, :, 1] = cv2.equalizeHist(img[:, :, 1])
    img[:, :, 2] = cv2.equalizeHist(img[:, :, 2])
    img = cv2.resize(img, (img_width, img_height), interpolation = cv2.INTER_CUBIC)
    return img
x_train_normalized = []
x_test_normalized = []

# Show a 3x3 sample of training images before and after equalization.
image_compare(X_train,y_train,"before normalizing")
tot_train_images = len(X_train[1,1,1,:])
for i in range(tot_train_images):
    image = X_train[:,:,:,i]
    img = transform_img(image, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT)
    x_train_normalized.append(img)
# Restack from (N, 32, 32, 3) back to the original (32, 32, 3, N) layout.
x_train_normalized = np.array(x_train_normalized)
x_train_normalized = np.transpose(x_train_normalized,(1,2,3,0))
image_compare(x_train_normalized,y_train,"after normalizing")
print(20*"+")
print("Normalized Training Images!")

tot_test_images = len(X_test[1,1,1,:])
for i in range(tot_test_images):
    image = X_test[:,:,:,i]
    img = transform_img(image, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT)
    x_test_normalized.append(img)
x_test_normalized = np.array(x_test_normalized)
x_test_normalized = np.transpose(x_test_normalized,(1,2,3,0))
print(20*"+")
print("Normalized Testing Images!")
print(20*"+")
print("Preprocessing Completed!")

# Note - Data has been combined, shuffled, splitted, normalized here. Also label 10 has been converted to 0
# Now we work on Frameworks

#### SHAPE OF X_TRAIN_NORMALIZED IS (32, 32, 3, 504336)
#### SHAPE OF X_TEST_NORMALIZED IS (32, 32, 3, 126084)
#### SHAPE OF Y_TRAIN_NORMALIZED IS (504336, 1)
#### SHAPE OF Y_TEST_NORMALIZED IS (126084, 1)

# Persist the processed splits as MATLAB v5 files for the next stage.
dict_train = {'x_train':x_train_normalized,'y_train':y_train}
dict_test = {'x_test':x_test_normalized,'y_test':y_test}
sio.savemat('./svhn_data/train_processed.mat',dict_train,format='5')
sio.savemat('./svhn_data/test_processed.mat',dict_test,format='5')
print(20*"+")
print("Files Created!")
print("Execute 02_LMDB_Creator now!")
print(20*"+")
main.py | LJ-LiJiahe/camera_calibration | 0 | 12763472 | <reponame>LJ-LiJiahe/camera_calibration<gh_stars>0
import glob
import numpy as np
import cv2
import config as cfg
# Initialization of parameters
board_size_x = cfg.board_size_x  # inner corners per chessboard row
board_size_y = cfg.board_size_y  # inner corners per chessboard column

# termination criteria for the sub-pixel corner refinement below
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
# (chessboard corner coordinates in its own plane, z = 0)
objp = np.zeros((board_size_y*board_size_x, 3), np.float32)
objp[:,:2] = np.mgrid[0:board_size_x, 0:board_size_y].T.reshape(-1, 2)

# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.

images = glob.glob(cfg.img_location)

for fname in images:
    img = cv2.imread(fname)
    img = cv2.resize(img, cfg.img_resize)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (board_size_x,board_size_y),None)
    print(ret)

    # If found, add object points, image points (after refining them)
    if ret == True:
        objpoints.append(objp)
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners2)

        # Draw and display the corners
        cv2.imshow('Original', img)
        img = cv2.drawChessboardCorners(img, (board_size_x, board_size_y), corners2, ret)
        cv2.imshow('Find Corner', img)
        cv2.waitKey(cfg.imshow_time)

# Calibration: recover the intrinsic matrix and distortion coefficients
# from all detected corner sets.
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)

# Undistortion: re-open each image and remap it with the recovered model.
for fname in images:
    img = cv2.imread(fname)
    img = cv2.resize(img, cfg.img_resize)
    h, w = img.shape[:2]
    newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))

    # undistort
    dst = cv2.undistort(img, mtx, dist, None, newcameramtx)

    # crop the image to the valid region of interest
    x,y,w,h = roi
    dst = dst[y:y+h, x:x+w]
    # print(dst)
    cv2.imshow('img', img)
    cv2.imshow('dst', dst)
    cv2.waitKey(cfg.imshow_time)
# cv2.imwrite('calibresult.png',dst)
authors/apps/articles/pagination.py | C3real-kill3r/binary-jungle-backend | 0 | 12763473 | from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
class StandardResultsSetPagination(PageNumberPagination):
    """
    A page number style that supports page numbers as
    query params

    `example usage`
    http://localhost:8000/api/articles/?page=3
    http://localhost:8000/api/articles?page=4&page_size=100
    """
    # the default page size when the client does not ask for one
    page_size = 20
    # client can control the page size using this query parameter
    page_size_query_param = 'page_size'
    # upper bound on the client-requested page size. Raised from 10 to 100:
    # the old value was below the default page_size (20), contradicting
    # both the default and the docstring example (page_size=100).
    max_page_size = 100

    def paginate_queryset(self, queryset, request, view=None):
        # Overriding this disables Django's "unordered object_list" check,
        # accepting that unordered querysets may paginate inconsistently.
        self.django_paginator_class._check_object_list_is_ordered = lambda s: None
        return super().paginate_queryset(queryset, request, view=view)

    def get_paginated_response(self, data):
        """Wrap `data` with next/previous links and count metadata."""
        return Response({
            'links': {
                'next': self.get_next_link(),
                'previous': self.get_previous_link()
            },
            'count': self.page.paginator.count,
            'total_pages': self.page.paginator.num_pages,
            'results': data
        })
| 2.65625 | 3 |
setup.py | bodenr/onmcast | 0 | 12763474 | <reponame>bodenr/onmcast
from setuptools import setup, find_packages
# Package metadata for the oslo-messaging notification multicast driver.
setup(
    name='onmcast',
    version='0.1',
    description='oslo-messaging notification multicast',
    author='Boden',
    author_email='<EMAIL>',
    # Trove classifiers describing maturity and supported interpreters.
    classifiers=[
        'Development Status :: 1 - Beta',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Intended Audience :: Developers'
    ],
    platforms=['Any'],
    include_package_data=True,
    zip_safe=False,
    provides=['onmcast'],
    scripts=[],
    packages=find_packages(),
    install_requires=[],
    setup_requires=[],
    # Register the driver so oslo.messaging discovers it as a notification
    # driver named "messaging-multicast".
    entry_points={
        'oslo.messaging.notify.drivers': [
            'messaging-multicast = onmcast.notify.driver:AMQPMulticastDriver'
        ]
    }
)
UDP/UDP_SERVER.py | Craziertexas/GPS_TRACKER_PC | 0 | 12763475 | import socket
def init_server(server_ip, server_port):
    """Create a UDP socket bound to (server_ip, server_port).

    Stores the socket in the module-level `server` global for later use by
    server_recive, and resets the module-level message counter `i`.
    """
    global server, i
    try:
        server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        server_address = (server_ip, server_port)
        server.bind(server_address)
        i = 0
        print("Hello!! Im on {} at port {} :3".format(*server_address))
    except OSError as error:
        # Narrowed from a bare `except`, which also hid programming
        # errors; include the actual cause in the report.
        print("An error occurred during the server initialization: {}".format(error))
def server_recive():
    """Block until one UDP datagram (up to 1024 bytes) arrives on the
    module-level `server` socket, then print its payload and sender."""
    global server, i
    try:
        data, address = server.recvfrom(1024)
        print("\n" + "Message: ", (data.decode()))
        print("From: ", (address))
    except (OSError, UnicodeDecodeError) as error:
        # Narrowed from a bare `except`; a non-UTF-8 payload (decode
        # failure) is the other expected error here. Report the cause.
        print("An error occurred: {}".format(error))
| 3.53125 | 4 |
4_selenium/selenium.py | strohne/autocol | 1 | 12763476 | <reponame>strohne/autocol
# Selenium package to remote control the browser
from selenium import webdriver
from selenium.webdriver.common.by import By

# Driver for Chrome browser
#from selenium.webdriver.chrome.service import Service
#from webdriver_manager.chrome import ChromeDriverManager
#chrome_service = Service(ChromeDriverManager().install())

# Driver for Firefox browser; webdriver_manager downloads a matching
# geckodriver binary on first run.
from selenium.webdriver.firefox.service import Service
from webdriver_manager.firefox import GeckoDriverManager
firefox_service = Service(GeckoDriverManager().install())

#
# Open browser
#

# For chrome
#browser = webdriver.Chrome(service=chrome_service)

# For Firefox
browser = webdriver.Firefox(service=firefox_service)

# Open page
browser.get("https://www.google.com/")

# Wait up to 10 seconds when using elements,
# needed because elements are only accessible when page has been loaded
browser.implicitly_wait(10)

# Find search input
# See the documentation: https://selenium-python.readthedocs.io/locating-elements.html
input_search = browser.find_element(By.NAME,"q")

# Type into the search input
input_search.send_keys("<NAME> find ")

# Submit
input_search.submit()

#
# Extract data
#

# Get number of search results
results = browser.find_element(By.ID,'result-stats')
print(results.text)

# Extract number of search results.
# NOTE(review): the pattern matches the German results page
# ("... Ergebnisse"); adjust for other interface languages.
import re
number = re.search('([0-9\.]+) Ergebnisse', results.text).group(1)

# Remove dot from text (German thousands separator)
number = number.replace('.','')

# Convert to integer number (int)
number = int(number)
print(number)

#
# Multiple searches
#

# URL and keyword list for multiple search terms
url = "https://www.google.com/"
keywords = ["Computational","Statistical","Interpretive"]

# Empty list for search results
results = []

# Google every search term,
# extract the number,
# and add them to the results list.
# NOTE(review): here `number` keeps the raw result-stats *text*, not the
# parsed integer as above -- presumably intentional; confirm.
for keyword in keywords:
    print(keyword)
    browser.get(url)
    input_search = browser.find_element(By.NAME,"q")
    input_search.send_keys(keyword)
    input_search.submit()
    number = browser.find_element(By.ID,'result-stats').text
    results.append({'keyword':keyword, 'count':number })

import pandas as pd
pd.DataFrame(results)
bitbucket-google-chat-notifications/router.py | Twb3/bitbucket-google-chat-notifications | 0 | 12763477 | from flask_restful import Resource
from flask import request
import events
from os import environ
import hmac
import hashlib
import logging
logger = logging.getLogger("bbgc")
def not_implemented(event_key):
    """Log an unsupported Bitbucket event key and return a 400 error body."""
    message = "{} is not currently supported.".format(event_key)
    logger.error("%s is not currently supported.", event_key)
    return {"Error": message}, 400
def verify_signature(request_data):
    """Validate the webhook's X-Hub-Signature HMAC against the raw body.

    Returns True when no BBGC_SECRET_TOKEN is configured (verification
    disabled), False when the header is absent, otherwise whether the
    SHA-256 HMAC of `request_data` matches the supplied signature.
    """
    secret_token = environ.get('BBGC_SECRET_TOKEN')
    if secret_token is None:
        # No shared secret configured: signature checking is disabled.
        return True
    if not request.headers.get("X-Hub-Signature"):
        return False
    # Header format is "<algo>=<hexdigest>"; keep only the digest part.
    request_signature = request.headers.get("X-Hub-Signature").split("=")[1]
    digest = hmac.new(
        secret_token.encode("ascii"),
        request_data,
        hashlib.sha256).hexdigest()
    logger.debug("Request Signature: %s", request_signature)
    logger.debug("HMAC Digest: %s", digest)
    # compare_digest is constant-time; plain `==` leaked timing
    # information an attacker could use to forge signatures.
    return hmac.compare_digest(digest, request_signature)
class RequestRouter(Resource):
    """Single webhook endpoint: validates incoming Bitbucket requests and
    dispatches them to the event handlers."""

    def post(self):
        """Handle one Bitbucket webhook POST and route it by event key."""
        data = request.json
        raw_data = request.data

        # Answer Bitbucket's "test connection" ping before any signature
        # verification.
        if request.headers.get("X-Event-Key") == "diagnostics:ping":
            if "test" in data:
                return {"test": "successful"}, 200

        if not verify_signature(raw_data):
            logger.error("Request signatures do not match!")
            return {"error": "signature does not match"}, 401

        if "eventKey" not in data:
            logger.debug(
                "eventKey missing, not a valid bitbucket request: %s", data)
            return {"error": "not a valid bitbucket request"}, 400

        # Mirror events are unsupported; log and return the 400 body.
        if data["eventKey"].startswith("mirror:"):
            not_implemented(data["eventKey"])

        return events.handle_event(data, request.args)
| 2.359375 | 2 |
orcinus/gui/tooltip.py | geem-lab/orcinus | 7 | 12763478 | <gh_stars>1-10
#!/usr/bin/python3
"""Simple tooltip widget."""
from tkinter import TclError
from tkinter import Toplevel
from tkinter.ttk import Frame
from tkinter.ttk import Label
# http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml
class Tooltip(Frame):
    """Simple tooltip.

    Shows a small undecorated ``Toplevel`` window containing a text label
    near the widget it is attached to.

    NOTE(review): this class subclasses ``Frame`` but never calls
    ``Frame.__init__``, so instances are used as plain objects rather than
    real widgets -- confirm whether the ``Frame`` base is actually needed.
    """
    def __init__(self, widget):
        """Construct object.

        :param widget: the Tk widget this tooltip is attached to.
        """
        self.widget = widget
        # Toplevel window holding the tip text; None while hidden.
        self.tipwindow = None
        self.id = None
        self.x = self.y = 0
    def show_tip(self, text):
        """Display text in tooltip window."""
        self.text = text
        # Nothing to do if the tip is already visible or there is no text.
        if self.tipwindow or not self.text:
            return
        # Position the tip slightly below and to the right of the widget's
        # insertion cursor, in screen coordinates.
        x, y, cx, cy = self.widget.bbox("insert")
        x = x + self.widget.winfo_rootx() + 27
        y = y + cy + self.widget.winfo_rooty() + 27
        self.tipwindow = tw = Toplevel(self.widget)
        # Remove window manager decorations (title bar, borders).
        tw.wm_overrideredirect(1)
        tw.wm_geometry("+%d+%d" % (x, y))
        try:
            # For Mac OS: style the window as a help balloon that does not
            # steal focus.  Harmless TclError elsewhere.
            tw.tk.call(
                "::tk::unsupported::MacWindowStyle",
                "style",
                tw._w,
                "help",
                "noActivates",
            )
        except TclError:
            pass
        label = Label(
            tw,
            text=self.text,
            justify="left",
            background="#ffffe0",
            relief="solid",
            borderwidth=1,
        )
        label.pack(ipadx=1)
    def hide_tip(self):
        """Hide tooltip."""
        # Clear the handle first so a re-entrant show_tip() sees it gone.
        tw = self.tipwindow
        self.tipwindow = None
        if tw:
            tw.destroy()
tw.destroy()
def create_tooltip(widget, text):
    """Attach a Tooltip to *widget*: shown on mouse enter, hidden on leave."""
    tip = Tooltip(widget)
    widget.bind("<Enter>", lambda event: tip.show_tip(text))
    widget.bind("<Leave>", lambda event: tip.hide_tip())
| 3.15625 | 3 |
pyMightLED/pwm_controller.py | iorodeo/pyMightLED | 0 | 12763479 | <reponame>iorodeo/pyMightLED<filename>pyMightLED/pwm_controller.py
"""
Copyright 2010 IO Rodeo Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import led_controller
class PwmController(object):
    """
    Controls LED intensity using PWM based on the strobe mode of the
    Mightex LED controllers.
    """

    def __init__(self, port, freq=1000, iset=(1000, 1000, 1000, 1000)):
        """
        port = serial port of the LED controller, e.g. '/dev/ttyUSB0'
        freq = PWM frequency in Hz
        iset = per-channel strobe current settings (one entry per channel)
        """
        # BUG FIX: iset defaults to a tuple; the previous list default was a
        # mutable default argument shared between all instances.
        self._createEnabledList()
        self.freq = float(freq)
        self.iset = iset
        self.ledController = led_controller.LedController(port)
        # Start from a known state: all channels disabled, maximum current
        # limit, zero duty cycle.
        self.disableAll()
        self.setImaxAll(led_controller.MAX_CURRENT)
        self.setValueAll([0, 0, 0, 0])

    def _createEnabledList(self):
        """Create the per-channel enabled-state bookkeeping list."""
        self.enabledList = []
        for i in range(led_controller.NUM_CHANNELS):
            self.enabledList.append(False)

    def enable(self, chan):
        """Put 1-based channel `chan` into strobe mode and mark it enabled."""
        self.ledController.setMode(chan, 'strobe')
        self.enabledList[chan - 1] = True

    def disable(self, chan):
        """Disable 1-based channel `chan` and mark it disabled."""
        self.ledController.setMode(chan, 'disable')
        self.enabledList[chan - 1] = False

    def enableAll(self):
        """Enable every channel."""
        for i in range(led_controller.NUM_CHANNELS):
            self.enable(i + 1)

    def disableAll(self):
        """Disable every channel."""
        for i in range(led_controller.NUM_CHANNELS):
            self.disable(i + 1)

    def getPeriod(self):
        """
        Calculates the PWM period in us (truncated to an integer).
        """
        period = 1.0 / float(self.freq)
        period = period * 1.0e6
        return int(period)

    def setValue(self, chan, value):
        """
        Set the output value for the given channel.

        chan  = channel number 1, 2, 3 or 4
        value = channel duty cycle (float between 0 and 1)
        """
        period = self.getPeriod()
        timeHigh = int(value * period)
        timeLow = period - timeHigh
        if timeHigh == 0:
            # NOTE(review): disabling here also clears the enabled flag, so a
            # later nonzero setValue() will not re-enable the channel until
            # enable() is called again -- confirm this is intended.
            self.disable(chan)
        else:
            iset = self.iset[chan - 1]
            # Two-step strobe profile: iset for timeHigh us, then 0 for
            # timeLow us, giving the requested duty cycle.
            self.ledController.setStrobeModeProfile(chan, 0, iset, timeHigh)
            self.ledController.setStrobeModeProfile(chan, 1, 0, timeLow)
            if self.enabledList[chan - 1]:
                self.enable(chan)

    def setValueAll(self, valueList):
        """
        Set the output values for all channels (index 0 -> channel 1).
        """
        for i, value in enumerate(valueList):
            self.setValue(i + 1, value)

    def setImax(self, chan, imax):
        """Set the maximum strobe current for channel `chan`."""
        self.ledController.setStrobeModeParams(chan, imax, 'forever')

    def setImaxAll(self, imax):
        """Set the maximum strobe current on every channel."""
        for i in range(led_controller.NUM_CHANNELS):
            self.setImax(i + 1, imax)
# -----------------------------------------------------------------------------
# Smoke test: pulse channel 1 at 10% duty cycle for one second, then stop.
if __name__ == '__main__':
    import time
    dev = PwmController('/dev/ttyUSB0')
    dev.enable(1)
    dev.setValue(1,0.1)
    time.sleep(1)
    dev.disable(1)
| 2.578125 | 3 |
ninjarmmpy/__init__.py | StuffbyYuki/ninjarmmpy | 2 | 12763480 | from ninjarmmpy.client import Client | 1.023438 | 1 |
metadeploy/api/migrations/0053_merge_20190213_1622.py | sfdc-qbranch/MetaDeploy | 33 | 12763481 | # Generated by Django 2.1.7 on 2019-02-13 16:22
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration reconciling two divergent 0052 branches; no schema changes."""
    # Both parent branches must be applied before this migration.
    dependencies = [
        ("api", "0052_merge_20190213_1447"),
        ("api", "0052_merge_20190213_1501"),
    ]
    # Merge migrations carry no operations of their own.
    operations = []
| 1.304688 | 1 |
purchaseRequests/apps.py | MLavrentyev/TeamManager | 1 | 12763482 | from django.apps import AppConfig
class PurchaserequestsConfig(AppConfig):
    """Django application configuration for the purchaseRequests app."""
    name = 'purchaseRequests'
| 1.203125 | 1 |
motor.py | hazimgharib21/RaspberryPiLineFollowRobot | 0 | 12763483 | # -----------------------------------------------------------------------------
# This class is for HAT-MDD10
# https://www.cytron.io/p-hat-mdd10
# -----------------------------------------------------------------------------
import RPi.GPIO as GPIO
import time
class motor:
    """
    Dual DC motor driver for the Cytron HAT-MDD10
    (https://www.cytron.io/p-hat-mdd10).

    Speeds are signed duty-cycle percentages; the sign selects direction
    and the magnitude (0-100) sets the PWM duty cycle.
    """

    def __init__(self, motorDir1=26, motorDir2=24, motorPWM1=12, motorPWM2=13):
        """Configure the GPIO pins and start both PWM outputs at 0% duty.

        Pin numbers use BCM numbering.
        """
        self.pwm1 = motorPWM1
        self.pwm2 = motorPWM2
        self.dir1 = motorDir1
        self.dir2 = motorDir2
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        time.sleep(1)
        GPIO.setup(4, GPIO.OUT)
        GPIO.setup(self.pwm2, GPIO.OUT)
        GPIO.setup(self.pwm1, GPIO.OUT)
        GPIO.setup(self.dir1, GPIO.OUT)
        GPIO.setup(self.dir2, GPIO.OUT)
        # Pulse GPIO4 once -- presumably a board enable/reset line on the
        # HAT; TODO confirm against the HAT-MDD10 pinout.
        GPIO.output(4, GPIO.HIGH)
        time.sleep(1)
        GPIO.output(4, GPIO.LOW)
        self.motorSpeedLeft = 0.0
        self.motorSpeedRight = 0.0
        # 100 Hz PWM on both motor channels, started at 0% duty.
        self.motor1 = GPIO.PWM(self.pwm1, 100)
        self.motor1.start(0)
        self.motor1.ChangeDutyCycle(0)
        self.motor2 = GPIO.PWM(self.pwm2, 100)
        self.motor2.start(0)
        self.motor2.ChangeDutyCycle(0)

    # move the motor
    def move(self):
        """Apply the stored signed speeds to the direction and PWM pins.

        BUG FIX: the original negated the speed IN PLACE when it was
        negative, so a second call to move() saw a positive value and
        silently flipped the motor direction.  Working on local copies
        makes move() idempotent.
        """
        speed_left = self.motorSpeedLeft
        if speed_left < 0:
            GPIO.output(self.dir1, GPIO.HIGH)
            self.motor1.ChangeDutyCycle(-speed_left)
        else:
            GPIO.output(self.dir1, GPIO.LOW)
            self.motor1.ChangeDutyCycle(speed_left)
        speed_right = self.motorSpeedRight
        if speed_right < 0:
            GPIO.output(self.dir2, GPIO.LOW)
            self.motor2.ChangeDutyCycle(-speed_right)
        else:
            GPIO.output(self.dir2, GPIO.HIGH)
            self.motor2.ChangeDutyCycle(speed_right)

    # set left motor speed
    def setLeftMotorSpeed(self, leftSpeed):
        """Store the signed left-motor speed; takes effect on next move()."""
        self.motorSpeedLeft = leftSpeed

    # set right motor speed
    def setRightMotorSpeed(self, rightSpeed):
        """Store the signed right-motor speed; takes effect on next move()."""
        self.motorSpeedRight = rightSpeed

    # stop and cleanup pi GPIO
    def clear(self):
        """Stop both motors and release all GPIO resources."""
        self.motor1.ChangeDutyCycle(0)
        self.motor2.ChangeDutyCycle(0)
        GPIO.cleanup()
| 2.71875 | 3 |
src/probabilistic_models/proba_model_test.py | pfreifer/zxcvbn | 0 | 12763484 | <gh_stars>0
import pickle
import probabilistic_models.grammar_utils as gru
import probabilistic_models.random_set as rs
from probabilistic_models import grammars
from probabilistic_models.probabilistic_model import probabilistic_model_guesses
# Toy frequency lists (name -> words ordered from most to least frequent)
# used to exercise the ranking helpers below.
fl = {
    "1": "aaaaa123bb,ccc**azerty".split(","),
    "2": "123jaaj!,888abc320,320lll123".split(",")
}
def build_ranked_dict(ordered_list):
    """Map each word in *ordered_list* to its 1-based rank."""
    ranks = {}
    for rank, word in enumerate(ordered_list, start=1):
        ranks[word] = rank
    return ranks
# Global registry of ranked dictionaries, keyed by frequency-list name.
RD = {}
def add_frequency_lists(frequency_lists_):
    """Build a ranked dict for each frequency list and register it in RD."""
    for list_name in frequency_lists_:
        RD[list_name] = build_ranked_dict(frequency_lists_[list_name])
if __name__ == "__main__":
    # Register the toy frequency lists before scoring.
    add_frequency_lists(fl)
    # grammars.construct_grammar_model()
    # rs.scores(1000000)
    # Load the pre-trained character-bigram and structure-bigram models.
    # NOTE(review): pickle.load assumes these files are trusted, and the
    # file handles are never closed explicitly.
    (cb_counter, Q) = pickle.load(open("cb_dictionary.p", "rb"))
    (sb_counter, B) = pickle.load(open("sb_dictionary.p", "rb"))
    print("cloudpasswordmoon" in B)
    print(gru.score("qwerty", cb_counter, sb_counter, Q, B))
    print(gru.score("abc123", cb_counter, sb_counter, Q, B))
    print(probabilistic_model_guesses("password"))
| 2.546875 | 3 |
notebooks-text-format/linreg_hierarchical_non_centered_numpyro.py | arpitvaghela/probml-notebooks | 166 | 12763485 | <reponame>arpitvaghela/probml-notebooks
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/linreg_hierarchical_non_centered_numpyro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="f_py_lrTPdK1"
#
#
# # Hierarchical non-centered Bayesian Linear Regression in NumPyro
#
# The text and code for this notebook are taken directly from [this blog post](https://twiecki.io/blog/2017/02/08/bayesian-hierchical-non-centered/)
# by <NAME>. [Original notebook](https://github.com/twiecki/WhileMyMCMCGentlySamples/blob/master/content/downloads/notebooks/GLM_hierarchical_non_centered.ipynb)
#
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="XcsJEi91Qelr" outputId="8a943870-b8fe-4ef7-aa9f-0006e3266ae7"
# !pip install -q numpyro@git+https://github.com/pyro-ppl/numpyro arviz
# !pip install arviz
# !pip install seaborn
# + [markdown] id="J3PmS3woW962"
# First, we will import the libraries we need to:
# + id="QPTA4cZCPdK1"
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import arviz as az
import numpyro
from numpyro.infer import MCMC, NUTS, Predictive
import numpyro.distributions as dist
from jax import random
sns.set_style('whitegrid')
np.random.seed(123)
# + [markdown] id="JzDno90bHlrO"
# Then, we'll load the data:
# + id="c4BgCIlclQXX"
url = 'https://github.com/twiecki/WhileMyMCMCGentlySamples/blob/master/content/downloads/notebooks/radon.csv?raw=true'
data = pd.read_csv(url)
# + id="17ISOnzPlSR1"
county_names = data.county.unique()
county_idx = data.county_code.values
# + [markdown] id="KdWGECP9PdK1"
# ## The intuitive specification
#
# Usually, hierarchical models are specified in a *centered* way. In a regression model, individual slopes would be centered around a group mean with a certain group variance, which controls the shrinkage:
# + id="R3K2OfGGnWlZ"
def hierarchical_model_centered(county, floor, log_of_radon):
    """Centered-parameterization hierarchical regression of log-radon.

    Per-county intercepts and slopes are sampled directly around the
    group-level means (mu_a, mu_b) with group scales (sigma_a, sigma_b).

    NOTE(review): radon_est indexes with the module-level `county_idx`
    rather than indices derived from the `county` argument -- confirm the
    argument and the global always describe the same rows.
    """
    # Hyperpriors
    mu_a = numpyro.sample("mu_a", dist.Normal(0., 100**2))
    sigma_a = numpyro.sample("sigma_a", dist.HalfCauchy(5.))
    mu_b = numpyro.sample("mu_b", dist.Normal(0., 100**2))
    sigma_b = numpyro.sample("sigma_b", dist.HalfCauchy(5.))
    unique_counties = np.unique(county)
    n_counties = len(unique_counties)
    with numpyro.plate("counties", n_counties):
        # Intercept for each county, distributed around group mean mu_a
        a = numpyro.sample("a", dist.Normal(mu_a, sigma_a))
        # Slope for each county, distributed around group mean mu_b
        b = numpyro.sample("b", dist.Normal(mu_b, sigma_b))
    # Model error
    eps = numpyro.sample("eps", dist.HalfCauchy(scale=5.))
    # Expected value per observation (uses the module-level county_idx)
    radon_est = a[county_idx] + b[county_idx] * floor
    with numpyro.plate("data", len(county)):
        # Data likelihood
        numpyro.sample("obs", dist.Normal(radon_est, eps), obs=log_of_radon)
# + colab={"base_uri": "https://localhost:8080/"} id="pmpzyT74Cj17" outputId="b0540268-3b40-4f57-fffa-d1880b859369"
nuts_kernel = NUTS(hierarchical_model_centered)
mcmc = MCMC(nuts_kernel, num_samples=5000, num_warmup=1000, num_chains=2)
rng_key = random.PRNGKey(0)
mcmc.run(rng_key, data.county.values, data.floor.values, data.log_radon.values)
hierarchical_centered_trace = mcmc.get_samples(True)
# Eliminates the first 1000 samples
hierarchical_centered_trace = {k: v[:, 1000:, :] if len(v.shape)==3 else v[:, 1000:] for k,v in hierarchical_centered_trace.items()}
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="M6hlZ2905Eoo" outputId="5e8a6fe3-6555-4fcb-9e9e-9e39d7bc348a"
inference_data = az.from_numpyro(mcmc)
az.plot_trace(inference_data, compact=True);
# + [markdown] id="OAbZ_QXGPdK2"
# I have seen plenty of traces with terrible convergences but this one might look fine to the unassuming eye. Perhaps `sigma_b` has some problems, so let's look at the Rhat:
# + id="EdTq66JUPdK2" colab={"base_uri": "https://localhost:8080/"} outputId="93b8896f-326b-43b1-b059-a95f79966804"
print('Rhat(sigma_b) = {}'.format(numpyro.diagnostics.gelman_rubin(hierarchical_centered_trace['sigma_b'])))
# + [markdown] id="JHSPBEbQPdK2"
# Not too bad -- well below 1.1. I used to think this wasn't a big deal but <NAME> in his [StanCon 2017 talk](https://www.youtube.com/watch?v=DJ0c7Bm5Djk&feature=youtu.be&t=4h40m9s) makes a strong point that it is actually very problematic. To understand what's going on, let's take a closer look at the slopes `b` and their group variance (i.e. how far they are allowed to move from the mean) `sigma_b`. I'm just plotting a single chain now.
# + id="AzfoQz2RPdK2" colab={"base_uri": "https://localhost:8080/", "height": 268} outputId="f439fe30-1b94-40ed-df80-719878b576dc"
fig, axs = plt.subplots(nrows=2)
axs[0].plot(hierarchical_centered_trace['sigma_b'][1], alpha=.5);
axs[0].set(ylabel='sigma_b');
axs[1].plot(hierarchical_centered_trace['b'][1], alpha=.5);
axs[1].set(ylabel='b');
# + [markdown] id="0zBgOlmnPdK2"
# `sigma_b` seems to drift into this area of very small values and get stuck there for a while. This is a common pattern and the sampler is trying to tell you that there is a region in space that it can't quite explore efficiently. While stuck down there, the slopes `b_i` become all squished together. We've entered **The Funnel of Hell** (it's just called the funnel, I added the last part for dramatic effect).
# + [markdown] id="iTckxwW7PdK2"
# ## The Funnel of Hell (and how to escape it)
#
# Let's look at the joint posterior of a single slope `b` (I randomly chose the 75th one) and the slope group variance `sigma_b`.
# + id="e1gZ_JZSPdK2" colab={"base_uri": "https://localhost:8080/", "height": 441} outputId="2703eeff-e39a-4d4b-b02e-3a46b1034023"
x = pd.Series(hierarchical_centered_trace['b'][:, :, 75].flatten(), name='slope b_75')
y = pd.Series(hierarchical_centered_trace['sigma_b'][:, :].flatten(), name='slope group variance sigma_b')
sns.jointplot(x=x, y=y, ylim=(0, .7));
# + [markdown] id="byYER5es2l_l"
# This makes sense, as the slope group variance goes to zero (or, said differently, we apply maximum shrinkage), individual slopes are not allowed to deviate from the slope group mean, so they all collapse to the group mean.
#
# While this property of the posterior in itself is not problematic, it makes the job extremely difficult for our sampler. Imagine a [Metropolis-Hastings](https://twiecki.github.io/blog/2015/11/10/mcmc-sampling/) exploring this space with a medium step-size (we're using NUTS here but the intuition works the same): in the wider top region we can comfortably make larger jumps to explore the space efficiently. However, once we move to the narrow bottom region we can change `b_75` and `sigma_b` only by tiny amounts. This causes the sampler to become trapped in that region of space. Most of the proposals will be rejected because our step-size is too large for this narrow part of the space and exploration will be very inefficient.
#
# You might wonder if we could somehow choose the step-size based on the denseness (or curvature) of the space. Indeed that's possible and it's called [Riemannian HMC](https://arxiv.org/abs/0907.1100). It works very well but is quite costly to run. Here, we will explore a different, simpler method.
#
# Finally, note that this problem does not exist for the intercept parameters `a`. Because we can determine individual intercepts `a_i` with enough confidence, `sigma_a` is not small enough to be problematic. Thus, the funnel of hell can be a problem in hierarchical models, but it does not have to be. (Thanks to <NAME> for pointing this out).
#
#
# ## Reparameterization
#
# If we can't easily make the sampler step-size adjust to the region of space, maybe we can adjust the region of space to make it simpler for the sampler? This is indeed possible and quite simple with a small reparameterization trick, we will call this the *non-centered* version.
# + id="HZp-OZ_RLWxN"
def hierarchical_model_non_centered(county, floor, log_of_radon):
    """Non-centered parameterization of the hierarchical radon model.

    Per-county parameters are expressed as standard-normal offsets scaled
    by the group sigma and shifted by the group mean, which decorrelates
    the offsets from the group scales and removes the posterior "funnel".

    NOTE(review): radon_est indexes with the module-level `county_idx`
    rather than indices derived from the `county` argument -- confirm the
    argument and the global always describe the same rows.
    """
    # Hyperpriors
    mu_a = numpyro.sample("mu_a", dist.Normal(0., 100**2))
    sigma_a = numpyro.sample("sigma_a", dist.HalfCauchy(5.))
    mu_b = numpyro.sample("mu_b", dist.Normal(0., 100**2))
    sigma_b = numpyro.sample("sigma_b", dist.HalfCauchy(5.))
    unique_counties = np.unique(county)
    n_counties = len(unique_counties)
    with numpyro.plate("counties", n_counties):
        # Intercept for each county: standard-normal offset scaled/shifted
        # around the group mean mu_a
        a_offset = numpyro.sample("a_offset", dist.Normal(0, 1))
        a = numpyro.deterministic("a", mu_a + a_offset * sigma_a)
        # Slope for each county: standard-normal offset scaled/shifted
        # around the group mean mu_b
        b_offset = numpyro.sample("b_offset", dist.Normal(0, 1))
        b = numpyro.deterministic("b", mu_b + b_offset * sigma_b)
    # Model error
    eps = numpyro.sample("eps", dist.HalfCauchy(scale=5.))
    # Expected value per observation (uses the module-level county_idx)
    radon_est = a[county_idx] + b[county_idx] * floor
    with numpyro.plate("data", len(county)):
        # Data likelihood
        numpyro.sample("obs", dist.Normal(radon_est, eps), obs=log_of_radon)
# + id="eCnNxlmD2g-G" colab={"base_uri": "https://localhost:8080/"} outputId="a9df6771-8bfc-4d6f-9ef7-dc1a04c9f9ed"
nuts_kernel = NUTS(hierarchical_model_non_centered)
mcmc = MCMC(nuts_kernel, num_samples=5000, num_warmup=1000, num_chains=2)
mcmc.run(rng_key, data.county.values, data.floor.values, data.log_radon.values)
hierarchical_non_centered_trace = mcmc.get_samples(True)
hierarchical_non_centered_trace = {k: v[:, 1000:, :] if len(v.shape)==3 else v[:, 1000:] for k,v in hierarchical_non_centered_trace.items()}
# + [markdown] id="3Be9WYvFPdK3"
# Pay attention to the definitions of `a_offset`, `a`, `b_offset`, and `b` and compare them to before (commented out). What's going on here? It's pretty neat actually. Instead of saying that our individual slopes `b` are normally distributed around a group mean (i.e. modeling their absolute values directly), we can say that they are offset from a group mean by a certain value (`b_offset`; i.e. modeling their values relative to that mean). Now we still have to consider how far from that mean we actually allow things to deviate (i.e. how much shrinkage we apply). This is where `sigma_b` makes a comeback. We can simply multiply the offset by this scaling factor to get the same effect as before, just under a different parameterization. For a more formal introduction, see e.g. [Betancourt & Girolami (2013)](https://arxiv.org/pdf/1312.0906.pdf).
#
# Critically, `b_offset` and `sigma_b` are now mostly independent. This will become more clear soon. Let's first look at if this transform helped our sampling:
# + id="zzrN4osl2kMq" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="a46c60da-cf05-4382-9603-7f7b87526fc9"
var_names = ['a', 'b', 'mu_a', 'mu_b', 'sigma_a', 'sigma_b', 'eps']
inference_data = az.from_numpyro(mcmc)
az.plot_trace(inference_data, var_names=var_names, compact=True);
# + [markdown] id="b1lMZjlxPdK3"
# That looks much better as also confirmed by the joint plot:
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="_dcp7FYr2-dH" outputId="892efbac-6411-4b51-8d94-2641d6fcb174"
fig, axs = plt.subplots(ncols=2, sharex=True, sharey=True)
x = pd.Series(hierarchical_centered_trace['b'][:, :, 75].flatten(), name='slope b_75')
y = pd.Series(hierarchical_centered_trace['sigma_b'][:, :].flatten(), name='slope group variance sigma_b')
axs[0].plot(x, y, '.');
axs[0].set(title='Centered', ylabel='sigma_b', xlabel='b_75')
x = pd.Series(hierarchical_non_centered_trace['b'][:, :, 75].flatten(), name='slope b_75')
y = pd.Series(hierarchical_non_centered_trace['sigma_b'].flatten(), name='slope group variance sigma_b')
axs[1].plot(x, y, '.');
axs[1].set(title='Non-centered', xlabel='b_75');
# + [markdown] id="Q_W701t6PdK3"
# To really drive this home, let's also compare the `sigma_b` marginal posteriors of the two models:
# + id="XJxFSFbnPdK3" colab={"base_uri": "https://localhost:8080/", "height": 313} outputId="ae23d007-188a-435a-a2c9-d786dc18708e"
az.plot_kde(np.stack([hierarchical_centered_trace['sigma_b'], hierarchical_non_centered_trace['sigma_b'], ]).T)
plt.axvline(hierarchical_centered_trace['sigma_b'].mean(), color='b', linestyle='--')
plt.axvline(hierarchical_non_centered_trace['sigma_b'].mean(), color='g', linestyle='--')
plt.legend(['Centered', 'Non-cenetered', 'Centered posterior mean', 'Non-centered posterior mean']);
plt.xlabel('sigma_b'); plt.ylabel('Probability Density');
# + [markdown] id="QXe9_4vIPdK3"
# That's crazy -- there's a large region of very small `sigma_b` values that the sampler could not even explore before. In other words, our previous inferences ("Centered") were severely biased towards higher values of `sigma_b`. Indeed, if you look at the [previous blog post](https://twiecki.github.io/blog/2014/03/17/bayesian-glms-3/) the sampler never even got stuck in that low region causing me to believe everything was fine. These issues are hard to detect and very subtle, but they are meaningful as demonstrated by the sizable difference in posterior mean.
#
# But what does this concretely mean for our analysis? Over-estimating `sigma_b` means that we have a biased (=false) belief that we can tell individual slopes apart better than we actually can. There is less information in the individual slopes than what we estimated.
# + [markdown] id="3G2KQzuvPdK3"
# ### Why does the reparameterized model work better?
#
# To more clearly understand why this model works better, let's look at the joint distribution of `b_offset`:
# + colab={"base_uri": "https://localhost:8080/", "height": 441} id="Uje-j5FJ5WM0" outputId="b0d4f19e-f3e5-4fb1-ccfd-41954d36caa0"
x = pd.Series(hierarchical_non_centered_trace['b'][:, :, 75].flatten(), name='slope b_offset_75')
y = pd.Series(hierarchical_non_centered_trace['sigma_b'][:, :].flatten(), name='slope group variance sigma_b')
sns.jointplot(x=x, y=y, ylim=(0, .7));
# + [markdown] id="iUUIWErkPdK3"
# This is the space the sampler sees; you can see how the funnel is flattened out. We can freely change the (relative) slope offset parameters even if the slope group variance is tiny as it just acts as a scaling parameter.
#
# Note that the funnel is still there -- it's a perfectly valid property of the model -- but the sampler has a much easier time exploring it in this different parameterization.
# + [markdown] id="5Klof7DEPdK3"
# ## Why hierarchical models are Bayesian
#
# Finally, I want to take the opportunity to make another point that is not directly related to hierarchical models but can be demonstrated quite well here.
#
# Usually when talking about the perils of Bayesian statistics we talk about priors, uncertainty, and flexibility when coding models using Probabilistic Programming. However, an even more important property is rarely mentioned because it is much harder to communicate. <NAME> touched on this point in his tweet:
# + [markdown] id="i4dat7gDPdK3"
# <blockquote class="twitter-tweet" data-lang="en"><p lang="en" dir="ltr">It's interesting that many summarize Bayes as being about priors; but real power is its focus on integrals/expectations over maxima/modes</p>— <NAME> (@rosstaylor90) <a href="https://twitter.com/rosstaylor90/status/827263854002401281">February 2, 2017</a></blockquote>
# <script async src="//platform.twitter.com/widgets.js" charset="utf-8"></script>
# + [markdown] id="4tJwmkxRPdK3"
# <NAME> makes a similar point when he says ["Expectations are the only thing that make sense."](https://www.youtube.com/watch?v=pHsuIaPbNbY&t=8s)
#
# But what's wrong with maxima/modes? Aren't those really close to the posterior mean (i.e. the expectation)? Unfortunately, that's only the case for the simple models we teach to build up intuitions. In complex models, like the hierarchical one, the MAP can be far away and not be interesting or meaningful at all.
#
# Let's compare the posterior mode (i.e. the MAP) to the posterior mean of our hierachical linear regression model:
# + id="df4orfyOPdK3" colab={"base_uri": "https://localhost:8080/"} outputId="37e89240-dd0f-45c6-cdb3-fe6ba7cb6958"
hierarchical_centered_trace['b'].reshape(8000,-1).mean(axis=0)
# + id="rsadfvlSPdK3" colab={"base_uri": "https://localhost:8080/"} outputId="b1ce064b-25fb-4a3e-b427-bca426c2a275"
hierarchical_centered_trace['sigma_b'].reshape(1,-1).std(axis=1)
# + [markdown] id="muQpdSipPdK3"
# As you can see, the slopes are all identical and the group slope variance is effectively zero. The reason is again related to the funnel. The MAP only cares about the probability **density** which is highest at the bottom of the funnel.
#
# But if you could only choose one point in parameter space to summarize the posterior above, would this be the one you'd pick? Probably not.
#
# Let's instead look at the **Expected Value** (i.e. posterior mean) which is computed by integrating probability **density** and **volume** to provide probability **mass** -- the thing we really care about. Under the hood, that's the integration performed by the MCMC sampler.
# + colab={"base_uri": "https://localhost:8080/"} id="5uXWUhPbnowC" outputId="af8ac9fb-e6d4-4c23-8a69-b7ec280156c4"
hierarchical_non_centered_trace['b'].reshape(8000,-1).mean(axis=0)
# + id="9h-FzVGJPdK3" colab={"base_uri": "https://localhost:8080/"} outputId="38fa1e6c-99e7-4d98-f8cb-bf4d8d9f391b"
hierarchical_non_centered_trace['sigma_b'].reshape(1,-1).mean(axis=1)
# + [markdown] id="-AL504GdPdK3"
# Quite a difference. This also explains why it can be a bad idea to use the MAP to initialize your sampler: in certain models the MAP is not at all close to the region you want to explore (i.e. the "typical set").
#
# This strong divergence of the MAP and the Posterior Mean does not only happen in hierarchical models but also in high dimensional ones, where our intuitions from low-dimensional spaces gets twisted in serious ways. [This talk by <NAME>](https://www.youtube.com/watch?v=pHsuIaPbNbY&t=8s) makes the point quite nicely.
#
# So why do people -- especially in Machine Learning -- still use the MAP/MLE? As we all learned in high school first hand, integration is much harder than differentation. This is really the only reason.
#
# Final disclaimer: This might provide the impression that this is a property of being in a Bayesian framework, which is not true. Technically, we can talk about Expectations vs Modes irrespective of that. Bayesian statistics just happens to provide a very intuitive and flexible framework for expressing and estimating these models.
#
# See [here](https://rawgithub.com/twiecki/WhileMyMCMCGentlySamples/master/content/downloads/notebooks/GLM_hierarchical_non_centered.ipynb) for the underlying notebook of this blog post.
# + [markdown] id="SzMHO6fNPdK3"
# ## Acknowledgements
#
# Thanks to [<NAME>](https://twitter.com/jonsedar) for helpful comments on an earlier draft.
| 2.0625 | 2 |
server/app/endpoints/sessions.py | sublinus/pngme | 0 | 12763486 | <filename>server/app/endpoints/sessions.py
import hashlib
from fastapi import APIRouter, Request, Depends, WebSocket, WebSocketDisconnect
from pydantic import BaseModel, Field
from typing import List, Dict
from app.schemas import Client, Session, UserView, Join, Leave
router = APIRouter()
sessions : Dict[str, Session] = {}
def encoded_ip(ip: str) -> str:
return hashlib.md5(str(ip).encode("utf-8")).hexdigest()
def get_encoded_ip(websocket: WebSocket) -> str:
client_host = websocket.headers.get('x-client-ip')
return encoded_ip(client_host)
def get_user_session(session_id : str = Depends(get_encoded_ip)) -> Session:
if not sessions.get(session_id):
sessions[session_id] = Session(active_connections={})
return sessions[session_id]
@router.get("/", response_model=List[UserView])
async def get_session(request: Request):
session = sessions.get(encoded_ip(request.headers.get('x-client-ip')))
if not session:
return []
return list(map(lambda client: {"client_id": client[0], **vars(client[1])}, session.active_connections.items()))
@router.get("/all", response_model=List[str])
async def get_sessions():
return list(sessions.keys())
@router.websocket("/join/{client_id}")
async def join(websocket: WebSocket, client_id: str, name: str = "anon", client_type: str = "pinger", session: Session = Depends(get_user_session)):
await session.connect(websocket, client_id, name, client_type)
await session.broadcast(Join(user={"client_id": client_id, "client_type": client_type, "name": name}).json())
try:
while True:
data = await websocket.receive_text()
await session.broadcast(data)
except WebSocketDisconnect:
session.disconnect(client_id)
if len(session.active_connections) == 0:
del sessions[get_encoded_ip(websocket)]
else:
await session.broadcast(Leave(user={"client_id": client_id, "client_type": client_type, "name": name}).json())
| 2.296875 | 2 |
src/storage-preview/azext_storage_preview/vendored_sdks/azure_storagev2/fileshare/v2020_02_10/_shared/request_handlers.py | Mannan2812/azure-cli-extensions | 2,728 | 12763487 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import ( # pylint: disable=unused-import
Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
TYPE_CHECKING
)
import logging
from os import fstat
from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
import isodate
from azure.core.exceptions import raise_with_traceback
_LOGGER = logging.getLogger(__name__)
def serialize_iso(attr):
    """Serialize Datetime object into ISO-8601 formatted string.

    :param Datetime attr: Object to be serialized.
    :rtype: str
    :raises: ValueError if format invalid.
    """
    # Falsy input (None, empty string) serializes to None.
    if not attr:
        return None
    if isinstance(attr, str):
        # Accept a pre-formatted string by round-tripping it through the
        # ISO-8601 parser.
        attr = isodate.parse_datetime(attr)
    try:
        utc = attr.utctimetuple()
        # Only 4-digit years are representable in the output format.
        if utc.tm_year > 9999 or utc.tm_year < 1:
            raise OverflowError("Hit max or min date")
        date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
            utc.tm_year, utc.tm_mon, utc.tm_mday,
            utc.tm_hour, utc.tm_min, utc.tm_sec)
        # Always emit the UTC designator since utctimetuple() was used.
        return date + 'Z'
    except (ValueError, OverflowError) as err:
        msg = "Unable to serialize datetime object."
        raise_with_traceback(ValueError, msg, err)
    except AttributeError as err:
        # Raised when attr lacks utctimetuple(), i.e. is not datetime-like.
        msg = "ISO-8601 object must be valid Datetime object."
        raise_with_traceback(TypeError, msg, err)
def get_length(data):
    """Best-effort length of *data* in bytes; returns None if undeterminable.

    Tries, in order: len(), the file descriptor size via fileno()/fstat(),
    and finally seek/tell arithmetic (restoring the stream position).
    """
    length = None
    # Check if object implements the __len__ method, covers most input cases such as bytearray.
    try:
        length = len(data)
    except: # pylint: disable=bare-except
        pass
    # NOTE(review): `not length` is also true for length == 0, so an empty
    # sized object falls through to the stream probes below (they fail
    # harmlessly and 0 is still returned).
    if not length:
        # Check if the stream is a file-like stream object.
        # If so, calculate the size using the file descriptor.
        try:
            fileno = data.fileno()
        except (AttributeError, UnsupportedOperation):
            pass
        else:
            try:
                return fstat(fileno).st_size
            except OSError:
                # Not a valid fileno, may be possible requests returned
                # a socket number?
                pass
        # If the stream is seekable and tell() is implemented, calculate the stream size.
        try:
            current_position = data.tell()
            data.seek(0, SEEK_END)
            length = data.tell() - current_position
            # Restore the caller's stream position.
            data.seek(current_position, SEEK_SET)
        except (AttributeError, UnsupportedOperation):
            pass
    return length
def read_length(data):
    """Read *data* to exhaustion and return ``(length, content)``.

    Supports file-like objects exposing ``read`` as well as plain
    iterables of byte chunks.

    :raises ValueError: if the content length cannot be determined.
    """
    try:
        if hasattr(data, 'read'):
            read_data = b''
            for chunk in iter(lambda: data.read(4096), b""):
                read_data += chunk
            return len(read_data), read_data
        if hasattr(data, '__iter__'):
            read_data = b''
            for chunk in data:
                read_data += chunk
            return len(read_data), read_data
    except Exception:  # pylint: disable=broad-except
        # BUG FIX: previously a bare `except:`, which also swallowed
        # SystemExit and KeyboardInterrupt.  Any read failure falls
        # through to the explicit error below.
        pass
    raise ValueError("Unable to calculate content length, please specify.")
def validate_and_format_range_headers(
        start_range, end_range, start_range_required=True,
        end_range_required=True, check_content_md5=False, align_to_page=False):
    """Validate a byte range and build the corresponding request headers.

    :param int start_range: inclusive start offset (may be None).
    :param int end_range: inclusive end offset (may be None).
    :param bool start_range_required: require start_range to be present.
    :param bool end_range_required: require end_range to be present.
    :param bool check_content_md5: request MD5 validation (complete range < 4MB only).
    :param bool align_to_page: enforce 512-byte page alignment (page blobs).
    :returns: tuple of (range header value or None, range validation value or None).
    :raises ValueError: on missing, misaligned or oversized ranges.
    """
    # If end range is provided, start range must be provided
    if (start_range_required or end_range is not None) and start_range is None:
        raise ValueError("start_range value cannot be None.")
    if end_range_required and end_range is None:
        raise ValueError("end_range value cannot be None.")

    # Page ranges must be 512 aligned
    if align_to_page:
        if start_range is not None and start_range % 512 != 0:
            raise ValueError("Invalid page blob start_range: {0}. "
                             "The size must be aligned to a 512-byte boundary.".format(start_range))
        if end_range is not None and end_range % 512 != 511:
            raise ValueError("Invalid page blob end_range: {0}. "
                             "The size must be aligned to a 512-byte boundary.".format(end_range))

    # Format based on whether end_range is present
    range_header = None
    if end_range is not None:
        range_header = 'bytes={0}-{1}'.format(start_range, end_range)
    elif start_range is not None:
        range_header = "bytes={0}-".format(start_range)

    # Content MD5 can only be provided for a complete range less than 4MB in size
    range_validation = None
    if check_content_md5:
        if start_range is None or end_range is None:
            # BUG FIX: corrected typo "requied" -> "required" in the message.
            raise ValueError("Both start and end range required for MD5 content validation.")
        if end_range - start_range > 4 * 1024 * 1024:
            raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.")
        range_validation = 'true'

    return range_header, range_validation
def add_metadata_headers(metadata=None):
    # type: (Optional[Dict[str, str]]) -> Dict[str, str]
    """Build ``x-ms-meta-*`` request headers from a metadata mapping.

    Keys and values are whitespace-stripped; falsy values are kept as-is.
    Returns an empty dict when no metadata is supplied.
    """
    if not metadata:
        return {}
    return {
        'x-ms-meta-{}'.format(name.strip()): value.strip() if value else value
        for name, value in metadata.items()
    }
| 2.265625 | 2 |
atomic/bin/util/generate_mission_times.py | usc-psychsim/atomic_domain_definitions | 0 | 12763488 | <reponame>usc-psychsim/atomic_domain_definitions
import argparse
import json
import logging
import os
import pandas as pd
from model_learning.util.io import create_clear_dir, get_files_with_extension, get_file_name_without_extension
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__desc__ = 'Loads several CSV log files + corresponding metadata files and generates new CSVs containing the ' \
'mission times.'
OUTPUT_DIR = 'output/csv_mission_times'
MISSION_TIME_PARAM = 'mission_timer'
TIME_STAMP_PARAM = '@timestamp'
CSV_PREFIX = 'processed_'
def get_files(files_dir, extension):
    """Return the list of files to process for the given path.

    :param str files_dir: a single file path or a directory to scan.
    :param str extension: extension used when scanning a directory.
    :rtype: list
    :raises ValueError: if ``files_dir`` is neither a file nor a directory.
    """
    if os.path.isfile(files_dir):
        files = [files_dir]
    elif os.path.isdir(files_dir):
        files = list(get_files_with_extension(files_dir, extension))
    else:
        # BUG FIX: the message previously referenced the global 'args.replays',
        # which mislabeled the path when this was called for metadata files and
        # would raise NameError if the function were reused outside this script.
        raise ValueError('Input path is not a valid file or directory: {}.'.format(files_dir))
    return files
def get_stamp_mission_time(mission_times, timestamp):
    """Return the mission time in effect at *timestamp*.

    ``mission_times`` is a chronologically-ordered list of
    ``(pandas.Timestamp, mission_time)`` pairs; the entry whose timestamp
    most recently precedes *timestamp* wins (the first entry before the
    log starts, the last entry after it ends).
    """
    naive_stamp = timestamp.tz_localize(None)
    effective = mission_times[0][1]
    for stamp, mission_time in mission_times:
        if stamp > naive_stamp:
            return effective
        effective = mission_time
    return effective
if __name__ == '__main__':
    # parse command-line arguments
    parser = argparse.ArgumentParser(description=__desc__)
    parser.add_argument('-r', '--replays', required=True, type=str,
                        help='Directory containing the replay logs or single replay file to process.')
    parser.add_argument('-m', '--metadata', required=True, type=str,
                        help='Directory containing the metadata files or single metadata file to process.')
    parser.add_argument('-o', '--output', type=str, default=OUTPUT_DIR, help='Directory in which to save results.')
    parser.add_argument('-c', '--clear', help='Whether to clear output directories before generating results.',
                        action='store_true')
    parser.add_argument('-v', '--verbosity', action='count', default=0, help='Verbosity level.')
    args = parser.parse_args()
    # sets up log to file
    log_level = logging.WARN if args.verbosity == 0 else logging.INFO if args.verbosity == 1 else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=log_level)
    # create output (one sub-directory per replay source)
    output_dir = os.path.join(args.output, os.path.basename(args.replays))
    create_clear_dir(output_dir, args.clear)
    # checks input files
    csv_files = get_files(args.replays, 'csv')
    meta_files = get_files(args.metadata, 'metadata')
    # pair each CSV log with its metadata file by matching base names
    # (CSV_PREFIX is stripped from the CSV file name before comparing)
    csv_to_meta = {}
    for csv_file in csv_files:
        csv_meta_file = get_file_name_without_extension(csv_file.replace(CSV_PREFIX, '')) + '.metadata'
        for meta_file in meta_files:
            if os.path.basename(meta_file) == csv_meta_file:
                csv_to_meta[csv_file] = meta_file
                break
    not_found_csv = set(csv_files) - set(csv_to_meta.keys())
    logging.info('Could not find matching metadata file for:\n\t{}'.format('\n\t'.join(not_found_csv)))
    logging.info('Processing {} log files from\n\t"{}"\n\t"{}"...'.format(
        len(csv_to_meta), args.replays, args.metadata))
    for csv_file, meta_file in csv_to_meta.items():
        logging.info('Processing "{}" and "{}"...'.format(csv_file, meta_file))
        # reads metadata file, registers mission times
        mission_times = []
        with open(meta_file, 'r') as f:
            for line in f:
                entry = json.loads(line)
                # some messages wrap the JSON payload in a 'message' field
                if 'data' not in entry and 'message' in entry:
                    entry = json.loads(entry['message'])
                if 'data' not in entry:
                    continue
                if MISSION_TIME_PARAM in entry['data'] and \
                        entry['data'][MISSION_TIME_PARAM] != 'Mission Timer not initialized.':
                    # 'mission_timer' is a "MM:SS" string; convert to seconds
                    minutes, seconds = [int(value) for value in entry['data'][MISSION_TIME_PARAM].split(':')]
                    mission_time = minutes * 60 + seconds
                    # skip consecutive entries carrying the same mission time
                    if len(mission_times) > 0 and mission_times[-1][1] == mission_time:
                        continue
                    timestamp = entry[TIME_STAMP_PARAM] if TIME_STAMP_PARAM in entry else entry['header']['timestamp']
                    timestamp = pd.to_datetime(timestamp, infer_datetime_format=True, exact=False)
                    # store timestamps tz-naive so comparisons are consistent
                    timestamp = timestamp.tz_localize(None)
                    mission_times.append((timestamp, mission_time))
        if len(mission_times) == 0:
            logging.info('Could not process file "{}", incorrect timestamps'.format(meta_file))
            continue
        # annotate each CSV row with the mission time in effect at its timestamp
        df = pd.read_csv(csv_file, index_col=0)
        df[TIME_STAMP_PARAM] = pd.to_datetime(df[TIME_STAMP_PARAM], infer_datetime_format=True, exact=False)
        df[MISSION_TIME_PARAM] = df.apply(lambda r: get_stamp_mission_time(mission_times, r[TIME_STAMP_PARAM]), axis=1)
        file_path = os.path.join(output_dir, os.path.basename(csv_file))
        df.to_csv(file_path)
        logging.info('Processed "{}", saved to "{}"'.format(csv_file, file_path))
| 2.578125 | 3 |
pycmap/baseGraph.py | mdashkezari/pycmap | 4 | 12763489 | <reponame>mdashkezari/pycmap
"""
Author: <NAME> <<EMAIL>>
Date: 2019-06-26
Function: Abstraction of base class for graphs.
"""
from abc import ABCMeta, abstractmethod
import os
from .common import inline, get_vizEngine, get_figure_dir
import numpy as np
import pandas as pd
import matplotlib as plt
import matplotlib.cm as cm
from bokeh.io import output_notebook
import plotly
class ValidationException(Exception):
    """Raised when a graph attribute is assigned a value of an invalid type."""
    pass
class BaseGraph(object):
    """
    This is the base class for other visualization classes.
    Use class for inheritance purposes.
    """
    # NOTE(review): '__metaclass__' is the Python-2 mechanism and has no effect
    # under Python 3, so render() is not actually enforced as abstract; use
    # 'class BaseGraph(metaclass=ABCMeta)' to enforce it (behavior change).
    __metaclass__ = ABCMeta

    def __init__(self):
        """
        :param int width: graph's width in pixels.
        :param int height: graph's height in pixels.
        :param array x: input data to be visualized.
        :param array y: input data to be visualized.
        :param array z: input data to be visualized.
        :param array xErr: uncertainty associated with the input data.
        :param array yErr: uncertainty associated with the input data.
        :param array zErr: uncertainty associated with the input data.
        :param str title: the graphs's title.
        :param str xlabel: the graphs's x-axis label.
        :param str ylabel: the graphs's y-axis label.
        :param str zlabel: the graphs's z-axis label.
        :param str legend: the graphs's legend.
        :param str unit: data unit (if applicable).
        :param float vmin: lower bound of data range (applicable to plots like maps and contours).
        :param float vmax: upper bound of data range (applicable to plots like maps and contours).
        :param str cmap: color map (applicable to plots like maps and contours).
        :param str plotlyConfig: plotly config object (only applicable to plotly graphs).
        """
        self.__width = 800
        self.__height = 400
        self.__x = np.array([])
        self.__y = np.array([])
        self.__z = np.array([])
        self.__xErr = np.array([])
        self.__yErr = np.array([])
        self.__zErr = np.array([])
        self.__title = ''
        self.__xlabel = ''
        self.__ylabel = ''
        self.__zlabel = ''
        self.__legend = ''
        self.__unit = ''
        self.__vmin = None
        self.__vmax = None
        self.__cmap = ''
        self.__plotlyConfig = {
                              'showLink': False,
                              'editable': False,
                              'staticPlot': False
                              }
        return

    @abstractmethod
    def render(self):
        """Parent render function; will be extended by derived classes."""
        if inline():
            if get_vizEngine().lower().strip() == 'bokeh': output_notebook()
            if get_vizEngine().lower().strip() == 'plotly': plotly.offline.init_notebook_mode(connected=False)
        else:
            # When not running inline, figures are saved to disk instead.
            figureDir = get_figure_dir()
            if not os.path.exists(figureDir): os.makedirs(figureDir)

    @staticmethod
    def valid_data(data):
        """Validate the input data type; returns (is_valid, message)."""
        res = isinstance(data, list) or isinstance(data, np.ndarray) or isinstance(data, pd.core.series.Series)
        msg = 'The input data should be of type list, or numpy array, or pandas series.'
        return res, msg

    @staticmethod
    def enable_plotly_in_cell():
        """
        Enables plotly on colab cells.
        Presumably, this is not necessary at plotly 4+ (calling fig.show() will be enough).
        Currently, plotly version 3+ is installed on colab.
        """
        import IPython
        # BUG FIX: 'display' is only injected as a builtin inside notebook
        # kernels; import it explicitly so this also works when the module
        # is imported in a plain Python process.
        from IPython.display import display
        from plotly.offline import init_notebook_mode
        display(IPython.core.display.HTML('''
        <script src="/static/components/requirejs/require.js"></script>
        '''))
        init_notebook_mode(connected=False)

    def _save_plotly_(self, go, data, layout):
        """
        Saves a plotly figure on local disk.
        Not meant to be called by user.
        """
        # NOTE(review): relies on 'self.variable' which is not set in __init__;
        # presumably assigned by subclasses — confirm before calling.
        fig = go.Figure(data=data, layout=layout)
        if not self.__plotlyConfig.get('staticPlot'):
            if inline():
                self.enable_plotly_in_cell()
                plotly.offline.iplot(fig, config=self.plotlyConfig)
            else:
                plotly.offline.plot(fig, config=self.plotlyConfig, filename=get_figure_dir() + self.variable + '.html')
        else:
            plotly.io.write_image(fig, get_figure_dir() + self.variable + '.png')

    def _save_figure_factory_(self, fig):
        """
        Saves a plotly figure_factory on local disk.
        Not meant to be called by user.
        """
        fname = 'annotated_heatmap'
        if self.variable is not None and self.variable != '': fname = 'annotated_heatmap_' + self.variable
        if not self.__plotlyConfig.get('staticPlot'):
            if inline():
                plotly.offline.iplot(fig, config=self.plotlyConfig)
            else:
                plotly.offline.plot(fig, config=self.plotlyConfig, filename=get_figure_dir() + fname + '.html')
        else:
            plotly.io.write_image(fig, get_figure_dir() + fname + '.png')

    # --- properties; data-array setters validate type, label setters
    # --- require str, and vmin/vmax require int or float.

    @property
    def width(self):
        return self.__width

    @width.setter
    def width(self, width):
        self.__width = width

    @property
    def height(self):
        return self.__height

    @height.setter
    def height(self, height):
        self.__height = height

    @property
    def x(self):
        return self.__x

    @x.setter
    def x(self, x):
        valid, msg = self.valid_data(x)
        if not valid: raise ValidationException(msg)
        self.__x = x

    @property
    def y(self):
        return self.__y

    @y.setter
    def y(self, y):
        valid, msg = self.valid_data(y)
        if not valid: raise ValidationException(msg)
        self.__y = y

    @property
    def z(self):
        return self.__z

    @z.setter
    def z(self, z):
        valid, msg = self.valid_data(z)
        if not valid: raise ValidationException(msg)
        self.__z = z

    @property
    def xErr(self):
        return self.__xErr

    @xErr.setter
    def xErr(self, xErr):
        valid, msg = self.valid_data(xErr)
        if not valid: raise ValidationException(msg)
        self.__xErr = xErr

    @property
    def yErr(self):
        return self.__yErr

    @yErr.setter
    def yErr(self, yErr):
        valid, msg = self.valid_data(yErr)
        if not valid: raise ValidationException(msg)
        self.__yErr = yErr

    @property
    def zErr(self):
        return self.__zErr

    @zErr.setter
    def zErr(self, zErr):
        valid, msg = self.valid_data(zErr)
        if not valid: raise ValidationException(msg)
        self.__zErr = zErr

    @property
    def title(self):
        return self.__title

    @title.setter
    def title(self, title):
        if not isinstance(title, str): raise ValidationException('title must be of type string.')
        self.__title = title

    @property
    def xlabel(self):
        return self.__xlabel

    @xlabel.setter
    def xlabel(self, xlabel):
        if not isinstance(xlabel, str): raise ValidationException('xlabel must be of type string.')
        self.__xlabel = xlabel

    @property
    def ylabel(self):
        return self.__ylabel

    @ylabel.setter
    def ylabel(self, ylabel):
        if not isinstance(ylabel, str): raise ValidationException('ylabel must be of type string.')
        self.__ylabel = ylabel

    @property
    def zlabel(self):
        return self.__zlabel

    @zlabel.setter
    def zlabel(self, zlabel):
        if not isinstance(zlabel, str): raise ValidationException('zlabel must be of type string.')
        self.__zlabel = zlabel

    @property
    def legend(self):
        return self.__legend

    @legend.setter
    def legend(self, legend):
        if not isinstance(legend, str): raise ValidationException('legend must be of type string.')
        self.__legend = legend

    @property
    def unit(self):
        return self.__unit

    @unit.setter
    def unit(self, unit):
        if not isinstance(unit, str): raise ValidationException('unit must be of type string.')
        self.__unit = unit

    @property
    def vmin(self):
        return self.__vmin

    @vmin.setter
    def vmin(self, vmin):
        if not isinstance(vmin, float) and not isinstance(vmin, int):
            raise ValidationException('vmin must be of type int or float.')
        self.__vmin = vmin

    @property
    def vmax(self):
        return self.__vmax

    @vmax.setter
    def vmax(self, vmax):
        if not isinstance(vmax, float) and not isinstance(vmax, int):
            raise ValidationException('vmax must be of type int or float.')
        self.__vmax = vmax

    @property
    def cmap(self):
        return self.__cmap

    @cmap.setter
    def cmap(self, cmap):
        """Gets cmap as string (matplotlib colormap names) or cmocean colormap and makes it compatible with supported vizEngine."""
        # NOTE(review): matplotlib.cm.get_cmap was deprecated in 3.7 and removed
        # in 3.9; migrating to matplotlib.colormaps[...] may be needed.
        colormap = cm.get_cmap(cmap)
        if get_vizEngine().lower().strip() == 'bokeh':
            # bokeh expects a list of hex colors (a palette)
            paletteName = [plt.colors.rgb2hex(m) for m in colormap(np.arange(colormap.N))]
            cmap = paletteName
        elif get_vizEngine().lower().strip() == 'plotly':
            # plotly expects a [fraction, 'rgb(r, g, b)'] colorscale
            pl_entries = 255
            h = 1.0/(pl_entries-1)
            pl_colorscale = []
            for k in range(pl_entries):
                C = list(map(np.uint8, np.array(colormap(k*h)[:3])*255))
                pl_colorscale.append([k*h, 'rgb'+str((C[0], C[1], C[2]))])
            cmap = pl_colorscale
        self.__cmap = cmap

    @property
    def plotlyConfig(self):
        return self.__plotlyConfig

    @plotlyConfig.setter
    def plotlyConfig(self, plotlyConfig):
        if not isinstance(plotlyConfig, dict):
            raise ValidationException('plotlyConfig must be of type dict.')
        self.__plotlyConfig = plotlyConfig
| 2.875 | 3 |
score/develop/dev_score/block_access_sample_score.py | LoopNewtorkInc/Loopchain | 2 | 12763490 | <reponame>LoopNewtorkInc/Loopchain
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import json
import loopchain.configure as conf
from loopchain.blockchain import ScoreBase
class UserScore(ScoreBase):
    """Sample score demonstrating peer service / score service integration.

    Sample code that retrieves the last block hash and then walks the entire
    chain, querying every block in turn.

    run this: ./peer.py -d -p 7100 -c develop/dev_score
    """
    def invoke(self, transaction, block):
        logging.debug("in block access sample score invoke...")

        # block loop sample by hash: start at the last block and follow
        # each block's prev_block_hash back to the genesis block.
        logging.debug("::block loop sample by hash")
        block_hash = self._score_service.get_last_block_hash()
        total_tx = 0
        total_height = -1
        logging.debug("get last block hash: " + block_hash)
        while block_hash is not None:
            response = self._score_service.get_block_by_hash(block_hash)
            logging.debug("block is: " + str(response))
            if len(response.block_data_json) > 0:
                block_data = json.loads(response.block_data_json)
                logging.debug("[block height: " + str(block_data["height"]) +
                              ", hash: " + str(block_data["block_hash"]) + "]")
                if len(response.tx_data_json) == 1:
                    tx_data = json.loads(response.tx_data_json[0])
                    logging.debug("has tx: " + str(tx_data["tx_hash"]))
                else:
                    logging.debug("has tx: " + str(len(response.tx_data_json)))
                total_height += 1
                total_tx += len(response.tx_data_json)
                block_hash = block_data["prev_block_hash"]
                # stop once the genesis block (height 0) has been visited
                if int(block_data["height"]) == 0:
                    block_hash = None
            else:
                block_hash = None
        logging.debug("\nblock chain height: " + str(total_height) + ", total tx: " + str(total_tx))

        # block loop sample by height: get the last block, then query each
        # block by decreasing height down to 0.
        logging.debug("::block loop sample by height")
        response = self._score_service.get_block()  # null request, return last block
        total_tx = 0
        total_height = -1
        # NOTE(review): 'message_code' is not imported in this module, so this
        # comparison raises NameError at runtime — confirm the intended import
        # (loopchain's message_code module).
        if response.response_code == message_code.Response.success:
            logging.debug("block is: " + str(response))
            block_data = json.loads(response.block_data_json)
            logging.debug("[block height: " + str(block_data["height"]) +
                          ", hash: " + str(block_data["block_hash"]) + "]")
            logging.debug("get last block height: " + str(block_data["height"]))
            block_height = int(block_data["height"])
            logging.debug("block_data_json: " + response.block_data_json)
            while block_height >= 0:
                response = self._score_service.get_block_by_height(block_height)
                logging.debug("in while, block is: " + str(response))
                if len(response.block_data_json) > 0:
                    block_data = json.loads(response.block_data_json)
                    logging.debug("[block height: " + str(block_data["height"]) +
                                  ", hash: " + str(block_data["block_hash"]) + "]")
                    if len(response.tx_data_json) == 1:
                        tx_data = json.loads(response.tx_data_json[0])
                        logging.debug("has tx: " + str(tx_data["tx_hash"]))
                    else:
                        logging.debug("has tx: " + str(len(response.tx_data_json)))
                    total_height += 1
                    total_tx += len(response.tx_data_json)
                    block_height = int(block_data["height"]) - 1
                else:
                    block_height = -1
        logging.debug("\nblock chain height: " + str(total_height) + ", total tx: " + str(total_tx))

    def query(self, params):
        # Sample query: simply echoes the parameters back.
        logging.debug("in block access sample score query...")
        return params

    def info(self):
        # TODO: Should load the Score info (package.json) and return it as a JSON object.
        return None
| 2.328125 | 2 |
app/api/v0_1/stack.py | andromia/solverstack-crud | 0 | 12763491 | <filename>app/api/v0_1/stack.py
from flask import request, jsonify, make_response
from flask_jwt_extended import jwt_required, get_jwt_identity
import logging
from . import bp, errors
from app import db
from app.models import Stack, StackChain
@bp.route("/stack", methods=["GET", "POST"])
@jwt_required
def stack():
if request.method == "GET":
stacks = Stack.query.get_or_404(1).to_dict() # TODO: can query multiple stacks
return make_response({"stacks": [stacks]}, 200)
if request.method == "POST":
if not request.is_json:
raise errors.InvalidUsage(
"Incorrect request format! Request data must be JSON"
)
data = request.get_json(silent=True)
if not data:
raise errors.InvalidUsage(
"Invalid JSON received! Request data must be JSON"
)
if "stack" in data:
stack = data["stack"]
else:
raise errors.InvalidUsage("'stack' missing in request data")
if "chain" in data:
chain = data["chain"]
else:
raise errors.InvalidUsage("'chain' missing in request data")
if not isinstance(stack, dict):
raise errors.InvalidUsage("'stack' should be a dict")
if not isinstance(chain, list):
raise errors.InvalidUsage("'stack' should be a list")
if not stack:
raise errors.InvalidUsage("'stack' is empty")
if not chain:
raise errors.InvalidUsage("'chain' is empty")
stack["user_id"] = get_jwt_identity()["id"]
# Using dict unpacking for creation
new_stack = Stack(**stack)
db.session.add(new_stack)
db.session.commit()
for st in chain:
chained = StackChain(stack_id=new_stack.id, chained_id=st["id"])
db.session.add(chained)
db.session.commit()
return make_response({"stack": new_stack.to_dict()}, 201)
| 2.65625 | 3 |
magnetizer/item.py | magnusdahlgren/magnetizer | 1 | 12763492 | """A module that provides the class Item.
TODO: Better documentation
"""
import html
from re import sub
from re import search
from datetime import datetime
from markdown import markdown
from template import Template
from mutil import link_h1, downgrade_headings, wrap_it_in_a_link, strip_tags_from_html, \
first_image_url_from_html, abstract_from_html, COLOUR_ERROR, COLOUR_END
class Item:
    """A class to represent content items.

    Typical use is:

        item = Item(website)
        item.from_md_filename()

    An item can be either an ARTICLE_ITEM ("blog post") or a STATIC_ITEM ("static page").
    Which one it is is determined by the filename. If the filename starts with a number and
    a dash (e.g. '001-'), it is an ARTICLE_ITEM, otherwise it's STATIC.

    TODO: Document available attributes and methods
    """

    ARTICLE_ITEM = "magnetizer-article-item"
    STATIC_ITEM = "magnetizer-static-item"

    ARTICLE_ITEM_TEMPLATE_FILENAME = "_article_item_template.html"
    STATIC_ITEM_TEMPLATE_FILENAME = "_static_item_template.html"

    def __init__(self, website):
        self.website = website
        self.markdown_source = None
        self.filename = None
        self.html_summary = None
        self.html_full = None
        self.date = None
        self.type = None

    def from_md_filename(self, filename):
        """Populate Item with contents from file.

        Keyword arguments:
        filename -- the filename, without any path
        """

        if filename.split('.', 1)[1] == 'md':

            # Markdown sources are expected to be UTF-8; pass the encoding
            # explicitly so reading does not depend on the platform default.
            with open(self.website.config.value('source_path') + filename, 'r',
                      encoding='utf-8') as myfile:
                self.markdown_source = myfile.read()

            if not self.is_valid():
                print(COLOUR_ERROR + ' (!) ' + COLOUR_END +
                      "'%s' must include exactly one h1 and a date)" % filename)
                return False

            filename = filename.split('.', 1)[0] + '.html'
            self.type = Item.item_type(filename)

            # Remove first part of filename if it is a number
            if filename.split('-', 1)[0].isdigit():
                filename = filename.split('-', 1)[1]

            template = Template(self.website.tag['content'],
                                self.website.config.value('template_path') +
                                self.template_filename())

            self.filename = filename
            self.html_full = template.render(markdown(self.markdown_source))

            if self.type == Item.STATIC_ITEM:
                self.date = None
                self.html_full = self.html_full.replace(
                    self.website.tag['item_footer'],
                    self.website.static_footer_html, 1)
            else:
                self.date = self.date_from_markdown_source()
                self.html_full = self.html_full.replace(
                    self.website.tag['item_footer'],
                    self.website.article_footer_html, 1)

            self.html_full = self.html_full.replace(self.website.tag['break'], '')

            if self.html_full.count(self.website.tag['creative_commons']) > 0:
                self.html_full = self.html_full.replace(
                    self.website.tag['cc_here'], self.cc_license(), 1)
                self.html_full = self.html_full.replace(
                    self.website.tag['creative_commons'], '')

            # The summary is everything before the 'break' tag
            summary = self.markdown_source.split(self.website.tag['break'], maxsplit=1)[0]

            # Show 'read more' if post has been abbreviated
            if summary != self.markdown_source:
                readmore = "<a href='%s' class='magnetizer-more'>Read more</a>" % \
                           self.filename
            else:
                readmore = ""

            self.html_summary = markdown(summary) + readmore
            self.html_summary = link_h1(self.html_summary, self.filename)
            self.html_summary = downgrade_headings(self.html_summary)
            self.html_summary = template.render(self.html_summary)
            self.html_summary = self.html_summary.replace(
                self.website.tag['item_footer'], '', 1)
            # Includes are only expanded on full pages, never in summaries
            self.html_summary = sub(r'<!-- MAGNETIZER_INCLUDE (.*?)-->', '', self.html_summary)

            date_html = self.date_html_from_date()

            if date_html is not None:
                self.html_full = self.html_full.replace(
                    self.website.tag['date'], date_html, 1)
                # date in short html should be a link
                self.html_summary = self.html_summary.replace(
                    self.website.tag['date'],
                    wrap_it_in_a_link(date_html, self.filename), 1)

            return True

        return False

    def title(self):
        """Identify the title for the Item, i.e. the contents of the first <h1>."""

        title = "Untitled"

        if self.html_full is not None:
            match = search(r"<h1>(.*?)<\/h1>", self.html_full)
            if match:
                title = strip_tags_from_html(match.group(1))

        return '%s - %s' % (title, self.website.config.value('website_name'))

    def meta_description(self):
        """Identify the meta_description for the Item, i.e. the contents of the
        <!-- META_DESCRIPTION --> tag.
        """

        match = search(r"<!-- *META_DESCRIPTION *= *(.*?) *-->", self.markdown_source)
        if match:
            return match.group(1)
        return None

    def feed_entry(self):
        """Returns an Atom feed entry for the item"""

        full_url = '%s/%s' % (self.website.config.value('website_base_url'), self.filename)

        entry = '<entry>'
        entry += '<title>%s</title>' % html.escape(self.title(), False)
        entry += '<link href="%s"/>' % full_url
        entry += '<id>%s</id>' % full_url
        entry += '<updated>%sT00:00:01Z</updated>' % self.date
        entry += '<summary>%s</summary>' % html.escape(self.abstract(), False)
        entry += '</entry>'

        return entry

    def date_html_from_date(self):
        """Renders a html <time> element based on the item's date.
        e.g. "<time datetime='2019-08-03'>3 August 2019</time>
        """

        if self.date is not None:
            result = "<time datetime='%s'>" % self.date.isoformat()
            result += self.date.strftime('%-d %B %Y')
            result += "</time>"
            return result
        return None

    def date_from_markdown_source(self):
        """Identify the date for the Item from a comment in the markdown source,
        e.g. <!-- 24/12/2019 -->
        """

        match = search(r'.*<!-- (\d\d?/\d\d?/\d\d\d\d?) -->.*', self.markdown_source)
        if match:
            return datetime.strptime(match[1], '%d/%m/%Y').date()
        return None

    def cc_license(self):
        """Renders html to show Creative Commons license information for the item"""

        return ('<p class="magntetizer-license">' +
                '<a rel="license" href="http://creativecommons.org/licenses/by/4.0/">' +
                '<img alt="Creative Commons Licence" style="border-width:0" ' +
                'src="https://i.creativecommons.org/l/by/4.0/88x31.png" />' +
                '</a><br />This work by <a xmlns:cc="http://creativecommons.org/ns#" href="' +
                self.website.config.value('website_base_url') + '/' + self.filename +
                '" property="cc:attributionName" rel="cc:attributionURL">' +
                self.website.config.value('website_author') +
                '</a> is licensed under a <a rel="license" ' +
                'href="http://creativecommons.org/licenses/by/4.0/">' +
                'Creative Commons Attribution 4.0 International License</a>.' +
                '</p>')

    def twitter_card(self):
        """Generates meta data for a Twitter card for the item"""

        twitter_handle = self.website.config.value('website_twitter')

        card = '<meta name="twitter:card" content="summary_large_image" />'
        card += '<meta name="twitter:site" content="%s" />' % twitter_handle
        card += '<meta name="twitter:title" content="%s" />' % self.title()

        img_url = first_image_url_from_html(markdown(self.markdown_source))

        card += '<meta name="twitter:description" content="%s" />' % self.abstract()

        if img_url:
            # Relative image URLs are resolved against the site base URL
            if not img_url.startswith('http'):
                img_url = self.website.config.value('website_base_url') + '/' + img_url
            card += '<meta name="twitter:image" content="%s" />' % img_url

        return card

    def abstract(self):
        """Generate a short abstract for the item"""

        return abstract_from_html(markdown(self.markdown_source))

    def is_valid(self):
        """Determine whether the item is valid, i.e. whether it contains a <h1> and a date."""

        return "<h1>" in markdown(self.markdown_source) and \
               search(r'.*<!-- (\d\d?/\d\d?/\d\d\d\d?) -->.*', self.markdown_source)

    def is_indexable(self):
        """Determine whether the item should be indexable by search engines by looking
        for noindex tag in the markdown source
        """

        return not self.website.tag['noindex'] in self.markdown_source

    def template_filename(self):
        """Determine which template to use for the item."""

        filename = None
        if self.type == Item.ARTICLE_ITEM:
            filename = Item.ARTICLE_ITEM_TEMPLATE_FILENAME
        elif self.type == Item.STATIC_ITEM:
            filename = Item.STATIC_ITEM_TEMPLATE_FILENAME
        return filename

    @staticmethod
    def item_type(filename):
        """Determine the item type for this file, based on its filename.

        Result:
        Item.ARTICLE_ITEM - if filename starts with a number and a dash, e.g. '001-'
        Item.STATIC_ITEM - otherwise
        """

        if filename.split('-', 1)[0].isdigit():
            return Item.ARTICLE_ITEM
        return Item.STATIC_ITEM

    @staticmethod
    def html_contents_from_multiple_md_files(website, filenames):
        """Splice the html contents of the files specified into one blob of html

        Keyword Arguments:
        filenames - a list of filenames
        """

        item = Item(website)
        html_content = ''

        for filename in filenames:
            # Only article items (numeric prefix) are included
            if filename.split('-', 1)[0].isdigit():
                if item.from_md_filename(filename):
                    html_content += item.html_summary

        return html_content
| 3.484375 | 3 |
flask-06-SQL_Alchemy/code/models/item.py | ysabel31/Apprendre-Python | 0 | 12763493 | <filename>flask-06-SQL_Alchemy/code/models/item.py
from db import db
class ItemModel(db.Model):
    """SQLAlchemy model for a store item: name, price and owning store."""

    __tablename__ = 'items'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))
    price = db.Column(db.Float(precision=2))

    store_id = db.Column(db.Integer, db.ForeignKey('stores.id'))
    store = db.relationship('StoreModel')

    def __init__(self, name, price, store_id):
        self.name = name
        self.price = price
        self.store_id = store_id

    def json(self):
        """Return a JSON-serializable representation of the item."""
        return { 'name' : self.name, 'price' : self.price }

    @classmethod
    def find_by_name(cls, name):
        """Return the first item with the given name, or None.

        Equivalent SQL: SELECT * FROM items WHERE name = :name LIMIT 1
        """
        return cls.query.filter_by(name=name).first()

    def save_to_db(self):
        """Insert or update this item in the database.

        The session is a collection of objects to be written to the
        database; multiple objects can be added before a single commit
        (in this case just one).
        """
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self):
        """Delete this item from the database.

        BUG FIX: the final commit line was corrupted by trailing
        extraction residue; restored to a valid statement.
        """
        db.session.delete(self)
        db.session.commit()
utils/warnings.py | Jakob-Unfried/msc-legacy | 1 | 12763494 | <filename>utils/warnings.py
import sys
def custom_warn(msg, category=UserWarning, filename='', lineno=-1, *args, **kwargs):
    """Drop-in replacement for ``warnings.showwarning`` that prints to stderr.

    :param msg: the warning message.
    :param category: the warning class (its name is printed).
    :param filename: file the warning was issued from, if known.
    :param lineno: line number the warning was issued from.
    """
    print(f'{category.__name__}: {msg}', file=sys.stderr, flush=True)
    # BUG FIX: report the supplied filename instead of always printing
    # "(unknown)"; fall back to the placeholder when no filename is given.
    origin = filename if filename else '(unknown)'
    print(f' issued from: {origin}:{lineno}', file=sys.stderr, flush=True)
| 2.484375 | 2 |
KnnModel.py | gitmax681/hand-gesture-recognition | 3 | 12763495 | <gh_stars>1-10
"""
Note: do not tweak this code unless you know what you're doing.
"""
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from pandas import read_csv
import numpy as np
import json
from init import main
with open('config.json', 'r') as f:
    _d = json.load(f)

labels = _d['labels']
dataversion = _d['CurrentData']

data = read_csv(f'data/{dataversion}', sep=',')

# Last column holds the label.
# NOTE(review): iloc[:, 0:-2] drops the last TWO columns from the features —
# confirm the second-to-last column is intentionally excluded.
y = data.iloc[:, -1]
# BUG FIX: the np.float alias was deprecated in NumPy 1.20 and removed in
# 1.24 (raises AttributeError); the builtin float is the exact replacement.
X = np.array(data.iloc[:, 0:-2]).astype(float)

# Hold out 25% of the samples for evaluation (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42)

model = KNeighborsClassifier(n_neighbors=5)
model.fit(X_train, y_train)

main('knnModel', labels, model=model)
| 2.71875 | 3 |
ssod/datasets/pipelines/formatting.py | huimlight/SoftTeacher | 604 | 12763496 | import numpy as np
from mmdet.datasets import PIPELINES
from mmdet.datasets.pipelines.formating import Collect
from ssod.core import TrimapMasks
@PIPELINES.register_module()
class ExtraAttrs(object):
    """Pipeline stage that injects a fixed set of extra key/value pairs
    into each sample's ``results`` dict."""

    def __init__(self, **attrs):
        self.attrs = attrs

    def __call__(self, results):
        for name, value in self.attrs.items():
            # refuse to overwrite anything already present in results
            assert name not in results
            results[name] = value
        return results
@PIPELINES.register_module()
class ExtraCollect(Collect):
    """``Collect`` variant that records additional image meta keys.

    Args:
        extra_meta_keys (Sequence[str]): extra keys appended to the
            ``meta_keys`` collected by the base ``Collect`` transform.
    """

    # FIX: use an immutable default instead of a mutable list literal
    # (shared-mutable-default pitfall); tuple() accepts any sequence, so
    # callers passing lists are unaffected.
    def __init__(self, *args, extra_meta_keys=(), **kwargs):
        super().__init__(*args, **kwargs)
        self.meta_keys = self.meta_keys + tuple(extra_meta_keys)
@PIPELINES.register_module()
class PseudoSamples(object):
    def __init__(
        self, with_bbox=False, with_mask=False, with_seg=False, fill_value=255
    ):
        """
        Replacing gt labels in original data with fake labels or adding extra fake labels for unlabeled data.
        This is to remove the effect of labeled data and keep its elements aligned with other sample.
        Args:
            with_bbox: add an empty set of gt boxes/labels (and register the field).
            with_mask: add one fill_value mask per gt box (and register the field).
            with_seg: add a fill_value semantic segmentation map (and register the field).
            fill_value: pixel value used for the fake masks/segmentation (default 255,
                commonly the "ignore" label).
        """
        self.with_bbox = with_bbox
        self.with_mask = with_mask
        self.with_seg = with_seg
        self.fill_value = fill_value

    def __call__(self, results):
        if self.with_bbox:
            # zero boxes / zero labels: a valid but empty annotation set
            results["gt_bboxes"] = np.zeros((0, 4))
            results["gt_labels"] = np.zeros((0,))
            if "bbox_fields" not in results:
                results["bbox_fields"] = []
            if "gt_bboxes" not in results["bbox_fields"]:
                results["bbox_fields"].append("gt_bboxes")
        if self.with_mask:
            # one full-image fill_value mask per (fake) instance
            num_inst = len(results["gt_bboxes"])
            h, w = results["img"].shape[:2]
            results["gt_masks"] = TrimapMasks(
                [
                    self.fill_value * np.ones((h, w), dtype=np.uint8)
                    for _ in range(num_inst)
                ],
                h,
                w,
            )

            if "mask_fields" not in results:
                results["mask_fields"] = []
            if "gt_masks" not in results["mask_fields"]:
                results["mask_fields"].append("gt_masks")
        if self.with_seg:
            # whole segmentation map set to fill_value
            results["gt_semantic_seg"] = self.fill_value * np.ones(
                results["img"].shape[:2], dtype=np.uint8
            )
            if "seg_fields" not in results:
                results["seg_fields"] = []
            if "gt_semantic_seg" not in results["seg_fields"]:
                results["seg_fields"].append("gt_semantic_seg")
        return results
| 2.234375 | 2 |
src/04_Programming_Hadoop_with_Spark/FilteredLowestRatedMovieDateFrame.py | MilovanTomasevic/The-Ultimate-Hands-On-Hadoop-Tame-your-Big-Data | 0 | 12763497 | <gh_stars>0
from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark.sql import functions
def loadMovieNames():
    """Build a {movieID: title} dict from the MovieLens 100K u.item file.

    u.item is pipe-delimited and encoded as ISO-8859-1 (Latin-1, per the
    MovieLens 100K README); reading it with the platform default encoding
    raises UnicodeDecodeError on accented movie titles, so the encoding is
    passed explicitly.
    """
    movieNames = {}
    with open('ml-100k/u.item', encoding='ISO-8859-1') as f:
        for line in f:
            fields = line.split('|')
            # fields[0] = movie id, fields[1] = title
            movieNames[int(fields[0])] = fields[1]
    return movieNames
def parseInput(line):
    """Parse one whitespace-delimited u.data line into a Row(movieID, rating).

    u.data columns are: userID, movieID, rating, timestamp.
    """
    tokens = line.split()
    return Row(movieID=int(tokens[1]), rating=float(tokens[2]))
if __name__ == "__main__":
    # Create a SparkSession
    spark = SparkSession.builder.appName("PopularMovies").getOrCreate()
    # Load up our movie ID -> name dictionary
    movieNames = loadMovieNames()
    # Get the raw data
    lines = spark.sparkContext.textFile("hdfs:///user/maria_dev/ml-100k/u.data")
    # Convert it to a RDD of Row objects with (movieID, rating)
    movies = lines.map(parseInput)
    # Convert that to a DataFrame
    movieDataset = spark.createDataFrame(movies)
    # Compute counts of ratings for each movieID
    counts = movieDataset.groupBy("movieID").count()
    # Keep only movies rated more than 10 times (the original comment had
    # the condition backwards).
    filteredCounts = counts.filter("count > 10")
    # Compute average rating for each movieID
    averageRatings = movieDataset.groupBy("movieID").avg("rating")
    # Join the two together (We now have movieID, avg(rating), and count columns)
    averagesAndCounts = filteredCounts.join(averageRatings, "movieID")
    # Pull the 10 lowest-rated movies (orderBy is ascending by default)
    topTen = averagesAndCounts.orderBy("avg(rating)").take(10)
    # Print them out, converting movie ID's to names as we go.
    for movie in topTen:
        print(movieNames[movie[0]], movie[1], movie[2])
    # Stop the session (the original ended with this comment but never
    # actually called stop(), leaving the Spark session running).
    spark.stop()
| 3.46875 | 3 |
Challenges-2/movieReview.py | chandrakant100/Python-Project | 0 | 12763498 | <reponame>chandrakant100/Python-Project
# Challenge 4 : Create a function named movie_review() that has one parameter named rating.
# If rating is less than or equal to 5, return "Avoid at all costs!".
# If rating is between 5 and 9, return "This one was fun.".
# If rating is 9 or above, return "Outstanding!"
# Date : Thu 28 May 2020 07:31:11 AM IST
def movie_review(rating):
    """Return a verdict string for a numeric movie rating.

    Per the exercise spec:
      - rating <= 5        -> "Avoid at all costs!"
      - 5 < rating < 9     -> "This one was fun."
      - rating >= 9        -> "Outstanding!"

    The original implementation's overlapping conditions returned
    "This one was fun." for rating == 9, contradicting the stated
    requirement that 9 or above is "Outstanding!".
    """
    if rating <= 5:
        return "Avoid at all costs!"
    if rating < 9:
        return "This one was fun."
    return "Outstanding!"
# Print verdicts for a few sample ratings.
for sample_rating in (9, 4, 6):
    print(movie_review(sample_rating))
| 4.125 | 4 |
mango/tradehistory.py | roelantc/mango-explorer | 0 | 12763499 | # # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:<EMAIL>)
import logging
import numpy
import pandas
import os
import os.path
import requests
import time
import typing
from datetime import datetime, timedelta
from dateutil import parser
from decimal import Decimal
from solana.publickey import PublicKey
from .account import Account
from .context import Context
# # 🥭 TradeHistory class
#
# Downloads and unifies trade history data.
#
class TradeHistory:
    """Downloads and unifies Mango Markets spot and perp trade history.

    Trades are fetched page-by-page from the event-history REST API,
    normalised to the shared `COLUMNS` layout and kept sorted by
    (Timestamp, Market, SequenceNumber). The accumulated DataFrame can be
    persisted with `save()` and restored with `load()`.
    """

    # Unified column layout shared by spot and perp trades.
    COLUMNS = ["Timestamp", "Market", "Side", "MakerOrTaker", "Change", "Price", "Quantity", "Fee",
               "SequenceNumber", "FeeTier", "MarketType", "OrderId"]

    # event-history perp field name -> unified column name
    __perp_column_name_mapper = {
        "loadTimestamp": "Timestamp",
        "seqNum": "SequenceNumber",
        "price": "Price",
        "quantity": "Quantity"
    }

    # event-history spot field name -> unified column name
    __spot_column_name_mapper = {
        "loadTimestamp": "Timestamp",
        "seqNum": "SequenceNumber",
        "price": "Price",
        "size": "Quantity",
        "side": "Side",
        "feeCost": "Fee",
        "feeTier": "FeeTier",
        "orderId": "OrderId"
    }

    # Spot fields parsed into Decimal before building a DataFrame.
    __decimal_spot_columns = [
        "openOrderSlot",
        "feeTier",
        "nativeQuantityReleased",
        "nativeQuantityPaid",
        "nativeFeeOrRebate",
        "orderId",
        "clientOrderId",
        "source",
        "seqNum",
        "baseTokenDecimals",
        "quoteTokenDecimals",
        "price",
        "feeCost",
        "size"
    ]

    # Perp fields parsed into Decimal before building a DataFrame.
    __decimal_perp_columns = [
        "seqNum",
        "makerFee",
        "takerFee",
        "makerOrderId",
        "takerOrderId",
        "price",
        "quantity"
    ]

    # Converters used when re-loading a saved CSV so types round-trip exactly.
    __column_converters = {
        "Timestamp": lambda value: parser.parse(value),
        "SequenceNumber": lambda value: Decimal(value),
        "Price": lambda value: Decimal(value),
        "Change": lambda value: Decimal(value),
        "Quantity": lambda value: Decimal(value),
        "Fee": lambda value: Decimal(value),
        "FeeTier": lambda value: Decimal(value),
        "OrderId": lambda value: Decimal(value)
    }

    def __init__(self, seconds_pause_between_rest_calls: int = 1) -> None:
        """seconds_pause_between_rest_calls: delay between successive REST
        page requests, to avoid hammering the event-history API."""
        self._logger: logging.Logger = logging.getLogger(self.__class__.__name__)
        self.__seconds_pause_between_rest_calls: int = seconds_pause_between_rest_calls
        self.__trades: pandas.DataFrame = pandas.DataFrame(columns=TradeHistory.COLUMNS)

    @staticmethod
    def __market_lookup(context: Context) -> typing.Callable[[pandas.Series], str]:
        """Return a row function mapping a row's "address" to a market symbol.

        Raises when the address is unknown rather than silently producing NaN.
        """
        def __safe_lookup(row: pandas.Series) -> str:
            address: PublicKey = PublicKey(row["address"])
            market = context.market_lookup.find_by_address(address)
            if market is None:
                raise Exception(f"No market found with address {address}")
            return market.symbol
        return __safe_lookup

    @staticmethod
    def __download_json(url: str) -> typing.Any:
        """GET *url* and return the decoded JSON body, raising on HTTP errors.

        NOTE(review): `requests.get` is called without a timeout, so a stalled
        server can hang indefinitely — consider adding one.
        """
        response = requests.get(url)
        response.raise_for_status()
        return response.json()

    @staticmethod
    def __download_all_perps(context: Context, account: Account) -> pandas.DataFrame:
        """Download the complete perp trade history for *account* in one call."""
        url = f"https://event-history-api.herokuapp.com/perp_trades/{account.address}?page=all"
        data = TradeHistory.__download_json(url)
        trades: pandas.DataFrame = TradeHistory.__perp_data_to_dataframe(context, account, data)
        return trades

    @staticmethod
    def __download_updated_perps(context: Context, account: Account, newer_than: typing.Optional[datetime], seconds_pause_between_rest_calls: int) -> pandas.DataFrame:
        """Download perp trades page-by-page until a page older than *newer_than*.

        Pages are newest-first, so once a page's oldest trade predates
        *newer_than* no further pages are needed.
        """
        trades: pandas.DataFrame = pandas.DataFrame(columns=TradeHistory.COLUMNS)
        page: int = 0
        complete: bool = False
        while not complete:
            page += 1
            url = f"https://event-history-api.herokuapp.com/perp_trades/{account.address}?page={page}"
            data = TradeHistory.__download_json(url)
            frame: pandas.DataFrame = TradeHistory.__perp_data_to_dataframe(context, account, data)
            if len(frame) == 0:
                complete = True
            else:
                # DataFrame.append was removed in pandas 2.0 — use concat.
                trades = pandas.concat([trades, frame])
                if (newer_than is not None) and (frame.loc[frame.index[-1], "Timestamp"] < newer_than):
                    complete = True
                else:
                    time.sleep(seconds_pause_between_rest_calls)
        return trades

    @staticmethod
    def __perp_data_to_dataframe(context: Context, account: Account, data: typing.Any) -> pandas.DataFrame:
        """Normalise raw event-history perp JSON into the unified COLUMNS frame.

        Perp data is an array of JSON packages like:
        {
            "loadTimestamp": "2021-09-02T10:54:56.000Z",
            "address": <PUBLIC-KEY-STRING>,
            "seqNum": "2831",
            "makerFee": "0",
            "takerFee": "0.0004999999999988347",
            "takerSide": "sell",
            "maker": <PUBLIC-KEY-STRING>,
            "makerOrderId": <BIG-INT>,
            "taker": <PUBLIC-KEY-STRING>,
            "takerOrderId": <BIG-INT>,
            "price": "50131.9",
            "quantity": "0.019"
        }
        """
        def __side_lookup(row: pandas.Series) -> str:
            # Our side is the taker side when we were taker, otherwise its opposite.
            if row["MakerOrTaker"] == "taker":
                return str(row["takerSide"])
            elif row["takerSide"] == "buy":
                return "sell"
            else:
                return "buy"

        def __fee_calculator(row: pandas.Series) -> Decimal:
            # Fee is notional (price * quantity) times our maker or taker rate.
            price: Decimal = row["Price"]
            quantity: Decimal = row["Quantity"]
            fee_rate: Decimal
            if row["MakerOrTaker"] == "maker":
                fee_rate = row["makerFee"]
            else:
                fee_rate = row["takerFee"]
            return price * quantity * fee_rate

        # NOTE(review): the final element appears to be a continuation/sentinel
        # record — the original code always dropped it, so we do too.
        if len(data["data"]) <= 1:
            return pandas.DataFrame(columns=TradeHistory.COLUMNS)
        trade_data = data["data"][:-1]
        for trade in trade_data:
            for column_name in TradeHistory.__decimal_perp_columns:
                trade[column_name] = Decimal(trade[column_name])
        frame = pandas.DataFrame(trade_data).rename(mapper=TradeHistory.__perp_column_name_mapper, axis=1, copy=True)
        frame["Timestamp"] = frame["Timestamp"].apply(lambda timestamp: parser.parse(timestamp).replace(microsecond=0))
        frame["Market"] = frame.apply(TradeHistory.__market_lookup(context), axis=1)
        frame["MarketType"] = "perp"
        this_address = f"{account.address}"
        frame["MakerOrTaker"] = frame["maker"].apply(lambda addy: "maker" if addy == this_address else "taker")
        # Perps have no fee tier; -1 marks "not applicable".
        frame["FeeTier"] = -1
        frame["Fee"] = frame.apply(__fee_calculator, axis=1)
        frame["Side"] = frame.apply(__side_lookup, axis=1)
        # Change is the signed cash flow: positive for sells, negative for buys.
        frame["Change"] = (frame["Price"] * frame["Quantity"]) - frame["Fee"]
        frame["Change"] = frame["Change"].where(frame["Side"] == "sell", other=-frame["Change"])
        frame["OrderId"] = numpy.where(frame["MakerOrTaker"] == "maker",
                                       frame["makerOrderId"], frame["takerOrderId"])
        return frame[TradeHistory.COLUMNS]

    @staticmethod
    def __download_all_spots(context: Context, account: Account) -> pandas.DataFrame:
        """Download the complete spot trade history for every open-orders account."""
        trades: pandas.DataFrame = pandas.DataFrame(columns=TradeHistory.COLUMNS)
        for spot_open_orders_address in account.spot_open_orders:
            url = f"https://event-history-api.herokuapp.com/trades/open_orders/{spot_open_orders_address}?page=all"
            data = TradeHistory.__download_json(url)
            frame = TradeHistory.__spot_data_to_dataframe(context, account, data)
            # DataFrame.append was removed in pandas 2.0 — use concat.
            trades = pandas.concat([trades, frame])
        return trades

    @staticmethod
    def __download_updated_spots(context: Context, account: Account, newer_than: typing.Optional[datetime], seconds_pause_between_rest_calls: int) -> pandas.DataFrame:
        """Download spot trades per open-orders account, stopping each scan at *newer_than*."""
        trades: pandas.DataFrame = pandas.DataFrame(columns=TradeHistory.COLUMNS)
        for spot_open_orders_address in account.spot_open_orders:
            page: int = 0
            complete: bool = False
            while not complete:
                page += 1
                url = f"https://event-history-api.herokuapp.com/trades/open_orders/{spot_open_orders_address}?page={page}"
                data = TradeHistory.__download_json(url)
                frame = TradeHistory.__spot_data_to_dataframe(context, account, data)
                if len(frame) == 0:
                    complete = True
                else:
                    # DataFrame.append was removed in pandas 2.0 — use concat.
                    trades = pandas.concat([trades, frame])
                    earliest_in_frame = frame.loc[frame.index[-1], "Timestamp"]
                    if (newer_than is not None) and (earliest_in_frame < newer_than):
                        complete = True
                    else:
                        time.sleep(seconds_pause_between_rest_calls)
        return trades

    @staticmethod
    def __spot_data_to_dataframe(context: Context, account: Account, data: typing.Any) -> pandas.DataFrame:
        """Normalise raw event-history spot JSON into the unified COLUMNS frame.

        Spot data is an array of JSON packages like:
        {
            "loadTimestamp": "2021-10-05T16:04:50.717Z",
            "address": <PUBLIC-KEY-STRING>,
            "programId": <PUBLIC-KEY-STRING>,
            "baseCurrency": "SOL",
            "quoteCurrency": "USDC",
            "fill": true,
            "out": false,
            "bid": true,
            "maker": true,
            "openOrderSlot": "0",
            "feeTier": "4",
            "nativeQuantityReleased": "3000000000",
            "nativeQuantityPaid": "487482712",
            "nativeFeeOrRebate": "146288",
            "orderId": <BIG-INT>,
            "openOrders": <PUBLIC-KEY-STRING>,
            "clientOrderId": <BIG-INT>,
            "uuid": <LONG-OPAQUE-UUID-STRING>,
            "source": "2",
            "seqNum": "24827175",
            "baseTokenDecimals": 9,
            "quoteTokenDecimals": 6,
            "side": "buy",
            "price": 162.543,
            "feeCost": -0.146288,
            "size": 3
        }
        """
        if len(data["data"]) == 0:
            return pandas.DataFrame(columns=TradeHistory.COLUMNS)
        else:
            trade_data = data["data"]
            for trade in trade_data:
                for column_name in TradeHistory.__decimal_spot_columns:
                    trade[column_name] = Decimal(trade[column_name])
            frame = pandas.DataFrame(trade_data).rename(
                mapper=TradeHistory.__spot_column_name_mapper, axis=1, copy=True)
            frame["Timestamp"] = frame["Timestamp"].apply(
                lambda timestamp: parser.parse(timestamp).replace(microsecond=0))
            frame["Market"] = frame.apply(TradeHistory.__market_lookup(context), axis=1)
            frame["MakerOrTaker"] = numpy.where(frame["maker"], "maker", "taker")
            # Change is the signed cash flow: positive for sells, negative for buys.
            frame["Change"] = (frame["Price"] * frame["Quantity"]) - frame["Fee"]
            frame["Change"] = frame["Change"].where(frame["Side"] == "sell", other=-frame["Change"])
            frame["MarketType"] = "spot"
            return frame[TradeHistory.COLUMNS]

    @property
    def trades(self) -> pandas.DataFrame:
        """A defensive deep copy of the accumulated trades."""
        return self.__trades.copy(deep=True)

    def download_latest(self, context: Context, account: Account, cutoff: datetime) -> None:
        """Replace held trades with everything from *cutoff* onwards."""
        # Go back further than we need to so we can be sure we're not skipping any trades due to race conditions.
        # We remove duplicates a few lines further down.
        self._logger.info(f"Downloading spot trades from {cutoff}")
        spot: pandas.DataFrame = TradeHistory.__download_updated_spots(context,
                                                                       account,
                                                                       cutoff,
                                                                       self.__seconds_pause_between_rest_calls)
        self._logger.info(f"Downloading perp trades from {cutoff}")
        perp: pandas.DataFrame = TradeHistory.__download_updated_perps(context,
                                                                       account,
                                                                       cutoff,
                                                                       self.__seconds_pause_between_rest_calls)
        all_trades: pandas.DataFrame = pandas.concat([self.__trades, spot, perp])
        all_trades = all_trades[all_trades["Timestamp"] >= cutoff]
        distinct_trades = all_trades.drop_duplicates()
        sorted_trades = distinct_trades.sort_values(["Timestamp", "Market", "SequenceNumber"], axis=0, ascending=True)
        self._logger.info(f"Download complete. Data contains {len(sorted_trades)} trades.")
        self.__trades = sorted_trades

    def update(self, context: Context, account: Account) -> None:
        """Extend held trades: full download when empty, incremental otherwise."""
        latest_trade: typing.Optional[datetime] = self.__trades.loc[self.__trades.index[-1],
                                                                    "Timestamp"] if len(self.__trades) > 0 else None
        spot: pandas.DataFrame
        perp: pandas.DataFrame
        if latest_trade is None:
            self._logger.info("Downloading all spot trades.")
            spot = TradeHistory.__download_all_spots(context, account)
            self._logger.info("Downloading all perp trades.")
            perp = TradeHistory.__download_all_perps(context, account)
        else:
            # Go back further than we need to so we can be sure we're not skipping any trades due to race conditions.
            # We remove duplicates a few lines further down.
            cutoff_safety_margin: timedelta = timedelta(hours=1)
            cutoff: datetime = latest_trade - cutoff_safety_margin
            self._logger.info(
                f"Downloading spot trades from {cutoff}, {cutoff_safety_margin} before latest stored trade at {latest_trade}")
            spot = TradeHistory.__download_updated_spots(context, account,
                                                         cutoff, self.__seconds_pause_between_rest_calls)
            self._logger.info(
                f"Downloading perp trades from {cutoff}, {cutoff_safety_margin} before latest stored trade at {latest_trade}")
            perp = TradeHistory.__download_updated_perps(context, account,
                                                         cutoff, self.__seconds_pause_between_rest_calls)
        all_trades = pandas.concat([self.__trades, spot, perp])
        distinct_trades = all_trades.drop_duplicates()
        sorted_trades = distinct_trades.sort_values(["Timestamp", "Market", "SequenceNumber"], axis=0, ascending=True)
        self._logger.info(f"Download complete. Data contains {len(sorted_trades)} trades.")
        self.__trades = sorted_trades

    def load(self, filename: str, ok_if_missing: bool = False) -> None:
        """Append trades previously written by `save()`.

        Raises FileNotFoundError if *filename* is absent, unless
        *ok_if_missing* is True (in which case nothing happens).
        """
        if not os.path.isfile(filename):
            if not ok_if_missing:
                # The original raised a bare Exception with an f-string that
                # contained no placeholder, so the offending path was never shown.
                raise FileNotFoundError(f"File {filename} does not exist or is not a file.")
        else:
            existing = pandas.read_csv(filename,
                                       float_precision="round_trip",
                                       converters=TradeHistory.__column_converters)
            # DataFrame.append was removed in pandas 2.0 — use concat.
            self.__trades = pandas.concat([self.__trades, existing])

    def save(self, filename: str) -> None:
        """Overwrite *filename* with the accumulated trades as CSV."""
        self.__trades.to_csv(filename, index=False, mode="w")

    def __str__(self) -> str:
        return f"« TradeHistory containing {len(self.__trades)} trades »"

    def __repr__(self) -> str:
        return f"{self}"
| 1.789063 | 2 |
Curso-em-video/115-exerciciospython/d052_num_primo.py | FabianoBill/Estudos-em-python | 1 | 12763500 | <reponame>FabianoBill/Estudos-em-python
# Python Exercise 52: read an integer and report whether or not it is prime.
n = int(input("Digite um número inteiro: "))
co = 0  # how many divisors of n have been found
# Trial-divide by every candidate from 1 to n; a prime has exactly two
# divisors (1 and n itself).
for c in range(1, n + 1):
    if n % c == 0:
        co += 1
if co == 2:
    print(f"{n} é primo.")
else:
    print(f"{n} não é primo.")
| 3.75 | 4 |
D__cell_enrichments.py | garedaba/baby-brains | 2 | 12763501 | import numpy as np
import pandas as pd
import os
from collections import Counter
from scipy.stats import hypergeom
fdr_threshold = 0.05
def main():
    """Run the cell-type enrichment pipeline and write results under results/."""
    os.makedirs('results/enrichment', exist_ok=True)
    os.makedirs('results/GO', exist_ok=True)
    # LOAD
    # single cell gene data
    all_gene_data = pd.read_csv('data/gene_lists/all-scRNA-data.csv')
    # normalised RPKM bulk data corrected for age, sex, etc
    # NOTE(review): bulk_data and correlation_results are loaded but not
    # referenced below — confirm whether they are still needed.
    bulk_data = pd.read_csv('data/processed_psychencode/PsychENCODE-prenatal-bulk-RPKM-data-scRNA-filtered-Winsor-log2-residualised.csv')
    # gene-wise correlation with PC components
    correlation_results = pd.read_csv('results/gene_correlations/PCA_correlations-KendallTau-residualisedRPKM.csv')
    # fetal background geneset = all filtered genes in bulk data
    background_genes = pd.read_csv('data/gene_lists/background_genes.txt', header=None)[0]
    print('number of background genes: {:}'.format(len(background_genes)))
    # get gene lists
    print('gathering gene lists')
    # genes differentially expressed by classes or categories, returning all genes, as well as those that are unique to each class
    # CELL TIMING: PRECURSOR OR MATURE
    cell_timing, cell_timing_genes, cell_timing_unique_genes = get_gene_lists(all_gene_data, background_genes, class_type='timing')
    # CELL CLASS
    cell_classes, cell_class_genes, cell_class_unique_genes = get_gene_lists(all_gene_data, background_genes, class_type='class')
    # CELL TYPE
    cell_types, cell_type_genes, cell_type_unique_genes = get_gene_lists(all_gene_data, background_genes, class_type='cluster_study')
    # get significant genes (pre-computed, FDR-thresholded PC correlations)
    significant_genes = pd.read_csv('results/gene_correlations/PCA_correlations-KendallTau-PC-significant_genes-p' + str(fdr_threshold) + '.csv')
    # genes positively correlated to PC component
    positive_significant_genes_list = list(significant_genes.loc[significant_genes['PC1_tau']>0,'symbol'])
    # genes negatively correlated to PC component
    negative_significant_genes_list = list(significant_genes.loc[significant_genes['PC1_tau']<0,'symbol'])
    # ENRICHMENT: hypergeometric tests per timing / class / type grouping
    print("cell enrichments")
    cell_timing_enrichment_results = run_enrichment(cell_timing, cell_timing_genes, cell_timing_unique_genes, positive_significant_genes_list, negative_significant_genes_list, background_genes)
    cell_timing_enrichment_results.to_csv('results/enrichment/cell_timing_enrichment-PC1-significant_genes-p' + str(fdr_threshold) + '.csv', index=False)
    print("see results/enrichment/cell_timing_enrichment-significant_genes-p" + str(fdr_threshold) + ".csv")
    cell_class_enrichment_results = run_enrichment(cell_classes, cell_class_genes, cell_class_unique_genes, positive_significant_genes_list, negative_significant_genes_list, background_genes)
    cell_class_enrichment_results.to_csv('results/enrichment/cell_class_enrichment-PC1-significant_genes-p' + str(fdr_threshold) + '.csv', index=False)
    print("see results/enrichment/cell_class_enrichment-significant_genes-p" + str(fdr_threshold) + ".csv")
    cell_type_enrichment_results = run_enrichment(cell_types, cell_type_genes, cell_type_unique_genes, positive_significant_genes_list, negative_significant_genes_list, background_genes)
    cell_type_enrichment_results.to_csv('results/enrichment/cell_type_enrichment-PC1-significant_genes-p' + str(fdr_threshold) + '.csv', index=False)
    print("see results/enrichment/cell_type_enrichment-significant_genes-p" + str(fdr_threshold) + ".csv")
    # save out lists for webgestalt (external GO analysis tool)
    np.savetxt('results/GO/positive_genes.txt', positive_significant_genes_list, fmt='%s')
    np.savetxt('results/GO/negative_genes.txt', negative_significant_genes_list, fmt='%s')
### HELPERS ##############################################################################################
def get_gene_lists(data, background_genes, class_type='class'):
    """Collect expressed-gene lists per cell class/category.

    Returns (classes, class_genes, class_unique_genes) where class_genes[i]
    is the sorted unique genes expressed by classes[i] (restricted to the
    background set) and class_unique_genes[i] keeps only genes expressed by
    no other class. The generic 'neuron' label is dropped (it is not split
    into excitatory/inhibitory).
    """
    classes = list(set(np.unique(data[class_type])) - {'neuron'})
    # ALL GENES EXPRESSED IN A GIVEN CELL CLASS
    class_genes = []
    for cell_class in classes:
        genes_in_class = np.array(
            data.loc[data[class_type] == cell_class, 'gene'].values, dtype=str
        ).reshape(-1)
        # restrict to genes also present in the bulk background set
        keep = pd.DataFrame(genes_in_class).isin(list(background_genes)).values.reshape(-1)
        class_genes.append(np.unique(genes_in_class[keep]))
    # ALL GENES *UNIQUELY* EXPRESSED IN A GIVEN CELL CLASS:
    # anything appearing in more than one class list is shared, not unique.
    occurrence = Counter(np.hstack(class_genes))
    shared = {gene for gene, count in occurrence.items() if count > 1}
    class_unique_genes = [list(set(genes) - shared) for genes in class_genes]
    return classes, class_genes, class_unique_genes
def safe_div(x, y):
    """Divide x by y, returning array([0]) instead of failing when y == 0."""
    return np.array([0]) if y == 0 else x / y
def calculate_enrichment(hit_list, top_genes, full_gene_list):
    """Hypergeometric enrichment of *hit_list* within *top_genes*.

    Returns (enrichment, p) where enrichment is the odds-style ratio of
    hit density inside vs outside the top set, and p is the hypergeometric
    survival probability of seeing at least that many hits.
    """
    sample_size = len(top_genes)          # N: number of sampled (top) genes
    population_size = len(full_gene_list) # M: total population size
    hits_in_sample = sum(pd.DataFrame(top_genes).isin(hit_list).values)       # x
    hits_in_population = sum(pd.DataFrame(hit_list).isin(full_gene_list).values)[0]  # n
    enrichment = safe_div(
        hits_in_sample / sample_size,
        (hits_in_population - hits_in_sample) / (population_size - sample_size)
    )
    p = hypergeom.sf(hits_in_sample - 1, population_size, hits_in_population, sample_size)
    return enrichment, p
def run_enrichment(classes, gene_lists, unique_gene_lists, positive_genes, negative_genes, background_genes):
    """Tabulate enrichment for every class x (all/unique) x (positive/negative).

    Produces exactly 4 rows per class, in the fixed order:
    all/positive, all/negative, unique/positive, unique/negative —
    the column assignments below depend on this ordering.
    """
    enrichment_results = []
    num_genes = []
    # for each cell class/type
    for i in np.arange(len(classes)):
        # for full and unique gene lists
        for gl in [gene_lists, unique_gene_lists]:
            # as long as there are some genes
            if len(gl[i])>0:
                # calculate enrichment in the positively and negatively correlated lists
                for g in [positive_genes, negative_genes]:
                    enrichment_results.append(calculate_enrichment(list(gl[i]), list(g), list(background_genes)))
                    num_genes.append(len(gl[i]))
            # otherwise record NaN placeholders so the row count stays 4 per class
            else:
                for g in [positive_genes, negative_genes]:
                    enrichment_results.append((np.array([np.nan]),np.array([np.nan])))
                    num_genes.append(np.array([0]))
    # collate into dataframe (hstack flattens the (enrichment, p) pairs)
    results = pd.DataFrame(np.hstack(enrichment_results).T)
    results.columns=['enrichment', 'p']
    results['class'] = np.repeat(classes, 4)
    results['loading'] = ['positive', 'negative']*(len(classes)*2)
    results['gene_list'] = np.hstack([np.repeat(['all', 'unique'], 2)]*len(classes))
    results['num_genes'] = np.squeeze(num_genes)
    results = results.loc[:,['class','loading','gene_list','num_genes','enrichment','p']]
    return results
# Run the enrichment pipeline when executed as a script.
if __name__ == '__main__':
    main()
| 2.390625 | 2 |
World 1/First attempts/ex001 - First Try.py | MiguelChichorro/PythonExercises | 2 | 12763502 | colors = {"clean": "\033[m",
"red": "\033[31m",
"green": "\033[32m",
"yellow": "\033[33m",
"blue": "\033[34m",
"purple": "\033[35m",
"cian": "\033[36m"}
# Print the greeting in cyan using the ANSI escape from the colour table.
msg = "Hello World!!!"
print(f"{colors['cian']}{msg}")
| 2.828125 | 3 |
auto/auto.py | subtleseeker/Mask-RCNN | 1 | 12763503 | """
Mask R-CNN
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 auto.py train --dataset=/path/to/auto/dataset --weights=coco
# Resume training a model that you had trained earlier
python3 auto.py train --dataset=/path/to/auto/dataset --weights=last
# Train a new model starting from ImageNet weights
python3 auto.py train --dataset=/path/to/auto/dataset --weights=imagenet
# Apply color overlay to an image
python3 auto.py overlay --weights=/path/to/weights/file.h5 --image=<URL or path to file>
# Apply color overlay to video using the last weights you trained
python3 auto.py overlay --weights=last --video=<URL or path to file>
"""
import os
import sys
import datetime
import enum
import numpy as np
import skimage.draw
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
from math import isnan
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
#classchange
class ClassName(enum.Enum):
    """Segmentation class ids used throughout this module (1-based;
    background has no entry here)."""
    lane = 1
    pedestrian = 2
    vehicle = 3
    sign_board = 4
    street_light = 5
class AutoConfig(Config):
    """Configuration for training on the auto dataset.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "auto"
    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 2
    # Number of classes (including background)
    NUM_CLASSES = 1 + 5  # Background + 5 custom classes
    # Number of training steps per epoch
    STEPS_PER_EPOCH = 100
    # Skip detections with < 90% confidence
    DETECTION_MIN_CONFIDENCE = 0.9
global dataset_dir
############################################################
# Dataset
############################################################
class AutoDataset(utils.Dataset):
    """Dataset wrapper for the custom 5-class driving dataset."""

    def load_auto(self, dataset_dir, subset):
        """Load a subset of the Auto dataset.
        dataset_dir: Root directory of the dataset.
        subset: Subset to load: train or val
        """
        # Add classes. #classchange
        self.add_class("auto", 1, "lane")
        self.add_class("auto", 2, "pedestrian")
        self.add_class("auto", 3, "vehicle")
        self.add_class("auto", 4, "sign_board")
        self.add_class("auto", 5, "street_light")
        # Train or validation dataset?
        assert subset in ["train", "val"]
        dataset_dir = os.path.join(dataset_dir, subset)
        # Remember the subset directory so load_mask can locate ../masks.
        # (The original read a module-level `dataset_dir` that was never
        # assigned, which raised NameError the first time a mask was loaded.)
        self._dataset_dir = dataset_dir
        images = os.listdir(dataset_dir)
        for i in images:
            if i == ".directory":
                continue
            image_path = os.path.join(dataset_dir, i)
            image = skimage.io.imread(image_path)
            height, width = image.shape[:2]
            self.add_image(
                source='auto',
                image_id=i,  # use file name as a unique image id
                path=image_path,
                width=width, height=height,
            )

    def load_mask(self, image_id):
        """Generate instance masks for an image.
        Returns:
        masks: A bool array of shape [height, width, instance count] with
            one mask per instance.
        class_ids: a 1D array of class IDs of the instance masks.
        """
        info = self.image_info[image_id]
        # Masks live in a sibling "masks" directory next to train/val.
        mask_dir = os.path.join(self._dataset_dir, "../masks")
        image_path = os.path.join(mask_dir, str(info["id"]))
        image = skimage.io.imread(image_path)
        # One boolean plane per class, keyed by that class's mask colour. #classchange
        lane = np.all(image == (0, 255, 0), axis=-1)
        pedestrian = np.all(image == (255, 0, 255), axis=-1)
        vehicle = np.all(image == (0, 255, 255), axis=-1)
        sign_board = np.all(image == (255, 0, 0), axis=-1)
        street_light = np.all(image == (255, 255, 0), axis=-1)
        # `np.bool` was removed in NumPy 1.24; the builtin `bool` is equivalent.
        mask = np.stack((lane, pedestrian, vehicle, sign_board, street_light), axis=2).astype(bool)
        class_ids = np.arange(1, 6).astype(np.int32)  # classchange (one id per plane)
        return mask, class_ids
def train(model):
    """Train the model's head layers on the train/val splits of args.dataset.

    NOTE(review): relies on the module-level `args` and `config` objects set
    up elsewhere in this file — confirm they exist before calling.
    """
    # Training dataset.
    dataset_train = AutoDataset()
    dataset_train.load_auto(args.dataset, "train")
    dataset_train.prepare()
    # Validation dataset
    dataset_val = AutoDataset()
    dataset_val.load_auto(args.dataset, "val")
    dataset_val.prepare()
    # *** This training schedule is an example. Update to your needs ***
    # Since we're using a very small dataset, and starting from
    # COCO trained weights, we don't need to train too long. Also,
    # no need to train all layers, just the heads should do it.
    print("Training network heads")
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=30,
                layers='heads')
def color_overlay(image, mask, class_ids):
    """Paint each detected class mask onto *image* in its class colour.

    image: RGB image [height, width, 3]
    mask: instance masks [height, width, instance count]
    class_ids: class id (1..5) per instance channel.
    Returns the overlaid uint8 image.
    """
    # class id -> RGB overlay colour  #classchange
    palette = {
        1: (115, 255, 115),
        2: (255, 115, 255),
        3: (115, 255, 255),
        4: (255, 115, 115),
        5: (255, 255, 115),
    }
    overlayed = image
    print("Found classes: ", [ClassName(class_id).name for class_id in class_ids])
    for idx in range(mask.shape[-1]):
        plane = mask[:, :, idx]
        plane3 = np.stack((plane, plane, plane), axis=2)
        color = palette.get(class_ids[idx])
        if color is not None:
            overlayed = np.where(plane3, color, overlayed).astype(np.uint8)
        else:
            # Unknown class id: keep pixels, just normalise the dtype.
            overlayed = overlayed.astype(np.uint8)
    return overlayed
def detect_and_overlay(model, image_path=None, video_path=None):
    """Run detection on an image or a video and save the colour overlay.

    Exactly one of image_path / video_path must be provided. The original
    image branch ignored its `image_path` parameter and read the global
    `args.image` instead; it now uses the parameter it was given.
    """
    assert image_path or video_path
    # Image or video?
    if image_path:
        # Run model detection and generate the color overlay
        print("Running on {}".format(image_path))
        # Read image
        image = skimage.io.imread(image_path)
        # Convert grayscale images to 3D
        if len(image.shape) == 2:
            image = np.stack((image, image, image), axis=2)
        # Detect objects
        r = model.detect([image], verbose=1)[0]
        # Color overlay
        overlay = color_overlay(image, r['masks'], r['class_ids'])
        # Save output
        file_name = "overlay_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
        skimage.io.imsave(file_name, overlay)
    elif video_path:
        import cv2
        # Video capture
        vcapture = cv2.VideoCapture(video_path)
        width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = vcapture.get(cv2.CAP_PROP_FPS)
        # Define codec and create video writer
        file_name = "overlay_{:%Y%m%dT%H%M%S}.avi".format(datetime.datetime.now())
        vwriter = cv2.VideoWriter(file_name,
                                  cv2.VideoWriter_fourcc(*'MJPG'),
                                  fps, (width, height))
        count = 0
        success = True
        while success:
            print("frame: ", count)
            # Read next image
            success, image = vcapture.read()
            if success:
                # OpenCV returns images as BGR, convert to RGB
                image = image[..., ::-1]
                # Detect objects
                r = model.detect([image], verbose=0)[0]
                # Color overlay
                overlay = color_overlay(image, r['masks'], r['class_ids'])
                # RGB -> BGR to save image to video
                overlay = overlay[..., ::-1]
                # Add image to video writer
                vwriter.write(overlay)
                count += 1
        vwriter.release()
        print("Saved to ", file_name)
############################################################
# Evaluate
############################################################
def get_mask(image, mask, class_ids):
    """Collapse per-instance masks into a (H, W, 5) per-class binary mask.

    image: RGB image [height, width, 3] — used only for its spatial size.
    mask: instance segmentation masks [height, width, instance count].
    class_ids: class id (1..5) for each instance channel.
    Returns a uint8 array where channel c-1 is 1 wherever class c appears.
    """
    height, width = image.shape[:2]
    pd_mask = np.zeros([height, width, 5], dtype=np.uint8)  # classchange
    for idx in range(mask.shape[-1]):
        instance = mask[:, :, idx:idx + 1]
        channel = class_ids[idx] - 1
        # OR this instance into its class channel.
        pd_mask[:, :, channel:channel + 1] = np.where(
            instance, True, pd_mask[:, :, channel:channel + 1]
        ).astype(np.uint8)
    return pd_mask
def evaluate(model, dataset, limit=0, image_ids=None):
    """Evaluates a set of data for IOU scores.

    model: a Mask R-CNN model in inference mode.
    dataset: A Dataset object with validation data
    limit: if not 0, it's the number of images to use for evaluation
    image_ids: optional explicit list of image ids; defaults to all ids
        in `dataset`.

    Side effects: saves a color-overlay PNG per image under
    dataset/images/predicted/ and prints per-image, per-class and average
    IoU scores plus timing information.
    """
    # Pick COCO images from the dataset
    image_ids = image_ids or dataset.image_ids
    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]
    import time
    t_prediction = 0
    t_start = time.time()
    results = []
    total_iou_score = 0
    # One accumulator per class; 5 is the hard-coded class count.
    total_class_iou = np.zeros(5) #classchange
    for i, image_id in enumerate(image_ids):
        # Load image
        image = dataset.load_image(image_id)
        gt_mask, class_ids = dataset.load_mask(image_id)
        # Run detection
        t = time.time()
        r = model.detect([image], verbose=0)[0]
        t_prediction += (time.time() - t)
        ################ Save predicted images ##############
        # Color overlay
        overlay = color_overlay(image, r['masks'], r['class_ids'])
        # Save output
        file_name = "overlay_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
        skimage.io.imsave("dataset/images/predicted/" + file_name, overlay)
        #####################################################
        # Per-class binary masks [H, W, 5]; gt_mask is assumed to share this
        # layout (one channel per class) -- TODO confirm against load_mask.
        pd_mask = get_mask(image, r['masks'], r['class_ids'])
        intersection = np.logical_and(gt_mask, pd_mask)
        union = np.logical_or(gt_mask, pd_mask)
        iou_score = np.sum(intersection) / np.sum(union)
        total_iou_score += iou_score
        class_iou = np.zeros(5) #classchange
        for j in range(5): #classchange
            inter = np.logical_and(gt_mask[:,:,j], pd_mask[:,:,j])
            un = np.logical_or(gt_mask[:,:,j], pd_mask[:,:,j])
            # NaN when a class is absent from both masks (0/0); such classes
            # are excluded from the running totals below.
            class_iou[j] = np.sum(inter) / np.sum(un)
            # `isnan` assumed to be `from math import isnan` earlier in the
            # file -- TODO confirm.
            if not isnan(class_iou[j]):
                total_class_iou[j] += class_iou[j]
        class_names = [ClassName(class_id).name for class_id in class_ids]
        # NOTE(review): class_names has one entry per ground-truth instance,
        # not per class -- indexing [j] for j in range(5) can IndexError when
        # an image has fewer than 5 instances; verify.
        print(f"Class IOU scores")
        for j in range(5): #classchange
            print(class_names[j].ljust(14) + ": " + str(class_iou[j]))
        print(f"IOU score for {image_id} = {iou_score}")
        print("".ljust(50,'-'))
        results.extend((image_id, iou_score))
    print("IOUs = ", results)
    print()
    print("".ljust(50,'-'))
    class_names = [ClassName(class_id).name for class_id in class_ids]
    print(f"Average Class IOU scores")
    for j in range(5): #classchange
        print(class_names[j].ljust(14) + ": " + str((total_class_iou[j]/len(image_ids))))
    print(f"------ Average IOU score = {total_iou_score/len(image_ids)} ------\n".ljust(50,'-'))
    print("Prediction time: {}. \nAverage time: {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
############################################################
# Training
############################################################
if __name__ == '__main__':
    import argparse
    # Parse command line arguments.
    # NOTE(review): the positional help and the fallback error message only
    # list 'train' and 'overlay', but 'evaluate' is also handled below.
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN to detect classes for autonomous driving.')
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'overlay'")
    parser.add_argument('--dataset', required=False,
                        metavar="/path/to/auto/dataset/",
                        help='Directory of the required dataset')
    parser.add_argument('--weights', required=True,
                        metavar="/path/to/weights.h5",
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--image', required=False,
                        metavar="path or URL to image",
                        help='Image to apply the color overlay on')
    parser.add_argument('--video', required=False,
                        metavar="path or URL to video",
                        help='Video to apply the color overlay on')
    args = parser.parse_args()
    # Validate arguments: training needs a dataset, overlay needs media input.
    if args.command == "train":
        assert args.dataset, "Argument --dataset is required for training"
    elif args.command == "overlay":
        assert args.image or args.video, \
            "Provide --image or --video to apply color overlay"
    print("Weights: ", args.weights)
    print("Dataset: ", args.dataset)
    print("Logs: ", args.logs)
    dataset_dir = args.dataset
    # Configurations: training uses the full config; inference restricts the
    # effective batch size to a single image (GPU_COUNT * IMAGES_PER_GPU = 1).
    if args.command == "train":
        config = AutoConfig()
    else:
        class InferenceConfig(AutoConfig):
            # Set batch size to 1 since we'll be running inference on
            # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
        config = InferenceConfig()
    config.display()
    # Create model in the mode matching the requested command.
    if args.command == "train":
        model = modellib.MaskRCNN(mode="training", config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.logs)
    # Select weights file to load: a named preset ('coco', 'last',
    # 'imagenet') or an explicit .h5 path.
    if args.weights.lower() == "coco":
        weights_path = COCO_WEIGHTS_PATH
        # Download weights file
        if not os.path.exists(weights_path):
            utils.download_trained_weights(weights_path)
    elif args.weights.lower() == "last":
        # Find last trained weights
        weights_path = model.find_last()
    elif args.weights.lower() == "imagenet":
        # Start from ImageNet trained weights
        weights_path = model.get_imagenet_weights()
    else:
        weights_path = args.weights
    # Load weights
    print("Loading weights ", weights_path)
    if args.weights.lower() == "coco":
        # Exclude the last layers because they require a matching
        # number of classes
        model.load_weights(weights_path, by_name=True, exclude=[
            "mrcnn_class_logits", "mrcnn_bbox_fc",
            "mrcnn_bbox", "mrcnn_mask"])
    else:
        model.load_weights(weights_path, by_name=True)
    # Train or evaluate
    if args.command == "train":
        train(model)
    elif args.command == "overlay":
        detect_and_overlay(model, image_path=args.image,
                           video_path=args.video)
    elif args.command == "evaluate":
        # Validation dataset
        dataset_val = AutoDataset()
        dataset_val.load_auto(args.dataset, "val")
        dataset_val.prepare()
        # print("Running COCO evaluation on {} images.".format(args.limit))
        evaluate(model, dataset_val)
    else:
        # NOTE(review): message omits the supported 'evaluate' command.
        print("'{}' is not recognized. "
"Use 'train' or 'overlay'".format(args.command)) | 2.34375 | 2 |
Converter/TensorRT/Source/Build.py | EmilPi/PuzzleLib | 52 | 12763504 | <filename>Converter/TensorRT/Source/Build.py
import sys, os
import pybind11
from PuzzleLib.Compiler.Toolchain import guessToolchain, guessNVCCToolchain
from PuzzleLib.Compiler.BuildSystem import Rule, LinkRule, build
def buildDriver(debugmode=0):
    """Compile and link the TensorRT driver extension, returning the built target path.

    debugmode: 0 builds optimized; any value > 0 builds with debug info and
        no optimization (see prepareCompilers).
    """
    cc, nvcc = prepareCompilers(debugmode=debugmode)
    rules, linkrule = createRules(cc, nvcc)
    build(rules, linkrule)
    # Clean intermediate artifacts from the output directory after linking.
    cc.clearPath("..")
    return linkrule.target
def findLibraryPath():
if sys.platform == "linux":
CUDA_PATH = "/usr/local/cuda"
elif sys.platform == "win32":
CUDA_PATH = os.environ["CUDA_PATH"]
else:
raise NotImplementedError(sys.platform)
TRT_PATH = os.environ.get("TRT_PATH", None)
TRT_PATH = CUDA_PATH if TRT_PATH is None else TRT_PATH
return CUDA_PATH, TRT_PATH
def prepareCompilers(debugmode=0):
    """Configure the host C++ and NVCC toolchains with CUDA/TensorRT paths.

    debugmode: 0 -> optimization level 4, no debug info; > 0 -> no
        optimization, debug level 3.

    Returns (cc, nvcc) toolchain objects ready for createRules.
    """
    level, debuglevel = (0, 3) if debugmode > 0 else (4, 0)
    cc = guessToolchain(verbose=2).withOptimizationLevel(level=level, debuglevel=debuglevel).cppMode(True)
    nvcc = guessNVCCToolchain(verbose=2).withOptimizationLevel(level=level, debuglevel=debuglevel)
    CUDA_PATH, TRT_PATH = findLibraryPath()
    # Library layout differs per platform: lib64 + nvcaffe_parser on Linux,
    # lib/x64 + nvparsers on Windows.
    if sys.platform == "linux":
        cc.includeDirs.extend(
            (pybind11.get_include(user=True), "/usr/local/include/python%s.%s" % sys.version_info[:2])
        )
        cc.addLibrary(
            "tensorrt",
            [os.path.join(TRT_PATH, "include")],
            [os.path.join(TRT_PATH, "lib64")],
            ["cudart", "nvinfer", "nvcaffe_parser", "nvonnxparser"]
        )
        cc.addLibrary(
            "cuda",
            [os.path.join(CUDA_PATH, "include")],
            [os.path.join(CUDA_PATH, "lib64")],
            ["cudnn"]
        )
    elif sys.platform == "win32":
        cc.addLibrary(
            "tensorrt",
            [os.path.join(TRT_PATH, "include")],
            [os.path.join(TRT_PATH, "lib/x64")],
            ["cudart", "nvinfer", "nvparsers", "nvonnxparser"]
        )
        cc.addLibrary(
            "cuda",
            [os.path.join(CUDA_PATH, "include")],
            [os.path.join(CUDA_PATH, "lib/x64")],
            ["cudnn"]
        )
    else:
        raise NotImplementedError(sys.platform)
    return cc, nvcc
def createRules(cc, nvcc):
    """Define the build graph: one rule per translation unit plus the link rule.

    CUDA sources (.cu) compile with nvcc; plain C++ sources with cc.
    Returns (rules, linkrule) for the build() driver.
    """
    rules = [
        Rule(target="InstanceNorm2D" + nvcc.oext, deps=[
            "Plugins.h",
            "InstanceNorm2D.cpp"
        ], toolchain=cc),
        Rule(target="ReflectPad1D" + nvcc.oext, deps=[
            "Plugins.h",
            "ReflectPad1D.cu"
        ], toolchain=nvcc),
        Rule(target="Plugins%s" % cc.oext, deps=[
            "Plugins.h",
            "Plugins.cpp"
        ], toolchain=cc),
        Rule(target="Driver%s" % cc.oext, deps=[
            "Plugins.h",
            "Driver.cpp"
        ], toolchain=cc)
    ]
    # Link all objects into the Python extension module one directory up.
    linkrule = LinkRule(target="../Driver%s" % cc.pydext, deps=rules, toolchain=cc)
    return rules, linkrule
def main():
    """Entry point: build the driver in release (non-debug) mode."""
    return buildDriver(debugmode=0)
if __name__ == "__main__":
main()
| 1.984375 | 2 |
ext/roles.py | kenunotdeveloper/leaks.ro-discord-bot | 0 | 12763505 | <reponame>kenunotdeveloper/leaks.ro-discord-bot<gh_stars>0
import discord
from discord.ext import commands
import os, datetime
import re
import asyncio
import aiohttp
import json
import feedparser
import time
class Roles:
    """Cog that mirrors leaks.ro forum RSS feeds into Discord and lets
    members toggle per-category notification roles.

    The four feed-polling sections and four subscribe subcommands of the
    original were copy-pasted; they are deduplicated here into the private
    helpers _poll_feed and _toggle_role without changing any message text,
    role/channel ids, URLs or timing.
    """

    def __init__(self, bot):
        self.bot = bot
        self.loop = bot.loop
        # Per-feed state dicts; each holds 'last_modified' once the feed's
        # baseline publish time has been recorded.
        self._leaks = {}
        self._it = {}
        self._web = {}
        self._samp = {}
        self.loop.create_task(self.feedparser())

    async def _poll_feed(self, url, role, server, channel, state, label):
        """Fetch one RSS feed and announce its newest post when it changed.

        First pass (empty state): record the feed's publish time only.
        Later passes: if the publish time is unchanged, wait a second;
        otherwise ping the category role with the newest entry and clear
        the state so the next pass re-records the baseline.
        """
        async with aiohttp.ClientSession() as ses:
            async with ses.get(url) as resp:
                raw = await resp.text()
        feed = feedparser.parse(raw)
        published = datetime.datetime.strptime(
            feed.feed.published, "%a, %d %b %Y %H:%M:%S %z").replace(tzinfo=None)
        if 'last_modified' not in state:
            state['last_modified'] = published
            print("\033[1;90m[{time}]\033[1;37m [{label}] content parsed".format(
                time=time.strftime("%H:%M:%S"), label=label))
            return
        if state['last_modified'] == published:
            await asyncio.sleep(1)
            return
        # Temporarily make the role mentionable so the ping notifies members.
        await self.bot.edit_role(server, role, mentionable=True)
        await self.bot.send_message(
            channel, "{1.mention} ➜ **{0.title}**\n{0.link}".format(feed.entries[0], role))
        await self.bot.edit_role(server, role, mentionable=False)
        del state['last_modified']
        print("\033[1;90m[{time}]\033[1;37m [{label}] message sent".format(
            time=time.strftime("%H:%M:%S"), label=label))

    async def feedparser(self):
        """Background task: poll the four forum RSS feeds forever."""
        await self.bot.wait_until_ready()
        channel = self.bot.get_channel("452115433164767233")
        server = self.bot.get_server("451066437789024256")
        base = "http://tm5qwtee.leaks.ro/index.php?/rss/forums/"
        # (url, notification role, state dict, log label) -- same order as before.
        feeds = [
            (base + "6-samp-rss/", discord.utils.get(server.roles, id="452409982206345216"), self._samp, "SAMP"),
            (base + "3-leaks-rss/", discord.utils.get(server.roles, id="452114719390564362"), self._leaks, "LEAKS"),
            (base + "4-it-rss/", discord.utils.get(server.roles, id="452115033686671360"), self._it, "IT"),
            (base + "5-web-rss/", discord.utils.get(server.roles, id="452115127936745472"), self._web, "WEB"),
        ]
        print("\033[1;90m[{time}]\033[1;37m [FEED] fetching information, please wait..".format(time=time.strftime("%H:%M:%S")))
        while 1:
            for url, role, state, label in feeds:
                # TODO (carried over from original): wrap in try/except so a
                # failed request (missing feed.feed.published) does not kill
                # the task.
                await self._poll_feed(url, role, server, channel, state, label)
            print("\033[1;90m[{time}]\033[1;37m [ALL] categories have been refreshed".format(time=time.strftime("%H:%M:%S")))
            await asyncio.sleep(120)

    async def _toggle_role(self, ctx, role_id, label):
        """Add the role to the invoking member if missing, else remove it,
        confirming either way in Romanian with the given category label."""
        role = discord.utils.get(ctx.message.server.roles, id=role_id)
        if role not in ctx.message.author.roles:
            await self.bot.add_roles(ctx.message.author, role)
            await self.bot.say("Ți-a fost adăugat gradul de `{}`. Acum vei primi notificări atunci când se postează ceva nou.".format(label))
        else:
            await self.bot.remove_roles(ctx.message.author, role)
            await self.bot.say("Ți-a fost scos gradul de `{}`.".format(label))

    @commands.group(pass_context=True)
    async def subscribe(self, ctx):
        if ctx.invoked_subcommand is None:
            return await self.bot.say("Categorii disponibile: `samp`, `leaks`, `it`, `web`.\nPentru a te abona la o categorie, foloseste comanda: `/subscribe <categorie>`.")

    @subscribe.command(pass_context=True)
    async def samp(self, ctx):
        await self._toggle_role(ctx, "452409982206345216", "SA:MP")

    @subscribe.command(pass_context=True)
    async def leaks(self, ctx):
        await self._toggle_role(ctx, "452114719390564362", "LEAKS")

    @subscribe.command(pass_context=True)
    async def it(self, ctx):
        await self._toggle_role(ctx, "452115033686671360", "IT")

    @subscribe.command(pass_context=True)
    async def web(self, ctx):
        await self._toggle_role(ctx, "452115127936745472", "WEB")

    @commands.command(pass_context=True)
    async def nsfw(self, ctx):
        # NSFW is a plain access role, not a feed subscription, so its
        # confirmation message has no notification sentence.
        role = discord.utils.get(ctx.message.server.roles, id="451079198627594240")
        if role not in ctx.message.author.roles:
            await self.bot.add_roles(ctx.message.author, role)
            await self.bot.say("Ți-a fost adăugat gradul de `NSFW`.")
        else:
            await self.bot.remove_roles(ctx.message.author, role)
            await self.bot.say("Ți-a fost scos gradul de `NSFW`.")
def setup(bot):
    """discord.py extension entry point: register the Roles cog on the bot."""
    bot.add_cog(Roles(bot))
| 2.359375 | 2 |
11-dnn-keras/mnist_ann.py | iproduct/coulse-ml | 1 | 12763506 | import datetime
from keras import layers
from keras import models
from keras.datasets import mnist
from keras.utils import to_categorical
import tensorflow as tf
import os
if __name__ == '__main__':
    # Point XLA at a local CUDA install (Windows-specific path) and enable
    # memory growth so TensorFlow does not grab all GPU memory up front.
    os.environ["XLA_FLAGS"] = '--xla_gpu_cuda_data_dir="D:/Program Files/CUDA/v11.2/development"'
    physical_devices = tf.config.list_physical_devices('GPU')
    # NOTE(review): raises IndexError when no GPU is present -- confirm intended.
    tf.config.experimental.set_memory_growth(physical_devices[0], True) # important!
    tf.config.optimizer.set_jit(True)
    # Flatten the 28x28 MNIST images to 784-vectors and scale to [0, 1].
    (train_images, train_labels), (test_images, test_labels) = mnist.load_data()
    train_images = train_images.reshape((60000, 28 * 28))
    train_images = train_images.astype('float32') / 255
    test_images = test_images.reshape((10000, 28 * 28))
    test_images = test_images.astype('float32') / 255
    # One-hot encode the 10 digit classes.
    train_labels = to_categorical(train_labels)
    test_labels = to_categorical(test_labels)
    # Simple 2-layer fully-connected classifier: 784 -> 512 (ReLU) -> 10 (softmax).
    network = models.Sequential()
    network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))
    network.add(layers.Dense(10, activation='softmax'))
    network.summary()
    network.compile(optimizer='rmsprop',
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])
    # Log training curves for TensorBoard under a timestamped directory.
    logdir = os.path.join("logs", datetime.datetime.now().strftime("!%Y%m%d-%H%M%S"))
    tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
    network.fit(train_images, train_labels, epochs=5, batch_size=128, callbacks=[tensorboard_callback])
    test_loss, test_acc = network.evaluate(test_images, test_labels)
    print(f'Test Accuracy: {test_acc}')
    print(f'Test Loss: {test_loss}')
    print('Demo finished')
| 2.515625 | 3 |
proj04/proj04.py | NathanaelLeclercq/VSA-literate-waddlee | 0 | 12763507 | # Name: <NAME>
# Date: June 13
"""
proj04
Asks the user for a string and prints out whether or not the string is a palindrome.
"""\
mystring = raw_input("enter the word to see if it is a palindrome or not: ")
wordlist = [mystring]
#print wordlist
stringlist = []
for letter in mystring:
stringlist.append(letter)
#print stringlist
stringlist_reverse = []
lst = stringlist
while lst:
stringlist_reverse.append(lst[-1])
lst = lst[:-1]
print stringlist_reverse
# if stringlist_reverse == stringlist:
# print "This is a Palindrome"
# else:
# print "This is not a Palindrome" | 4.1875 | 4 |
setup.py | MartinBorcin/jsonl-to-conll | 5 | 12763508 | <filename>setup.py
from setuptools import setup, find_packages
from codecs import open  # encoding-aware open (Python 2 compatible)
from os import path
import re

here = path.abspath(path.dirname(__file__))

# Read the version string out of the package without importing it, so that
# installing does not require the package's runtime dependencies.
# (Raw string avoids invalid-escape warnings; the file handle is closed.)
with open('jsonl_to_conll/__init__.py') as init_file:
    version = re.search(
        r'^__version__\s*=\s*"(.*)"',
        init_file.read(),
        re.M
    ).group(1)

# The long description shown on PyPI comes from the README.
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
    long_description = readme_file.read()

# One requirement per line; drop the trailing newline of each line.
with open('requirements.txt', 'r') as req_file:
    install_requires = [line.split("\n")[0] for line in req_file.readlines()]

setup(
    name='jsonl_to_conll',
    packages=find_packages(),  # this must be the same as the name above
    version=version,
    description='A simple tool to convert JSONL files to CONLL',
    long_description=long_description,
    author='joeyism',
    author_email='<EMAIL>',
    entry_points={
        "console_scripts": ['jsonl-to-conll = jsonl_to_conll.cli:main']
    },
    url='https://github.com/joeyism/jsonl-to-conll',  # URL to the github repo
    download_url='https://github.com/joeyism/jsonl-to-conll/archive/{}.tar.gz'.format(version),
    keywords=['jsonl', 'conll', 'converter', 'convert', 'machine', 'learning',
              'training', 'train', 'data', 'ETL'],
    install_requires=install_requires,
    classifiers=[],
)
scale_down.py | roman-smirnov/mmn-11-digital-image-processing | 0 | 12763509 | import cv2
import numpy as np
from skimage.viewer import ImageViewer
def remove_rows(image, rows, cols):
    """Halve an image's height by keeping every other row (rows 1, 3, 5, ...).

    image: 2-D grayscale image array.
    rows, cols: dimensions of `image` (cols must not exceed image.shape[1]).

    Returns a new uint8 array of shape (rows // 2, cols) containing the
    odd-indexed rows of `image` -- the same selection the original per-row
    copy loop produced, done with one strided slice instead of O(rows)
    assignments.
    """
    kept = rows // 2
    # Rows 1, 3, ..., 2*kept - 1; astype always copies, matching the
    # original's freshly-allocated uint8 output.
    return image[1:2 * kept:2, :cols].astype(np.uint8)
# Demo: load a grayscale test image, halve its height, and display it.
img = cv2.imread('pirate.jpg', cv2.IMREAD_GRAYSCALE)
print(img.shape)
# Drop every other row to scale the image down vertically by a factor of 2.
img = remove_rows(img, img.shape[0], img.shape[1])
viewer = ImageViewer(img)
viewer.show()
| 3.140625 | 3 |
scrapy_bench/middlewares.py | whalebot-helmsman/scrapy-bench | 0 | 12763510 | import os
from scrapy import Request
class RandomPayloadMiddleware:
    """Scrapy spider middleware that inflates requests with random payload bytes."""

    def __init__(self, size):
        """
        Middleware adds a random payload to request's meta. Such behaviour
        simulates requests with large size and helps understand performance
        impact during usage of external storages (disk, message queues etc.)
        for requests.
        :param size: size of payload added in bytes
        :type size: int
        """
        self.size = size

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: payload size comes from the
        SCRAPY_BENCH_RANDOM_PAYLOAD_SIZE setting (0 when unset)."""
        return cls(crawler.settings.getint('SCRAPY_BENCH_RANDOM_PAYLOAD_SIZE'))

    def handle_request(self, smth):
        # Items and other non-Request objects pass through untouched.
        if not isinstance(smth, Request):
            return smth
        request = smth
        # Size 0 disables the middleware.
        if self.size == 0:
            return request
        request.meta['random_payload'] = os.urandom(self.size)
        return request

    def process_start_requests(self, start_requests, spider):
        # Decorate every start request with the payload.
        for request in start_requests:
            yield self.handle_request(request)

    def process_spider_output(self, response, result, spider):
        # Decorate requests yielded by spider callbacks; items pass through.
        for smth in result:
            yield self.handle_request(smth)
audream_v0.9/main.py | Lavabar/audream | 0 | 12763511 | from tkinter import Tk, Label
from tkinter.filedialog import askopenfilename
import fileinput
import threading
import pyaudio
import wave
import mywindows as win
root = Tk()
# Hide the default empty Tk root window; the visible UI is built by mywindows.
root.withdraw()
win.showReg(root)  # open the registration window on top of the hidden root
root.mainloop()
| 2.375 | 2 |
backend/src/error.py | rainflame/pith-api | 4 | 12763512 | import enum
class Errors(enum.Enum):
    """API error codes (negative integers).

    -1 and -2 are generic failures; -3 through -7 signal that a referenced
    entity does not exist; the remainder are request-specific validation
    errors.
    """

    SERVER_ERR = -1    # internal server error
    BAD_REQUEST = -2   # malformed JSON request
    DNE_BOARD = -3     # given board ID does not exist
    DNE_DISC = -4      # given discussion ID does not exist
    DNE_UNIT = -5      # given unit ID does not exist
    DNE_LINK = -6      # given link ID does not exist
    DNE_USER = -7      # given user ID does not exist
    EXISTS_NAME = -8   # nickname already in use in the discussion
    NOT_CHAT = -9      # given unit is not a chat unit
    NOT_BOARD = -10    # given unit is not a board unit
| 2.953125 | 3 |
PokeBot/Cache/Cache.py | danrneal/PokeBot | 0 | 12763513 | import logging
from datetime import datetime
log = logging.getLogger('Cache')
class Cache(object):
    """In-memory expiry cache for monster, egg and raid notifications.

    Each category maps an id to its expiration datetime (naive UTC);
    clean_and_save() drops entries whose expiration has passed.
    """

    def __init__(self):
        self._mon_hist = {}
        self._egg_hist = {}
        self._raid_hist = {}

    @staticmethod
    def _get_or_set(store, key, expiration):
        """Record `expiration` under `key` when provided, then return the
        stored value (None when the key is unknown)."""
        if expiration is not None:
            store[key] = expiration
        return store.get(key)

    def monster_expiration(self, mon_id, expiration=None):
        return self._get_or_set(self._mon_hist, mon_id, expiration)

    def egg_expiration(self, egg_id, expiration=None):
        return self._get_or_set(self._egg_hist, egg_id, expiration)

    def raid_expiration(self, raid_id, expiration=None):
        return self._get_or_set(self._raid_hist, raid_id, expiration)

    def clean_and_save(self):
        """Purge expired entries (no persistence is performed despite the name)."""
        self._clean_hist()

    def _clean_hist(self):
        for hist in (self._mon_hist, self._egg_hist, self._raid_hist):
            now = datetime.utcnow()
            # Collect first, then delete, to avoid mutating during iteration.
            expired = [key for key, expiration in hist.items() if expiration < now]
            for key in expired:
                del hist[key]
| 3.078125 | 3 |
utilities/region.py | OCHA-DAP/hdx-scraper-covid-viz | 6 | 12763514 | <filename>utilities/region.py<gh_stars>1-10
import logging
import sys
from hdx.scraper import add_population
from hdx.scraper.readers import read_hdx
from hdx.utilities.dictandlist import dict_of_lists_add, dict_of_sets_add
from hdx.utilities.text import get_fraction_str, get_numeric_if_possible, number_format
logger = logging.getLogger(__name__)
class Region:
    """Aggregates national indicator columns into regional (and global) rows.

    Built from an HDX dataset mapping ISO3 country codes to regions; two
    pseudo-regions, "GHO" (all GHO countries) and "HRPs" (HRP countries),
    are prepended to the region list.
    """

    def __init__(self, region_config, today, downloader, gho_countries, hrp_countries):
        """Read the region mapping dataset and build country -> region lookups.

        region_config: dict with dataset location plus the "iso3" and
            "region" column names and aggregation specs.
        today: date used by read_hdx when resolving the dataset.
        downloader: HDX downloader object.
        gho_countries / hrp_countries: ISO3 lists defining the GHO and HRPs
            pseudo-regions.
        """
        self.region_config = region_config
        _, iterator = read_hdx(downloader, region_config, today=today)
        # One region per country...
        self.iso3_to_region = dict()
        # ...but a country can belong to several buckets once GHO/HRPs are added.
        self.iso3_to_region_and_hrp = dict()
        regions = set()
        for row in iterator:
            countryiso = row[region_config["iso3"]]
            if countryiso and countryiso in gho_countries:
                region = row[region_config["region"]]
                if region == "NO COVERAGE":
                    continue
                regions.add(region)
                dict_of_sets_add(self.iso3_to_region_and_hrp, countryiso, region)
                self.iso3_to_region[countryiso] = region
        self.regions = sorted(list(regions))
        region = "HRPs"
        self.regions.insert(0, region)
        for countryiso in hrp_countries:
            dict_of_sets_add(self.iso3_to_region_and_hrp, countryiso, region)
        region = "GHO"
        self.regions.insert(0, region)
        for countryiso in gho_countries:
            dict_of_sets_add(self.iso3_to_region_and_hrp, countryiso, region)
        self.hrp_countries = hrp_countries

    @staticmethod
    def get_float_or_int(valuestr):
        """Parse a numeric string to float or int; '' and 'N/A' become None."""
        if not valuestr or valuestr == "N/A":
            return None
        if "." in valuestr:
            return float(valuestr)
        else:
            return int(valuestr)

    @classmethod
    def get_numeric(cls, valuestr):
        """Return a numeric value for `valuestr`.

        Strings may be pipe-separated ("1|2|3") and are summed; a string
        with no parseable parts yields ''. Non-strings pass through.
        """
        if isinstance(valuestr, str):
            total = 0
            hasvalues = False
            for value in valuestr.split("|"):
                value = cls.get_float_or_int(value)
                if value:
                    hasvalues = True
                    total += value
            if hasvalues is False:
                return ""
            return total
        return valuestr

    @staticmethod
    def get_headers_and_columns(desired_headers, input_headers, input_columns, message):
        """Select the subset of input headers/columns named in desired_headers.

        input_headers is a pair (names, hxltags); missing names are logged
        using `message` (a format string with one placeholder) and skipped.
        Returns ([names, hxltags], columns) in desired order.
        """
        headers = [list(), list()]
        columns = list()
        for header in desired_headers:
            try:
                index = input_headers[0].index(header)
                headers[0].append(header)
                headers[1].append(input_headers[1][index])
                columns.append(input_columns[index])
            except ValueError:
                logger.error(message.format(header))
        return headers, columns

    def should_process(self, process_info, region, countryiso):
        """Return False when a column's "subset" restricts this country/region.

        Currently the only subset is "hrps": non-HRP countries are excluded
        from every region except the all-encompassing "GHO".
        """
        subset = process_info.get("subset")
        if subset:
            # "hrps" is the only subset defined right now
            if (
                subset == "hrps"
                and region != "GHO"
                and countryiso not in self.hrp_countries
            ):
                return False
        return True

    @classmethod
    def process(cls, process_info, valdicts, regional_headers, index):
        """Collapse the per-region value lists of valdicts[-1] in place.

        Supported actions: "sum"/"mean" (numeric aggregation, floats are
        formatted), "range" ("min-max" string), "eval" (formula referencing
        earlier regional headers by name, evaluated per region).
        """
        valdict = valdicts[-1]
        action = process_info["action"]
        if action == "sum" or action == "mean":
            for region, valuelist in valdict.items():
                total = ""
                novals = 0
                for valuestr in valuelist:
                    value = ""
                    if isinstance(valuestr, int) or isinstance(valuestr, float):
                        value = valuestr
                    else:
                        if valuestr:
                            value = cls.get_numeric(valuestr)
                    if value != "":
                        novals += 1
                        if total == "":
                            total = value
                        else:
                            total += value
                if action == "mean":
                    # total stays '' (a str) when no values were found.
                    if not isinstance(total, str):
                        total /= novals
                if isinstance(total, float):
                    valdict[region] = number_format(total, trailing_zeros=False)
                else:
                    valdict[region] = total
        elif action == "range":
            for region, valuelist in valdict.items():
                # NOTE(review): shadows the min/max builtins within this loop.
                min = sys.maxsize
                max = -min
                for valuestr in valuelist:
                    if valuestr:
                        value = cls.get_numeric(valuestr)
                        if value > max:
                            max = value
                        if value < min:
                            min = value
                if min == sys.maxsize or max == -sys.maxsize:
                    valdict[region] = ""
                else:
                    if isinstance(max, float):
                        max = number_format(max, trailing_zeros=False)
                    if isinstance(min, float):
                        min = number_format(min, trailing_zeros=False)
                    valdict[region] = f"{str(min)}-{str(max)}"
        elif action == "eval":
            formula = process_info["formula"]
            for region, valuelist in valdict.items():
                toeval = formula
                # Substitute each previously computed column's regional value
                # into the formula by header name, then evaluate.
                for j in range(index):
                    value = valdicts[j].get(region, "")
                    if value == "":
                        value = None
                    toeval = toeval.replace(regional_headers[0][j], str(value))
                valdict[region] = eval(toeval)

    def get_regional(
        self,
        regionlookup,
        national_headers,
        national_columns,
        population_lookup=None,
        *args,
    ):
        """Aggregate national columns into one dict-per-column of regional values.

        regionlookup: object exposing iso3_to_region_and_hrp (normally a Region).
        national_headers/national_columns: parallel (headers, hxltags) and
            {iso3: value} column structures.
        population_lookup: when given, only the "Population" column is summed
            and the result is fed to add_population.
        *args: optional (headers, valdicts) pairs whose values override the
            computed regional values for matching hxltags.

        Returns (regional_headers, valdicts).
        """
        if population_lookup is None:
            process_cols = self.region_config["process_cols"]
        else:
            process_cols = {"Population": {"action": "sum"}}
        desired_headers = process_cols.keys()
        message = "Regional header {} not found in national headers!"
        regional_headers, regional_columns = self.get_headers_and_columns(
            desired_headers, national_headers, national_columns, message
        )
        valdicts = list()
        # Single-source columns: gather each country's value into every region
        # it belongs to, then collapse with the configured action.
        for i, header in enumerate(regional_headers[0]):
            valdict = dict()
            valdicts.append(valdict)
            process_info = process_cols[header]
            column = regional_columns[i]
            for countryiso in column:
                for region in regionlookup.iso3_to_region_and_hrp[countryiso]:
                    if not self.should_process(process_info, region, countryiso):
                        continue
                    dict_of_lists_add(valdict, region, column[countryiso])
            self.process(process_info, valdicts, regional_headers, i)
        if population_lookup is None:
            # Multi-source columns: combine several national headers, taking at
            # most one (the first non-empty) value per region/country pair.
            multi_cols = self.region_config.get("multi_cols", list())
            for header in multi_cols:
                multi_info = multi_cols[header]
                input_headers = multi_info["headers"]
                ignore = False
                for input_header in input_headers:
                    if input_header not in national_headers[0]:
                        logger.error(message.format(input_header))
                        ignore = True
                        break
                if ignore:
                    continue
                regional_headers[0].append(header)
                regional_headers[1].append(multi_info["hxltag"])
                found_region_countries = set()
                valdict = dict()
                valdicts.append(valdict)
                for i, orig_header in enumerate(input_headers):
                    index = national_headers[0].index(orig_header)
                    column = national_columns[index]
                    for countryiso in column:
                        for region in regionlookup.iso3_to_region_and_hrp[countryiso]:
                            if not self.should_process(multi_info, region, countryiso):
                                continue
                            key = f"{region}|{countryiso}"
                            if key in found_region_countries:
                                continue
                            value = column[countryiso]
                            if value:
                                found_region_countries.add(key)
                                dict_of_lists_add(valdict, region, value)
                self.process(
                    multi_info, valdicts, regional_headers, len(regional_headers[0]) - 1
                )
        # Apply overrides supplied by the caller, matched by hxltag.
        for arg in args:
            gheaders, gvaldicts = arg
            if gheaders:
                for i, header in enumerate(gheaders[1]):
                    try:
                        j = regional_headers[1].index(header)
                    except ValueError:
                        continue
                    valdicts[j].update(gvaldicts[i])
        add_population(population_lookup, regional_headers, valdicts)
        logger.info("Processed regional")
        return regional_headers, valdicts

    def get_world(self, regional_headers, regional_columns):
        """Extract global values from the regional output.

        The configured "global" headers are selected and each column's "GHO"
        (all-countries) value is re-keyed under "global".
        Returns (world_headers, global_columns).
        """
        desired_headers = self.region_config["global"]
        message = "Regional header {} to be used as global not found!"
        world_headers, world_columns = self.get_headers_and_columns(
            desired_headers, regional_headers, regional_columns, message
        )
        global_columns = list()
        for column in world_columns:
            global_columns.append({"global": column["GHO"]})
        return world_headers, global_columns
| 2.375 | 2 |
jolang/interpreter/stdlib/builtin_types/Array.py | jonatan1609/JoLang | 25 | 12763515 | from .object import Object
from .operator import Operator, Attribute
from .String import String
from .Integer import Integer
from .Null import Null
class Array(Object):
    """JoLang array object wrapping a Python list in self._obj."""

    def __init__(self, items):
        super().__init__()
        self._obj = items

    @Operator("Index", compatible=["Integer"])
    def index(self, start, stop, step):
        """Indexing/slicing: a[i] returns the element, a[start:stop:step]
        returns a new Array.  Missing slice parts arrive as falsy values;
        their underlying Python values are unwrapped via the _obj attribute.
        """
        # Single element when only `start` is given.
        # NOTE(review): relies on `start` being truthy -- behaviour for an
        # Integer 0 index depends on Object truthiness; confirm.
        if start and not (stop or step):
            return self._obj[getattr(start, "_obj", None)]
        return Array(self._obj[getattr(start, "_obj", None):getattr(stop, "_obj", None):getattr(step, "_obj", None)])

    @Attribute("append")
    def append(self, item):
        """Append, first boxing raw Python values into JoLang objects."""
        if isinstance(item, str):
            item = String(item)
        elif item is None:
            item = Null()
        elif isinstance(item, int):
            item = Integer(item)
        elif isinstance(item, list):
            item = Array(item)
        self._obj.append(item)
| 2.953125 | 3 |
bread/tests/urls.py | basxsoftwareassociation/bread | 13 | 12763516 | <reponame>basxsoftwareassociation/bread
from django.contrib import admin
from django.urls import include, path
# Test-suite URL configuration: Django admin plus the bread app's routes.
urlpatterns = [
    path("admin/", admin.site.urls),
    path("bread/", include("bread.urls")),
]
| 1.59375 | 2 |
purequant/example/double_moving_average_strategy/double_moving_average_strategy.py | Silverbulelt/PureQuant | 3 | 12763517 | <gh_stars>1-10
from purequant.indicators import Indicators
from purequant.trade import OkexFutures
from purequant.position import Position
from purequant.market import Market
from purequant.utils.push import dingtalk
from purequant.storage import storage
from purequant.utils.time_tools import get_localtime
from purequant.config import config
from purequant.utils.logger import Logger
class Strategy:
    def __init__(self, instrument_id, time_frame, fast_length, slow_length, long_stop, short_stop):
        """Double moving-average (dual MA crossover) strategy.

        Goes long on a golden cross (fast MA crossing above slow MA) and
        short on a death cross, with percentage-based stop losses.
        """
        print("{}程序开始运行!".format(get_localtime()))
        config.loads('config.json') # load the configuration file
        # On the first run, persist the starting total-asset figure to the database
        start_asset = 20
        storage.save_asset_and_profit('trade', 'asset', get_localtime(), 0, start_asset)
        # Read back the most recent stored total-asset figure
        self.total_asset = storage.read_mysql_datas(0, 'trade', 'asset', 'asset', '>')[-1][-1]
        self.counter = 0 # per-bar action counter; gates re-entry after a stop-loss within the same bar
        self.long_stop = long_stop # long stop-loss ratio (exit long when price <= entry * long_stop)
        self.short_stop = short_stop # short stop-loss ratio (exit short when price >= entry * short_stop)
        self.access_key = config.access_key # API credentials read from the config file
        self.secret_key = config.secret_key
        self.passphrase = config.passphrase
        self.instrument_id = instrument_id # contract ID
        self.time_frame = time_frame # candlestick (k-line) period
        self.fast_length = fast_length # fast moving-average length
        self.slow_length = slow_length # slow moving-average length
        self.exchange = OkexFutures(self.access_key, self.secret_key, self.passphrase, self.instrument_id) # initialize exchange client
        self.position = Position(self.exchange, self.instrument_id, self.time_frame) # initialize position helper
        self.market = Market(self.exchange, self.instrument_id, self.time_frame) # initialize market-data helper
        self.logger = Logger("config.json") # initialize logger
        self.indicators = Indicators(self.exchange, self.instrument_id, self.time_frame)
    def begin_trade(self):
        """Run one polling step: compute signals, open/close positions, apply stops.

        Exceptions are caught and logged so the outer polling loop keeps running.
        """
        try:
            # Compute strategy signals from the two moving averages
            fast_ma = self.indicators.MA(self.fast_length)
            slow_ma = self.indicators.MA(self.slow_length)
            # Crossover detected on the two most recent *closed* bars ([-2] vs [-3])
            cross_over = fast_ma[-2] >= slow_ma[-2] and fast_ma[-3] < slow_ma[-3]
            cross_below = slow_ma[-2] >= fast_ma[-2] and slow_ma[-3] < fast_ma[-3]
            if self.indicators.BarUpdate():
                self.counter = 0
            if self.counter < 1:
                # Open/close positions according to the crossover signals.
                # Order size = total assets / last price / contract value.
                if cross_over: # on golden cross
                    if self.position.amount() == 0:
                        info = self.exchange.buy(None, round(self.total_asset/self.market.last()/self.market.contract_value()), 4)
                        dingtalk(info)
                    if self.position.direction() == 'short':
                        # Close the short, book the profit, then reverse to long
                        profit = self.position.short_profit()
                        self.total_asset += profit
                        storage.save_asset_and_profit('trade', 'asset', get_localtime(), profit, self.total_asset)
                        info = self.exchange.BUY(None, self.position.amount(), None, round(self.total_asset/self.market.last()/self.market.contract_value()), 4)
                        dingtalk(info)
                if cross_below: # on death cross
                    if self.position.amount() == 0:
                        info = self.exchange.sellshort(None, round(self.total_asset/self.market.last()/self.market.contract_value()), 4)
                        dingtalk(info)
                    if self.position.direction() == 'long':
                        # Close the long, book the profit, then reverse to short
                        profit = self.position.long_profit()
                        self.total_asset += profit
                        storage.save_asset_and_profit('trade', 'asset', get_localtime(), profit, self.total_asset)
                        info = self.exchange.SELL(None, self.position.amount(), None, round(self.total_asset/self.market.last()/self.market.contract_value()), 4)
                        dingtalk(info)
            # Stop-loss handling; bumps counter so we don't re-enter this bar
            if self.position.amount() > 0:
                if self.position.direction() == 'long' and self.market.last() <= self.position.price() * self.long_stop:
                    profit = self.position.long_profit()
                    self.total_asset += profit
                    storage.save_asset_and_profit('trade', 'asset', get_localtime(), profit, self.total_asset)
                    info = self.exchange.sell(None, self.position.amount(), 4)
                    dingtalk(info)
                    self.counter += 1
                if self.position.direction() == 'short' and self.market.last() >= self.position.price() * self.short_stop:
                    profit = self.position.short_profit()
                    self.total_asset += profit
                    storage.save_asset_and_profit('trade', 'asset', get_localtime(), profit, self.total_asset)
                    info = self.exchange.buytocover(None, self.position.amount(), 4)
                    dingtalk(info)
                    self.counter += 1
        except Exception as msg:
            self.logger.error(msg)
if __name__ == "__main__":
    # Run the double moving-average strategy in an endless polling loop.
    # (Bug fix: removed non-Python dataset residue that was fused onto the
    # final line and made the file unparseable.)
    strategy = Strategy("TRX-USDT-201225", "1m", 10, 20, 0.95, 1.05)
    while True:
        strategy.begin_trade()
src/main/resources/scripts/plot.classification.py | pbloem/motive | 6 | 12763518 | # -*- coding: utf-8 -*-
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as p
import numpy as n
import pylab
import scipy.stats as stats
import networkx as nwx
import glob
import builtins
from matplotlib.pyplot import margins
import os.path
import json
from sklearn import svm, cross_validation, datasets
def classify(data, cls):
    """Split *data* into labels/features and set up a classifier.

    Assumes the first row of *data* holds the class labels and the remaining
    rows the features — TODO confirm against the CSV layout.

    NOTE(review): this function is incomplete — `model` is never fitted and
    nothing is returned, so callers always receive None. Also,
    `sklearn.cross_validation` was removed in modern scikit-learn in favour
    of `sklearn.model_selection`.
    """
    y = data[0, :]
    X = data[1:, :]
    if(cls == 'svm'):
        model = svm.SVC(kernel='linear');
        X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)
        # split the data 80/20
# Classify each motive CSV. (Bug fix: the original ran this identical loop
# twice back-to-back; the duplicate has been removed.)
# After the loop, `data` holds the contents of the LAST file read.
for path in sorted(glob.glob('motive.*.csv')):
    data = n.genfromtxt(path, delimiter=',')
    svmresult = classify(data, 'svm')

# Plot the last loaded data matrix.
# NOTE(review): only the final file's data is plotted — confirm this is intended.
ax = p.subplot(111)
ax.plot(data[:, :], alpha=0.5)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(True)
ax.spines["left"].set_visible(True)
ax.get_xaxis().set_tick_params(which='both', top='off')
ax.set_xlabel('iterations')
ax.set_ylabel('perturbation')
p.savefig('am.perturbation.png')
concur/integrations/glfw.py | potocpav/python-concur | 40 | 12763519 | <reponame>potocpav/python-concur
"""Main integration back-end."""
import glfw
import OpenGL.GL as gl
import time
import imgui
from imgui.integrations.glfw import GlfwRenderer
from concur.integrations.opengl import create_offscreen_fb, get_fb_data
__pdoc__ = dict(create_window=False, begin_maximized_window=False, create_window_dock=False)
class PatchedGlfwRenderer(GlfwRenderer):
    """ Custom variant of Glfwrenderer in PyImGui:
    https://github.com/swistakm/pyimgui/blob/master/imgui/integrations/glfw.py

    This works around the issue that GLFW uses EN_US keyboard to specify the key codes
    in `keyboard_callback`. This meant that keyboard shortcuts were broken on non-querty
    keyboard layouts.

    See https://github.com/ocornut/imgui/issues/2959 for details.
    """
    def keyboard_callback(self, window, key, scancode, action, mods):
        # Temporary workaround: certain special keys make the translated code
        # path raise; fall back to the untranslated key so the app does not crash.
        try:
            _key = key
            if _key < 0x100:
                # Translate characters to the correct keyboard layout.
                key_name = glfw.get_key_name(key, 0)
                if key_name is not None:
                    _key = ord(key_name.upper())
            super(PatchedGlfwRenderer, self).keyboard_callback(window, _key, scancode, action, mods)
        # Bug fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Catch only ordinary exceptions.
        except Exception:
            super(PatchedGlfwRenderer, self).keyboard_callback(window, key, scancode, action, mods)
def create_window(window_name, width, height, visible=True, maximized=False):
    """ Create a GLFW window and make its OpenGL context current.

    Args:
        window_name: Title shown in the window's title bar.
        width, height: Desired window dimensions in screen coordinates.
        visible: Create the window hidden when False.
        maximized: Create the window maximized when True.

    Exits the process with status 1 if GLFW or the window cannot be initialized.
    """
    if not glfw.init():
        print("Could not initialize OpenGL context")
        exit(1)

    # OS X supports only forward-compatible core profiles from 3.2
    glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
    glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
    glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
    glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, gl.GL_TRUE)
    if not visible:
        glfw.window_hint(glfw.VISIBLE, glfw.FALSE)
    if maximized:
        glfw.window_hint(glfw.MAXIMIZED, glfw.TRUE)

    # Create a windowed mode window and its OpenGL context
    window = glfw.create_window(
        int(width), int(height), window_name, None, None
    )

    # Bug fix: check for creation failure *before* using the window. The
    # original called glfw.make_context_current(window) first, which would
    # crash on the None returned by a failed glfw.create_window.
    if not window:
        glfw.terminate()
        print("Could not initialize Window")
        exit(1)

    glfw.make_context_current(window)
    return window
def begin_maximized_window(name, glfw_window, menu_bar=False):
    """Open an undecorated, immovable ImGui window covering the whole GLFW window."""
    imgui.set_next_window_position(0, 0)
    imgui.set_next_window_size(*glfw.get_window_size(glfw_window))
    imgui.push_style_var(imgui.STYLE_WINDOW_ROUNDING, 0)
    imgui.push_style_var(imgui.STYLE_WINDOW_BORDERSIZE, 0)

    # Strip all window chrome and keep the window pinned behind everything else.
    flags = (
        imgui.WINDOW_NO_TITLE_BAR
        | imgui.WINDOW_NO_COLLAPSE
        | imgui.WINDOW_NO_RESIZE
        | imgui.WINDOW_NO_MOVE
        | imgui.WINDOW_NO_BRING_TO_FRONT_ON_FOCUS
        | imgui.WINDOW_NO_NAV_FOCUS
        | imgui.WINDOW_NO_DOCKING
    )
    if menu_bar:
        flags |= imgui.WINDOW_MENU_BAR

    imgui.begin(name, True, flags)
    imgui.pop_style_var(2)
def create_window_dock(glfw_window, menu_bar=False):
    """Create a transparent, full-window ImGui dock space.

    Opens a maximized background window with zero padding and places a dock
    space in it so user windows can be docked anywhere in the GLFW window.
    """
    imgui.set_next_window_bg_alpha(0)
    imgui.push_style_var(imgui.STYLE_WINDOW_PADDING, (0, 0))
    begin_maximized_window("Background Window", glfw_window, menu_bar)
    imgui.pop_style_var(1)
    # 1 << 3 is the "pass-through central node" dock-space flag — TODO confirm
    # against the pyimgui constant name.
    imgui.dock_space("Window Dock Space", 0., 0., 1 << 3)
    imgui.end()
def main(
        widget, name="Concur", width=640, height=480,
        fps=60, save_screencast=None, screencast_fps=60,
        menu_bar=False, maximized=False):
    """ Create a GLFW window, spin up the main loop, and display a given widget inside.

    To create a maximized window, pass width and height larger than the screen.

    Args:
        widget: The widget to display inside the window. When the widget returns, the application exits.
        name: Window name, displayed in the title bar and other OS outputs.
        width: Desired window width.
        height: Desired window height.
        fps: Maximum number of frames per second
        save_screencast: Capture and save the UI into a specified video file (experimental). Main window shouldn't
            be resized while the application is running when using this option.
        screencast_fps: Save the screencast video with a given FPS.
        menu_bar: Reserve space for `concur.widgets.main_menu_bar` at the top of the window.
        maximized: Create a maximized window.
    """
    if imgui.get_current_context() is None:
        imgui.create_context()

    # Set config flags
    imgui.get_io().config_flags |= imgui.CONFIG_DOCKING_ENABLE  # | imgui.CONFIG_VIEWPORTS_ENABLE

    window = create_window(name, width, height, maximized=maximized)
    impl = PatchedGlfwRenderer(window)

    # Compensate for HiDPI scaling so fonts render at the intended size.
    win_w, win_h = glfw.get_window_size(window)
    fb_w, fb_h = glfw.get_framebuffer_size(window)
    font_scaling_factor = max(float(fb_w) / win_w, float(fb_h) / win_h)
    imgui.get_io().font_global_scale /= font_scaling_factor
    impl.refresh_font_texture()  # Refresh the font texture in case user changed it

    # Using this feels significantly choppier than sleeping manually. TODO: investigate & fix
    # glfw.swap_interval(-1)

    writer = None
    if save_screencast:
        import imageio
        width, height = glfw.get_framebuffer_size(window)
        offscreen_fb = create_offscreen_fb(width, height)
        writer = imageio.get_writer(save_screencast, mode='I', fps=screencast_fps)

    try:
        while not glfw.window_should_close(window):
            t0 = time.perf_counter()
            glfw.poll_events()
            impl.process_inputs()
            imgui.new_frame()

            create_window_dock(window, menu_bar=menu_bar)
            begin_maximized_window("Default##Concur", window, menu_bar=menu_bar)

            try:
                next(widget)
            except StopIteration:
                break
            finally:
                imgui.end()

            imgui.render()
            gl.glClearColor(0.5, 0.5, 0.5, 1)
            gl.glClear(gl.GL_COLOR_BUFFER_BIT)

            if save_screencast:
                # Render once into the offscreen framebuffer to grab a frame.
                gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, offscreen_fb)
                impl.render(imgui.get_draw_data())
                image = get_fb_data(offscreen_fb, width, height)
                writer.append_data(image)
                gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)

            impl.render(imgui.get_draw_data())
            glfw.swap_buffers(window)

            # Manual frame limiter.
            t1 = time.perf_counter()
            if t1 - t0 < 1/fps:
                time.sleep(1/fps - (t1 - t0))
    finally:
        impl.shutdown()
        imgui.destroy_context(imgui.get_current_context())
        glfw.terminate()
        # Bug fix: close the screencast writer even when the loop raises; the
        # original only closed it on a clean exit, which could truncate or
        # corrupt the video file.
        if writer is not None:
            writer.close()
| 2.171875 | 2 |
observations/r/davis.py | hajime9652/observations | 199 | 12763520 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def davis(path):
    """Self-Reports of Height and Weight

    The `Davis` data frame has 200 rows and 5 columns. The subjects were
    men and women engaged in regular exercise. There are some missing data.

    This data frame contains the following columns:

    sex
        A factor with levels: `F`, female; `M`, male.
    weight
        Measured weight in kg.
    height
        Measured height in cm.
    repwt
        Reported weight in kg.
    repht
        Reported height in cm.

    Personal communication from <NAME>, Departments of Physical Education
    and Psychology, York University.

    Args:
        path: str.
            Path to directory which either stores file or otherwise file will
            be downloaded and extracted there.
            Filename is `davis.csv`.

    Returns:
        Tuple of np.ndarray `x_train` with 200 rows and 5 columns and
        dictionary `metadata` of column headers (feature names).
    """
    import pandas as pd

    path = os.path.expanduser(path)
    filename = 'davis.csv'
    filepath = os.path.join(path, filename)

    # Download the CSV on first use; subsequent calls read the cached copy.
    if not os.path.exists(filepath):
        url = 'http://dustintran.com/data/r/car/Davis.csv'
        maybe_download_and_extract(
            path, url, save_file_name='davis.csv', resume=False)

    data = pd.read_csv(filepath, index_col=0, parse_dates=True)
    return data.values, {'columns': data.columns}
| 3.234375 | 3 |
文本摘要/text_rank/data_loader.py | zhangdddong/beautifulNLP | 10 | 12763521 | <filename>文本摘要/text_rank/data_loader.py
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
__author__ = 'zd'
import global_parameters as config
def get_sentences(text):
    """Split a document into sentences.

    Each break-point character from ``config.break_points`` is replaced by a
    common marker and the text is split on that marker.

    :param text: the document to split into sentences
    :return: list of ``[index, sentence]`` pairs — indexes are 1-based, and
        the last sentence is marked with index ``-1``
    """
    # Normalize every break-point character to one marker, then split on it.
    for point in config.break_points:
        text = text.replace(point, '<POINT>')
    # Drop empty fragments produced by consecutive break points.
    sentences = [s for s in text.split('<POINT>') if s != '']
    res = []
    last = len(sentences) - 1
    for i, sentence in enumerate(sentences):
        # The final sentence gets the sentinel index -1; others are 1-based.
        res.append([-1 if i == last else i + 1, sentence])
    return res
| 2.921875 | 3 |
deepclaw/utils/ForegroundDetector.py | 1079931505/ME336-Yellow-Team-SUSTech | 2 | 12763522 | # MIT License.
# Copyright (c) 2021 by BioicDL. All rights reserved.
# Created by LiuXb on 2021/1/5
# -*- coding:utf-8 -*-
"""
@Modified:
@Description:
"""
import time
import cv2
import numpy as np
class BackgroundDetector(object):
    """Foreground/background segmentation helpers built on OpenCV.

    Offers several alternative strategies: frame differencing against a
    reference image, BGR colour-range filtering, a MOG2 background model fed
    from a reference video, and GrabCut refinement, plus connected-component
    labelling and convex-hull extraction for post-processing.
    """
    def __init__(self):
        # MOG2 background-model state, populated by multiFrameLoader().
        self.fgmask = None
        self.fgbg = None
    def diffGround(self, groundImg, currrentImg, img_threshold=10, show_mask=False):
        """ generate mask from a background image"""
        # transfer to gray image
        groundImg_gray = cv2.cvtColor(groundImg, cv2.COLOR_BGR2GRAY)
        groundBlur = cv2.GaussianBlur(groundImg_gray, (3, 3), 1)
        # NOTE(review): assigning to .dtype reinterprets the raw byte buffer
        # as int16 (it does NOT convert pixel values); carries between byte
        # pairs can corrupt the subtraction. astype('int16') looks intended —
        # confirm.
        groundBlur.dtype = 'int16'
        currrentImg_gray = cv2.cvtColor(currrentImg, cv2.COLOR_BGR2GRAY)
        currrentImgBlur = cv2.GaussianBlur(currrentImg_gray, (3, 3), 1)
        currrentImgBlur.dtype = 'int16'
        # subtraction
        dGrayBlur = abs(groundBlur-currrentImgBlur)
        dGrayBlur.dtype = 'uint8'
        dGrayMidBlur = cv2.medianBlur(dGrayBlur, 5)
        # Binarize: pixels differing by more than img_threshold become foreground.
        ret, thresh = cv2.threshold(dGrayMidBlur, img_threshold, 255, cv2.THRESH_BINARY)
        if show_mask:
            cv2.imshow('diff img', dGrayMidBlur)
            cv2.imshow('binary img from diff', thresh)
            cv2.waitKey()
        return thresh
    def filterColor(self, currrentImg, lower=np.array([10, 20, 0]), upper=np.array([60, 80, 40]), show_result=False):
        """ BGR channels"""
        # NOTE(review): np.array defaults are mutable default arguments; they
        # are not mutated here, but converting to None-defaults would be safer.
        # Pixels inside [lower, upper] are treated as background; the mask is
        # inverted so foreground is white.
        mask = cv2.inRange(currrentImg, lower, upper)
        mask = cv2.bitwise_not(mask)
        if show_result:
            cv2.imshow('binary img from color', mask)
            cv2.waitKey()
        return mask
    def multiFrameLoader(self, ref_video='outpy.avi', ref_num=500, mog_threshold=20):
        """ load background video"""
        # Train a MOG2 background model on up to ref_num frames of the video.
        cap = cv2.VideoCapture(ref_video)
        self.fgbg = cv2.createBackgroundSubtractorMOG2(history=ref_num, detectShadows=True, varThreshold=mog_threshold)
        cnt = 0
        # NOTE(review): cnt is never incremented, so this loop runs until the
        # video is exhausted rather than stopping at ref_num frames — confirm
        # whether that is intended.
        while (cnt < ref_num):
            ret, frame = cap.read()
            if not ret:
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            self.fgmask = self.fgbg.apply(gray)
            # self.fgmask = cv2.medianBlur(self.fgmask, 5)
    def multiFrameFilter(self, color_img, show_mask=False):
        """ create Gaussian Mixture Model from multi images as background"""
        # Apply the trained MOG2 model with learning rate 0 (no model update).
        gray = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)
        mask = self.fgbg.apply(gray, self.fgmask, 0)
        # median filter
        mask = cv2.medianBlur(mask, 5)
        # Dilate then erode (morphological closing) to fill small holes.
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
        mask = cv2.dilate(mask, kernel)
        mask = cv2.erode(mask, kernel, iterations=1)
        if show_mask:
            cv2.imshow('binary img from mog', mask)
            cv2.waitKey()
        return mask
    def grabCut_rect(self, color_img, rect=[200, 0, 900, 720]):
        """ rect = [col_min, row_min, col_max, row_max]"""
        # NOTE(review): cv2.grabCut documents rect as (x, y, width, height);
        # the corner-coordinate description in the docstring may be wrong — confirm.
        mask = np.zeros(color_img.shape[:2], np.uint8)
        bgdModel = np.zeros((1, 65), np.float64)
        fgdModel = np.zeros((1, 65), np.float64)
        # rect = (200, 0, 900, 720)
        cv2.grabCut(color_img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
        # Collapse GrabCut's 4-way labels to binary: sure/probable foreground -> 1.
        mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
        return mask2
    def grabCut_mask(self, color_img, mask):
        # wherever it is marked white (sure foreground), change mask=1
        # wherever it is marked black (sure background), change mask=0
        mask[mask == 0] = 0
        mask[mask != 0] = 1
        bgdModel = np.zeros((1, 65), np.float64)
        fgdModel = np.zeros((1, 65), np.float64)
        mask, bgdModel, fgdModel = cv2.grabCut(color_img, mask, None, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)
        mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
        return mask
    def getConnectedDomain(self, binary_img, connectivity=4, region_area=1000, show_label=False):
        """ obtain connected domain"""
        num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(binary_img, connectivity, cv2.CV_32S)
        # delete small regions
        label_index = []
        for i in range(num_labels):
            # stats[i][4] is the component area (cv2.CC_STAT_AREA).
            if stats[i][4] < region_area:
                labels[labels == i] = 0
            else:
                label_index.append(i)
        # Map component labels to hue val, 0-179 is the hue range in OpenCV
        if np.max(labels) == 0:
            label_hue = np.uint8(labels)
        else:
            label_hue = np.uint8(179*labels/np.max(labels))
        blank_ch = 255*np.ones_like(label_hue)
        labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
        # Converting cvt to BGR
        labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)
        # set bg label to black
        labeled_img[label_hue == 0] = 0
        if show_label:
            cv2.imshow('color labels', labeled_img)
            cv2.waitKey()
        return labels, label_index, labeled_img
    def getConvexHull(self, img, show_convex=False):
        """Compute convex hulls of the external contours of a label/binary image."""
        # convex hull
        result = cv2.findContours(np.uint8(img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        # contour: if hierarchy[0][i][3] == -1, it means there are contours inside
        # len(contours[i] is the num of the contour
        # findContours returns 2 or 3 values depending on the OpenCV version.
        contours = []
        if len(result) == 2:
            contours = result[0]
            hierarchy = result[1]
        elif len(result) == 3:
            contours = result[1]
            hierarchy = result[2]
        hull = []
        for i in range(len(contours)):
            # creating convex hull object for each contour
            hull.append(cv2.convexHull(contours[i], False))
        if show_convex:
            # create an empty black image
            drawing = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
            # draw contours and hull points
            for i in range(len(contours)):
                color_contours = (0, 255, 0)  # green - color for contours
                color = (255, 0, 0)  # blue - color for convex hull
                # draw ith contour
                cv2.drawContours(drawing, contours, i, color_contours, 1, 8, hierarchy)
                # draw ith convex hull object
                cv2.drawContours(drawing, hull, i, color, 1, 8)
            cv2.imshow('convex', drawing)
            cv2.waitKey()
        return hull
if __name__ == '__main__':
    # Demo: grab a frame from a RealSense camera and run the colour filter
    # plus connected-component/convex-hull pipeline on it.
    from deepclaw.driver.sensors.camera.Realsense_L515 import Realsense
    camera = Realsense('./configs/basic_config/camera_rs_d435_c1.yaml')
    # Grab one frame, wait, then grab again — presumably to let the camera's
    # auto-exposure settle; confirm against the driver's behaviour.
    frame = camera.get_frame()
    color = frame.color_image[0]
    time.sleep(1)
    frame = camera.get_frame()
    color = frame.color_image[0]
    # Set to True once to capture a new reference (background) image.
    save_ref_img = False
    if save_ref_img:
        cv2.imwrite('./projects/ICRA2020/ref_img.jpg', color)
    refImg = cv2.imread('./projects/ICRA2020/ref_img.jpg')
    cv2.imshow("a", refImg)
    cv2.waitKey()
    bd_test = BackgroundDetector()
    # generate mask
    # thresh = bd_test.diffGround(refImg, color, img_threshold=20, show_mask=True)
    thresh = bd_test.filterColor(color, show_result=True)
    # bd_test.multiFrameLoader(ref_video='d435qi_v40.avi', ref_num=500, mog_threshold=16)
    # thresh = bd_test.multiFrameFilter(color, show_mask=True)
    labels, labels_index, color_labels = bd_test.getConnectedDomain(thresh, show_label=True, region_area=2000)
    hul = bd_test.getConvexHull(labels, show_convex=True)
| 2.8125 | 3 |
projects/research/text-processors/text_finders/for_analyse/def_finder.py | zaqwes8811/smart-vocabulary-cards | 0 | 12763523 | #!/usr/bin/python
#-*- coding: utf-8 -*-
import str_finder as sf
import os
'''
file :
'''
# Conditional-compilation define names to search for (hardware build options).
# Inline notes translated from Russian — verify against the hardware docs.
defines = ['_2Ublock',
           '_3Ublock',
           '_3U_polovina_block',
           '_Uniplex', # when active: duplex/half-duplex switched by an external jumper; when inactive the mode is chosen by _duplex
           '_duplex', # (with _Uniplex inactive) when active the controller drives the DUX pin to logic 0 (duplex=const); when inactive DUX is logic 1 (half-duplex); independent of external jumpers
           'bootloader',
           'NO_BOARD',
           'RawDetData',
           'WithDetector',
           'otladka',
           'fullspeed',
           'liqiud600',
           'air600',
           'air100',
           'DVBT2bred_', # attenuator-unlock delay (~6.0 s) when input power appears (_V3_BUM)
           'manyBUM_OFF_bDE', # enable!!! for multi-block transmitters (switch the RS-485 driver for transmitters with >1 BUM; for 100 W the MAX1485 stays in transmit mode permanently)
           '_Umip42V', # switch the MIP voltage threshold to +42 V (+13%/-15%) +020312
           '_Umip48V', # switch the MIP voltage threshold to +48 V (+13%/-15%) +020312
           '_virtual_UM2'] # virtual OU1 protection board, for use in BUM_FM 2U
# Main
# NOTE(review): Python 2 syntax (see `print 'IOError'` below). The log file is
# reopened in append mode for every single line — slow but crash-safe.
for i in defines:
    findedList = sf.Main( i )
    try:
        fwr = open('def_log.log', "a")
        try:
            fwr.write('\n'+i+':\n') # Write a string to a file
        finally:
            fwr.close()
        # per element of the found list
        for at in findedList:
            fwr = open('def_log.log', "a")
            # write loop
            sWr = ' '+at+'\n'
            try:
                fwr.write(sWr) # Write
            # write loop
            # guaranteed to be executed
            finally:
                fwr.close()
    except IOError:
        print 'IOError'
| 1.960938 | 2 |
tests/test_algorithm.py | GabyRumc/evalutils | 17 | 12763524 | <reponame>GabyRumc/evalutils
import json
import os
import shutil
from pathlib import Path
from typing import Dict
import SimpleITK
import numpy as np
from pandas import DataFrame
from scipy.ndimage import center_of_mass, label
from evalutils import (
ClassificationAlgorithm,
DetectionAlgorithm,
SegmentationAlgorithm,
)
TEMPLATE_TEST_DIR = (
Path(__file__).parent.parent
/ "evalutils"
/ "templates"
/ "algorithm"
/ "{{ cookiecutter.package_name }}"
/ "test"
)
class DetectionAlgorithmTest(DetectionAlgorithm):
    """Toy detector: finds clusters of maximum-intensity voxels."""

    def predict(self, *, input_image: SimpleITK.Image) -> DataFrame:
        # Extract a numpy array with image data from the SimpleITK Image
        voxels = SimpleITK.GetArrayFromImage(input_image)

        # Detection: connected components of the maximum-valued voxels,
        # each reduced to its center of mass.
        peak_mask = voxels == np.max(voxels)
        component_labels, n_components = label(peak_mask)
        centers = center_of_mass(
            input=peak_mask,
            labels=component_labels,
            index=np.arange(n_components) + 1,
        )

        # Scoring: each cluster is scored with the intensity at its
        # (truncated) center coordinate.
        scores = [
            voxels[tuple(coord)]
            for coord in np.array(centers).astype(np.uint16)
        ]

        # Serialize candidates and scores, then wrap them in a DataFrame.
        rows = self._serialize_candidates(
            candidates=centers,
            candidate_scores=scores,
            ref_image=input_image,
        )
        return DataFrame(rows)
class SegmentationAlgorithmTest(SegmentationAlgorithm):
    """Toy segmenter: marks every voxel with value >= 2 as foreground."""

    def predict(self, *, input_image: SimpleITK.Image) -> SimpleITK.Image:
        segmentation = SimpleITK.BinaryThreshold(
            image1=input_image,
            lowerThreshold=2,
            insideValue=1,
            outsideValue=0,
        )
        return segmentation
class ClassificationAlgorithmTest(ClassificationAlgorithm):
    """Toy classifier: reports whether any voxel value exceeds one."""

    def predict(self, *, input_image: SimpleITK.Image) -> Dict:
        voxels = SimpleITK.GetArrayFromImage(input_image)
        exceeds_one = bool(np.any(voxels > 1))
        return {"values_exceeding_one": exceeds_one}
def test_classification_algorithm(tmpdir):
    """The classification algorithm produces the expected results.json."""
    input_dir = Path(tmpdir / "input")
    shutil.copytree(TEMPLATE_TEST_DIR, input_dir)
    validate_algorithm_output(
        input_dir=input_dir,
        algorithm_test_class=ClassificationAlgorithmTest,
        expected_results_file="results_classification.json",
    )
def test_segmentation_algorithm(tmpdir):
    """The segmentation algorithm writes results.json plus an output image
    whose foreground matches input voxels >= 2."""
    indir = Path(tmpdir / "input")
    out_file = Path(
        tmpdir
        / "output"
        / "images"
        / "1.0.000.000000.0.00.0.0000000000.0000.0000000000.000.mhd"
    )
    shutil.copytree(TEMPLATE_TEST_DIR, indir)
    validate_algorithm_output(
        input_dir=indir,
        expected_results_file="results_segmentation.json",
        algorithm_test_class=SegmentationAlgorithmTest,
    )
    # The segmentation image must be written next to the results file.
    assert out_file.exists()
    out_img = SimpleITK.GetArrayFromImage(SimpleITK.ReadImage(str(out_file)))
    in_img = SimpleITK.GetArrayFromImage(
        SimpleITK.ReadImage(str(indir / out_file.name))
    )
    # Foreground in the output corresponds exactly to input values >= 2.
    assert np.array_equal((in_img >= 2), (out_img > 0))
def test_detection_algorithm(tmpdir):
    """The detection algorithm produces the expected results.json."""
    input_dir = tmpdir / "input"
    shutil.copytree(TEMPLATE_TEST_DIR, input_dir)
    validate_algorithm_output(
        input_dir=input_dir,
        algorithm_test_class=DetectionAlgorithmTest,
        expected_results_file="results_detection.json",
    )
def test_detection_algorithm_2d_input(tmpdir):
    """The detection algorithm also handles a 2D (single-slice) input image."""
    indir = tmpdir / "input"
    os.makedirs(indir)
    test_image = (
        TEMPLATE_TEST_DIR
        / "1.0.000.000000.0.00.0.0000000000.0000.0000000000.000.mhd"
    )
    # Take a single axial slice (index 74) of the 3D template image.
    image_data = SimpleITK.GetArrayFromImage(
        SimpleITK.ReadImage(str(test_image))
    )[74, :, :]
    SimpleITK.WriteImage(
        SimpleITK.GetImageFromArray(image_data),
        str(indir / "2dtest.mha"),
        True,
    )
    validate_algorithm_output(
        input_dir=indir,
        expected_results_file="results_2d.json",
        algorithm_test_class=DetectionAlgorithmTest,
    )
def test_detection_algorithm_empty_input(tmpdir):
    """An all-zero input image yields the expected (empty) detection result."""
    input_dir = tmpdir / "input"
    os.makedirs(input_dir)
    blank = np.zeros((100, 100), dtype=np.uint8)
    SimpleITK.WriteImage(
        SimpleITK.GetImageFromArray(blank),
        str(input_dir / "emptytest.mha"),
        True,
    )
    validate_algorithm_output(
        input_dir=input_dir,
        algorithm_test_class=DetectionAlgorithmTest,
        expected_results_file="results_empty.json",
    )
def validate_algorithm_output(
    input_dir: Path, expected_results_file: str, algorithm_test_class: type
):
    """Run an algorithm over *input_dir* and compare results.json to a fixture.

    The expected JSON is looked up first under tests/resources/json and falls
    back to the cookiecutter template's test directory.
    """
    output_dir = Path(input_dir).parent / "output"
    output_dir.mkdir()
    # Instantiate the algorithm and redirect its I/O paths into the sandbox.
    proc = algorithm_test_class()
    proc._input_path = Path(input_dir)
    proc._output_file = Path(output_dir) / "results.json"
    proc._output_path = Path(output_dir) / "images"
    proc.process()
    results_file = output_dir / "results.json"
    assert results_file.exists()
    with open(str(results_file)) as f:
        results = json.load(f)
    # Prefer the local fixture; fall back to the template's copy.
    expected_path = (
        Path(__file__).parent / "resources" / "json" / expected_results_file
    )
    if not expected_path.exists():
        expected_path = TEMPLATE_TEST_DIR / expected_results_file
    with open(str(expected_path)) as f:
        expected_result = json.load(f)
    assert results == expected_result
| 2.875 | 3 |
ding/model/template/tests/test_vac.py | kxzxvbk/DI-engine | 1 | 12763525 | <reponame>kxzxvbk/DI-engine
import pytest
import numpy as np
import torch
from itertools import product
from ding.model import VAC
from ding.torch_utils import is_differentiable
from ding.model import ConvEncoder
B, C, H, W = 4, 3, 128, 128
obs_shape = [4, (8, ), (4, 64, 64)]
act_args = [[6, 'discrete'], [(3, ), 'continuous'], [[2, 3, 6], 'discrete']]
# act_args = [[(3, ), True]]
args = list(product(*[obs_shape, act_args, [False, True]]))
def output_check(model, outputs, action_shape):
    """Backprop a reduced loss over *outputs* and verify gradients reach *model*.

    Bug fix: multi-discrete action shapes arrive as *lists* (e.g. ``[2, 3, 6]``
    in the parametrized args), which matched neither the tuple branch nor
    ``np.isscalar`` and left ``loss`` unbound (UnboundLocalError). Lists are
    now handled like tuples; unknown shapes raise an explicit TypeError.
    """
    if isinstance(action_shape, (tuple, list)):
        # Multi-head output: one tensor per action dimension.
        loss = sum([t.sum() for t in outputs])
    elif np.isscalar(action_shape):
        loss = outputs.sum()
    else:
        raise TypeError("unsupported action_shape: {!r}".format(action_shape))
    is_differentiable(loss, model)
def model_check(model, inputs):
    """Exercise all three VAC forward modes and check gradient flow.

    Runs compute_actor_critic, compute_actor and compute_critic in turn,
    clearing gradients between checks so each mode is verified independently.
    """
    outputs = model(inputs, mode='compute_actor_critic')
    value, logit = outputs['value'], outputs['logit']
    # Reduce value + logit(s) to a scalar; continuous policies expose mu/sigma.
    if model.action_space == 'continuous':
        outputs = value.sum() + logit['mu'].sum() + logit['sigma'].sum()
    else:
        if model.multi_head:
            outputs = value.sum() + sum([t.sum() for t in logit])
        else:
            outputs = value.sum() + logit.sum()
    output_check(model, outputs, 1)

    # Reset gradients before checking the actor head on its own.
    for p in model.parameters():
        p.grad = None
    logit = model(inputs, mode='compute_actor')['logit']
    if model.action_space == 'continuous':
        logit = logit['mu'].sum() + logit['sigma'].sum()
    output_check(model.actor, logit, model.action_shape)

    # Reset gradients before checking the critic head on its own.
    for p in model.parameters():
        p.grad = None
    value = model(inputs, mode='compute_critic')['value']
    assert value.shape == (B, )
    output_check(model.critic, value, 1)
@pytest.mark.unittest
@pytest.mark.parametrize('obs_shape, act_args, share_encoder', args)
class TestVACGeneral:
    """Smoke-test VAC across observation shapes, action spaces and encoder sharing."""

    def test_vac(self, obs_shape, act_args, share_encoder):
        batch = (
            torch.randn(B, obs_shape)
            if isinstance(obs_shape, int)
            else torch.randn(B, *obs_shape)
        )
        action_shape, action_space = act_args
        model = VAC(obs_shape, action_shape=action_shape, action_space=action_space, share_encoder=share_encoder)
        model_check(model, batch)
@pytest.mark.unittest
@pytest.mark.parametrize('share_encoder', [(False, ), (True, )])
class TestVACEncoder:
    """VAC construction with alternative encoders, shared or separate."""
    def test_vac_with_impala_encoder(self, share_encoder):
        # Built-in IMPALA CNN encoder selected via flag.
        inputs = torch.randn(B, 4, 64, 64)
        model = VAC(
            obs_shape=(4, 64, 64),
            action_shape=6,
            action_space='discrete',
            share_encoder=share_encoder,
            impala_cnn_encoder=True
        )
        model_check(model, inputs)
    def test_encoder_assignment(self, share_encoder):
        # Externally constructed encoder passed in; head sizes must match the
        # encoder's final hidden size (64).
        inputs = torch.randn(B, 4, 64, 64)
        special_encoder = ConvEncoder(obs_shape=(4, 64, 64), hidden_size_list=[16, 32, 32, 64])
        model = VAC(
            obs_shape=(4, 64, 64),
            action_shape=6,
            action_space='discrete',
            share_encoder=share_encoder,
            actor_head_hidden_size=64,
            critic_head_hidden_size=64,
            encoder=special_encoder
        )
        model_check(model, inputs)
| 2.015625 | 2 |
lib/bot.py | Javex/qllbot | 0 | 12763526 | import hashlib
import lib.event
import logging
import queue
import select
import socket
import ssl
import time
class BotError(Exception):
    """Raised for bot configuration errors (e.g. invalid SSL settings)."""
    pass
class UnknownCertError(Exception):
    """Raised when a previously unknown certificate is encountered.

    Only used with the known_hosts SSL authentication method.
    """
    def __init__(self, host, sha512_hash, sha1_hash):
        # Expose the offending host and both digests so the caller can show
        # them to the operator for manual verification.
        self.host = host
        self.sha512_hash = sha512_hash
        self.sha1_hash = sha1_hash
class Bot(object):
"""Handles network communication of the bot."""
timeout = 1
def __init__(self, host, port=6667, encoding='utf-8', use_ssl=False,
ca_certs=None, known_hosts=None, max_reconnects=5, db=None):
if use_ssl and ca_certs is None and known_hosts is None:
raise BotError('Expecting either ca_certs or known_hosts to be '
'set when SSL is enabled.')
self.host = host
self.port = port
self.encoding = encoding
self.use_ssl = use_ssl
self.ca_certs = ca_certs
self.known_hosts = known_hosts
self.max_reconnects = max_reconnects
self.db = db
self._buffer = ''
self._socket = None
self._msg_queue = queue.Queue()
self._log = logging.getLogger(__name__)
def _create_socket(self, use_ssl):
"""Create a TCP socket and adds in SSL if requested."""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if use_ssl:
cert_reqs = ssl.CERT_REQUIRED if self.ca_certs else ssl.CERT_NONE
s = ssl.wrap_socket(s, cert_reqs=cert_reqs, ca_certs=self.ca_certs)
return s
def _validate_ssl_cert(self):
if self.ca_certs is not None:
cert = self._socket.getpeercert() # get cert as dictionary
ssl.match_hostname(cert, self.host)
else:
cert = self._socket.getpeercert(True) # get binary cert
sha512_hash = hashlib.sha512(cert).hexdigest()
if self.host in self.known_hosts:
hash_ = self.known_hosts[self.host]
if sha512_hash != hash_:
self.disconnect()
e = ('SSL certificate does not match the one from the '
'known_hosts file. Most likely the server has changed'
' its certificate and you have to delete the old line'
' from the known_hosts file. Be careful, this could '
'also mean that you are being attacked!\nOld hash: '
'%s\nNew hash: %s' % (hash_, sha512_hash))
raise ssl.CertificateError(e)
else:
self.disconnect()
raise UnknownCertError(self.host, sha512_hash,
hashlib.sha1(cert).hexdigest())
def connect(self):
"""Connect to a server. Retry if it is not available."""
connected = False
retries = 0
while not connected:
try:
self._socket = self._create_socket(self.use_ssl)
self._socket.connect((self.host, self.port))
except ConnectionRefusedError:
if retries >= self.max_reconnects:
raise
time_sleep = (2 ** retries) * 5
self._log.warning('Connection refused, retrying in %ds.' %
time_sleep)
time.sleep(time_sleep)
retries += 1
else:
connected = True
if self.use_ssl:
self._validate_ssl_cert()
lib.event.call('connected', {'bot': self})
def reconnect(self):
"""Reconnect with the same credentials as before."""
self.disconnect()
self.connect()
def disconnect(self):
"""Disconnect from a server if a connection is open."""
if self._socket is not None:
self._socket.close()
def data_available(self):
"""Check if data is available on the socket with a timeout."""
rlist, _, __ = select.select([self._socket], [], [], self.timeout)
return self._socket in rlist
def _send(self):
"""Consume the internal message queue and send msgs to the server."""
if self._msg_queue.qsize() > 0:
self._socket.sendall(self._msg_queue.get() + b'\r\n')
def send(self, msg):
"""Append a message to an internal messaging queue.
If the message contains multiple commands, it will be throttled.
"""
for line in msg.strip().split('\r\n'):
self._msg_queue.put(line.encode(self.encoding, 'replace'))
    def _handle_data(self, data):
        """Buffer, decode and process incoming data.

        *data* is a raw bytes chunk from the socket.  Complete
        ``\\r\\n``-terminated messages are dispatched as ``raw_message``
        events; a trailing partial message is kept in ``self._buffer``
        until more data arrives.
        """
        self._buffer += data.decode(self.encoding, 'replace')
        if '\r\n' in self._buffer:
            messages = self._buffer.split('\r\n')
            # All elements but the last are complete messages; the last is
            # either empty or the beginning of the next (partial) message.
            for message in messages[:-1]:
                lib.event.call('raw_message', {'bot': self, 'msg': message})
            self._buffer = messages[-1].rstrip()
    def loop(self):
        """Run the bot's main loop: connect, then read/send forever.

        Each iteration reads any available data, reconnects on an empty
        read (the peer closed the connection), flushes one queued
        outgoing message and fires a ``watchdog_tick`` event.
        """
        self.connect()
        while True:
            if self.data_available():
                data = self._socket.recv(4096)
                if not data:
                    # recv() returning b'' means the server closed the socket.
                    self._log.warning('Empty response: Reconnecting the bot.')
                    self.reconnect()
                    continue
                self._handle_data(data)
            self._send()
            lib.event.call('watchdog_tick', {'bot': self})
| 2.5625 | 3 |
development/environment.py | BenjaminHamon/DevelopmentToolkit | 1 | 12763527 | import json
import logging
import os
import sys
# Log record formats using str.format-style ("{") placeholders; the file
# format additionally carries a timestamp.
default_log_format = "[{levelname}][{name}] {message}"
file_log_format = "{asctime} [{levelname}][{name}] {message}"
# ISO-8601-like timestamp with second resolution.
date_format = "%Y-%m-%dT%H:%M:%S"
def configure_logging(log_level):
    """Set up root-logger console output.

    Sets the root logger to *log_level*, renames the standard level names
    to capitalized words ("Debug", "Info", ...) and attaches a stdout
    stream handler using the module's default format.
    """
    logging.root.setLevel(log_level)
    level_titles = [
        (logging.DEBUG, "Debug"),
        (logging.INFO, "Info"),
        (logging.WARNING, "Warning"),
        (logging.ERROR, "Error"),
        (logging.CRITICAL, "Critical"),
    ]
    for level, title in level_titles:
        logging.addLevelName(level, title)
    console_formatter = logging.Formatter(default_log_format, date_format, "{")
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(log_level)
    console_handler.formatter = console_formatter
    logging.root.addHandler(console_handler)
def configure_log_file(log_level, log_file):
    """Attach a file handler writing to *log_file* to the root logger.

    The file is truncated (mode "w") and records are written with the
    timestamped file log format at *log_level* or above.
    """
    formatter = logging.Formatter(file_log_format, date_format, "{")
    file_handler = logging.FileHandler(log_file, mode = "w")
    file_handler.setLevel(log_level)
    file_handler.formatter = formatter
    logging.root.addHandler(file_handler)
def create_default_environment():
    """Return the built-in defaults for the tool environment.

    Maps tool identifiers to the executables used when no override is
    provided by an environment file.
    """
    defaults = {}
    defaults["git_executable"] = "git"
    defaults["python3_executable"] = sys.executable
    defaults["scp_executable"] = "scp"
    defaults["ssh_executable"] = "ssh"
    return defaults
def load_environment():
    """Build the effective environment dictionary.

    Starts from the built-in defaults, then applies overrides from
    "environment.json" in the user's home directory, then from
    "environment.json" in the current working directory (highest
    precedence).  Missing files are silently skipped.
    """
    environment = create_default_environment()
    override_files = [
        os.path.join(os.path.expanduser("~"), "environment.json"),
        "environment.json",
    ]
    for override_file in override_files:
        environment.update(_load_environment_transform(override_file))
    return environment
def _load_environment_transform(transform_file_path):
if not os.path.exists(transform_file_path):
return {}
with open(transform_file_path) as transform_file:
return json.load(transform_file)
| 2.4375 | 2 |
main.py | kamacizy/trading_bot | 0 | 12763528 | import robinhood
import reddit
import twitter
#prompt = input('What you want?')
def MAIN():
    """Entry point: run the bot's periodic tasks.

    Currently only triggers the Twitter search; the Reddit cross-check
    is disabled (left commented out).
    """
    #reddit.CROSS_CHECK()
    twitter.search()
MAIN() | 1.96875 | 2 |
avoviirstools/dashboard/callbacks/volcview_images.py | tparker-usgs/viirspassplotter | 0 | 12763529 | from dash.dependencies import Input, Output
from .. import dashboard
import pandas as pd
# Fraction of the average daily image count below which the status
# indicator turns yellow / red.  NOTE(review): "THREASHOLD" is a typo for
# "THRESHOLD", kept to avoid breaking existing references.
YELLOW_THREASHOLD = 0.9
RED_THREASHOLD = 0.5
@dashboard.app.callback(
    Output("volcview-sectors", "figure"), [Input("volcview-sectors-update", "n_clicks")]
)
def gen_volcview_sectors(n_clicks):
    """Build the per-sector figure: today's image counts vs daily average.

    Bars show the number of images per sector over the last 24 hours;
    markers show the long-run average number of images per day per sector.
    """
    pdnow = pd.to_datetime("now")
    yesterday = pdnow - pd.Timedelta("1 days")
    # Images received in the last 24 hours, counted per sector.
    today_data = dashboard.sector_subscriber.sector_images[yesterday:pdnow]
    today_data = today_data.groupby("sector").size()
    data = dashboard.sector_subscriber.sector_images
    # Span of the whole dataset in (fractional) days, used to turn total
    # counts into a per-day average.
    days = data.index.max() - data.index.min()
    days = days / pd.Timedelta("1 days")
    data = data.groupby("sector").size()
    if days > 0:
        data = data / days
    return {
        "data": [
            {"x": today_data.index, "y": today_data, "type": "bar", "name": "today"},
            {
                "x": data.index,
                "y": data,
                "type": "scatter",
                "name": "average",
                "mode": "markers",
            },
        ],
        "layout": {
            "margin": {"l": 30, "b": 80, "t": 50, "r": 5, "pad": 0},
            "legend": {"y": 1.3, "orientation": "h"},
            "hovermode": "closest",
        },
    }
@dashboard.app.callback(
    Output("volcview-products", "figure"),
    [Input("volcview-products-update", "n_clicks")],
)
def gen_volcview_products(n_clicks):
    """Build the per-band (product) figure: today's counts vs daily average.

    Same structure as ``gen_volcview_sectors`` but grouped by "band"
    instead of "sector".
    """
    pdnow = pd.to_datetime("now")
    yesterday = pdnow - pd.Timedelta("1 days")
    # Images received in the last 24 hours, counted per band.
    today_data = dashboard.sector_subscriber.sector_images[yesterday:pdnow]
    today_data = today_data.groupby("band").size()
    data = dashboard.sector_subscriber.sector_images
    # Dataset span in days, used to normalize totals to a per-day average.
    days = data.index.max() - data.index.min()
    days = days / pd.Timedelta("1 days")
    data = data.groupby("band").size()
    if days > 0:
        data = data / days
    return {
        "data": [
            {"x": today_data.index, "y": today_data, "type": "bar", "name": "today"},
            {
                "x": data.index,
                "y": data,
                "type": "scatter",
                "name": "average",
                "mode": "markers",
            },
        ],
        "layout": {
            "margin": {"l": 30, "b": 50, "t": 50, "r": 5, "pad": 0},
            "legend": {"y": 1.3, "orientation": "h"},
            "hovermode": "closest",
            "border": "#dddddd",
        },
    }
@dashboard.app.callback(
    Output("volcview-table", "data"), [Input("volcview-table-update", "n_clicks")]
)
def gen_volcview_table(n_clicks):
    """Return the 50 most recent images as table rows (list of dicts)."""
    data = dashboard.sector_subscriber.sector_images
    data = data.sort_index(ascending=False)
    data = data.iloc[:50]
    # NOTE(review): "%-d" (no-padding day) is a glibc strftime extension
    # and fails on Windows — confirm the deployment platform.
    data["image time"] = data.index.to_series().dt.strftime("%b %-d %H:%M:%S")
    data["data time"] = pd.to_datetime(data["imageUnixtime"], unit="s")
    data["data time"] = data["data time"].dt.strftime("%b %-d %H:%M:%S")
    return data.to_dict("records")
@dashboard.app.callback(
    [
        Output("volcview-images-indicator", "className"),
        Output("volcview-images-indicator", "title"),
    ],
    [Input("volcview-images-indicator-update", "n_intervals")],
)
def update_volcview_images_indicator(value):
    """Pick the status icon class and tooltip for the image-count indicator.

    Compares the number of images received in the last 24 hours against
    the long-run daily average:

    * above ``YELLOW_THREASHOLD`` of the average -> healthy (star icon)
    * above ``RED_THREASHOLD`` of the average    -> degraded (warning icon)
    * otherwise                                  -> failing (exclamation icon)

    Returns ``(className, tooltip)``.
    """
    pdnow = pd.to_datetime("now")
    yesterday = pdnow - pd.Timedelta("1 days")
    today_data = dashboard.sector_subscriber.sector_images[yesterday:pdnow]
    today_data = len(today_data)
    data = dashboard.sector_subscriber.sector_images
    # Dataset span in days; normalizes the total count to images/day.
    days = data.index.max() - data.index.min()
    days = days / pd.Timedelta("1 days")
    data = len(data)
    if days > 0:
        data = int(data / days)
    # Fixed: use the module-level thresholds instead of repeating the
    # magic numbers 0.9 / 0.5 inline, so the cutoffs live in one place.
    yellow = int(data * YELLOW_THREASHOLD)
    red = int(data * RED_THREASHOLD)
    if today_data > yellow:
        className = "fa fa-star"
        tooltip = "{} images today; yellow threashold is {}".format(today_data, yellow)
    elif today_data > red:
        className = "fa fa-warning"
        tooltip = "{} images today; yellow threashold {}, red threshold {}".format(
            today_data, yellow, red
        )
    else:
        className = "fa fa-exclamation-circle"
        tooltip = "{} images today; red threashold is {}".format(today_data, red)
    return className, tooltip
| 2.578125 | 3 |
python_sets/difference/difference.py | antonarnaudov/python-ninjas-2021-01 | 0 | 12763530 | <filename>python_sets/difference/difference.py
def difference(a, b):
    """Return the elements of *a* that do not occur in *b*.

    Order and duplicates from *a* are preserved.  Runs in
    O(len(a) + len(b)) by testing membership against a set of *b*;
    falls back to a linear scan when *b* contains unhashable elements.
    """
    try:
        excluded = set(b)
    except TypeError:  # unhashable elements (e.g. lists): linear fallback
        excluded = b
    return [el for el in a if el not in excluded]
# Demo data comparing the hand-rolled difference() with set.difference().
ll = [1, 2, 3, 4, 5]
ll2 = [4, 5, 6, 7, 8]
# print(difference(ll, ll2))
# Editor tip: CTRL + / toggles line comments.
set1 = {1, 2, 3, 4, 5}
set2 = {4, 5, 6, 7, 8}
# Built-in set difference yields the same elements (order not defined).
set_difference = set1.difference(set2)
# print(set_difference)
# print(difference(ll, ll2) == list(set_difference))
| 3.734375 | 4 |
skdd/util.py | rlurye/skdd | 0 | 12763531 | import itertools
from collections import Counter
import math
from skdd.config import logger
# generate all rules for number of columns
def combinations(ncols):
    """For every possible delimiter column, list all non-empty subsets of
    the remaining column indices.

    Returns a list of ``ncols`` entries; entry ``d`` holds every
    non-empty tuple drawn from ``range(ncols)`` with column ``d``
    removed, ordered by increasing subset size.
    """
    all_columns = list(range(ncols))
    result = []
    for delimiter in all_columns:
        remaining = [col for col in all_columns if col != delimiter]
        subsets = []
        for size in range(1, len(remaining) + 1):
            subsets.extend(itertools.combinations(remaining, size))
        result.append(subsets)
    return result
# generate all partiniton of number
def accel_asc(n):
    """Yield every integer partition of *n*.

    Implements the "accelerated ascending" partition algorithm; each
    yielded list contains the parts of one partition in ascending order
    (e.g. for n=4: [1, 1, 1, 1], [1, 1, 2], [1, 3], [2, 2], [4]).
    """
    a = [0 for i in range(n + 1)]  # working buffer holding partition parts
    k = 1  # index just past the last fixed part
    y = n - 1
    while k != 0:
        x = a[k - 1] + 1
        k -= 1
        # Greedily split the remainder y into parts of size x.
        while 2 * x <= y:
            a[k] = x
            y -= x
            k += 1
        l = k + 1
        # Emit partitions ending in the pair (x, y), growing x / shrinking y.
        while x <= y:
            a[k] = x
            a[l] = y
            yield a[:k + 2]
            x += 1
            y -= 1
        # Collapse the final pair into one part and emit.
        a[k] = x + y
        y = x + y - 1
        yield a[:k + 1]
# give extended partitions list of number
def partition(num):
    """Return the extended form of every partition of *num*.

    Partitions come from ``accel_asc`` and are processed largest-first
    (reverse of the generator's order); each one is expanded with
    ``extended_part``.
    """
    ordered = reversed(list(accel_asc(num)))
    return [extended_part(parts) for parts in ordered]
def patition_coef(extended_part):
    """Compute the combinatorial coefficient of one extended partition.

    The parameter is an extended-partition list such as [1, 2, 3, 3].
    NOTE(review): the parameter name shadows the module-level
    ``extended_part`` function inside this scope (and the function name
    itself carries a "patition" typo); both are kept for compatibility.

    With n = number of parts and k = number of distinct values, returns
    n! * n! / (n**n * (n-k)! * p * s), where p is the product of the
    factorials of each value's multiplicity and s the product of the
    factorials of the multiplicities-of-multiplicities.
    """
    n = len(extended_part)
    k = len(Counter(extended_part).keys())
    logger.debug(" k:" + str(k))
    p = 1
    number_of_elements = Counter(extended_part).values()
    logger.debug(" NOE:" + str(number_of_elements))
    for pc in number_of_elements:
        logger.debug(" p:" + str(pc))
        p *= math.factorial(pc)
    s = 1
    # Multiplicities of the multiplicities (how many values share a count).
    number_of_v = Counter(list(Counter(extended_part).values())).values()
    for sc in number_of_v:
        logger.debug(" s:" + str(sc))
        s *= math.factorial(sc)
    logger.debug(" n:" + str(n))
    result = math.factorial(n) * math.factorial(n) / ((n ** n) * math.factorial(n - k) * p * s)
    logger.debug(" " + str(result))
    logger.debug(" ----------------------------------")
    return result
# extended partition of number
def extended_part(part):
    """Expand a partition into its "extended" form.

    Reverses *part* and, for each 1-based position, repeats that
    position's index as many times as the part's value,
    e.g. [2, 1, 1] -> [1, 2, 3, 3].
    """
    expanded = []
    for index, count in enumerate(reversed(part), start=1):
        expanded += count * [index]
    return expanded
# generate all rules with number of columns for col_ind column
def generate_rules(ncols, col_ind):
    """Generate all candidate dependency rules targeting column *col_ind*.

    Column indices are rendered as strings like "0c".  The first rule
    contains only the target column itself; every other rule is a
    non-empty subset of the remaining columns (the implied right-hand
    side is always *col_ind*).  Returns a list of lists of column names.
    """
    logger.debug("Generate rules start")
    l = list(range(0, ncols))
    # Replace each numeric index with its "<index>c" column name.
    for value, key in enumerate(l):
        l[key] = str(value) + 'c'
    logger.debug("Generate rules, list of columns: " + str(l))
    rules = []
    sublist = list(l)
    sublist.remove(str(col_ind) + 'c')
    rules.append([str(col_ind) + 'c'])
    for L in range(0, len(sublist) + 1):
        for subset in itertools.combinations(sublist, L):
            if subset:
                logger.debug("Generate rules, next rule: " + str(list(subset)) + " -> " + str(col_ind) + 'c')
                rules.append(list(subset))
    logger.debug("Generated rules: " + str(rules))
    logger.debug("Generate rules end.")
    return rules  # list of lists
# remove dup from list: (when set() not working)
def dedup(list):
    """Return *list* with duplicates removed, keeping first occurrences.

    Unlike ``set``, this also works for unhashable elements because it
    uses membership tests on a plain list.  Note: the parameter name
    shadows the built-in ``list`` inside this function.
    """
    unique = []
    for item in list:
        if item in unique:
            continue
        unique.append(item)
    return unique
| 2.921875 | 3 |
flight/views.py | andela-Taiwo/Flight-Booking-Application | 0 | 12763532 | from django.shortcuts import render
from rest_framework import (
viewsets,
decorators
)
from rest_framework import exceptions
from flight.serializers import (
FlightSerializer,
BookFlightSerializer
)
from user.serializers import UserSerializer
from rest_framework import authentication, permissions
from api.response import FlightBoookingAPIResponse
import flight.services as flight_services
# Create your views here.
class FlightViewSet(viewsets.ViewSet):
    """REST viewset for flights: listing, CRUD, booking and payment.

    All business logic is delegated to ``flight.services``; every action
    wraps its result in a ``FlightBoookingAPIResponse``.
    """
    def list(self, request):
        ''' List and filter the flights. '''
        flights = flight_services.filter_flight(
            requestor=request.user,
            query_params=request.query_params
        )
        return FlightBoookingAPIResponse(
            FlightSerializer(flights, many=True).data
        )
    @decorators.action(methods=['get'], detail=False, url_path='users/(?P<type>\d+)/day/(?P<day>[0-9+\-]+)')
    def list_users(self, request, *args, **kwargs):
        ''' List users booked on flights of a given type on a specific day. '''
        users = flight_services.list_users(
            requestor=request.user,
            query_params=request.query_params,
            day=kwargs.get('day'),
            type=kwargs.get('type')
        )
        return FlightBoookingAPIResponse(
            users
        )
    def update(self, request, *args, **kwargs):
        ''' Update a single flight. '''
        try:
            # flight_id is only used to validate that pk is numeric; the
            # service receives the raw pk string below.
            flight_id = int(kwargs.get('pk'))
        except ValueError as e:
            raise exceptions.NotAcceptable(detail='Invalid flight number')
        flight = flight_services.update_flight(
            requestor=request.user,
            data=request.data,
            flight_id=kwargs.get('pk')
        )
        return FlightBoookingAPIResponse(
            FlightSerializer(
                flight
            ).data
        )
    def retrieve(self, request, *args, **kwargs):
        ''' Retrieve a single flight. '''
        try:
            # Same int() validation of pk as in update().
            flight_id = int(kwargs.get('pk'))
        except ValueError as e:
            raise exceptions.NotAcceptable(detail='Invalid flight number')
        flight = flight_services.retrieve_flight(
            requestor=request.user,
            flight_id=kwargs.get('pk')
        )
        return FlightBoookingAPIResponse(
            FlightSerializer(
                flight
            ).data
        )
    def create(self, request, *args, **kwargs):
        ''' Create a single flight. '''
        flight = flight_services.create_flight(
            data=request.data,
            requestor=request.user
        )
        return FlightBoookingAPIResponse(
            FlightSerializer(flight, many=True).data
        )
    @decorators.action(methods=['post'], detail=False, url_path='book')
    def book_flight(self, request, *args, **kwargs):
        ''' Book a ticket on a flight for the requesting user. '''
        booked_flight = flight_services.book_ticket(
            requestor=request.user,
            data=request.data,
        )
        return FlightBoookingAPIResponse(
            BookFlightSerializer(booked_flight, many=True).data
        )
    @decorators.action(methods=['put'], detail=False, url_path='(?P<flight_pk>\d+)/confirm')
    def confirm_flight_checkin(self, request, *args, **kwargs):
        ''' Confirm check-in for the requesting user's booked flight. '''
        confirmed_flight = flight_services.confirm_checkin(
            requestor=request.user,
            flight_id=kwargs.get('flight_pk')
        )
        return FlightBoookingAPIResponse(
            BookFlightSerializer(confirmed_flight).data
        )
    @decorators.action(methods=['post'], detail=False, url_path='payment')
    def flight_payment(self, request, *args, **kwargs):
        ''' Charge the requesting user for a booked ticket. '''
        flight_charged = flight_services.ticket_payment(
            requestor=request.user,
            data=request.data,
        )
        return FlightBoookingAPIResponse(
            FlightSerializer(flight_charged, many=True).data
        )
| 2.21875 | 2 |
normalized_roi_length.py | ImageJ-scripts/ij-macros | 0 | 12763533 | <gh_stars>0
#@File[] (label="ROI folder", style="file") roi_files
#@File (label="Results folder", style="directory") csvPath
# Bootstrap to extend modules search path #
from sys import path
import os.path
from java.lang.System import getProperty
jython_scripts = os.path.join(getProperty('user.home'), 'Jython_scripts')
path.append(jython_scripts)
#=========================================#
from ij import (IJ, ImagePlus)
from ij.gui import Roi
from ij.io import Opener
from IBPlib.ij.Utils.Files import buildList
def normalized_roi_length(roi_path, pixel_scaling, normalization_fn=None):
	'''
	Return the scaled length of the ROI stored at *roi_path*.

	The ROI's length in pixels is divided by *pixel_scaling* (pixels per
	physical unit).  When *normalization_fn* is callable, it is applied
	to the scaled measure and its result is returned instead.
	'''
	opener = Opener()
	roi = opener.openRoi(roi_path)
	scaled_measure = roi.getLength()/pixel_scaling
	if callable(normalization_fn):
		return normalization_fn(scaled_measure)
	# Fixed: previously returned the undefined name `measure`, which
	# raised a NameError whenever no normalization_fn was given.
	return scaled_measure
if __name__ in ("__builtin__", "__main__"):
	'''
	Use case where multiple rois are measured, scaled and normalized.
	Measurements are then saved to a csv file.
	'''
	rois = []
	for i in roi_files:
		if i.isDirectory():
			# NOTE(review): buildList returns a collection, so append()
			# nests a list inside `rois` while the file branch appends a
			# plain path string — presumably extend() was intended here;
			# confirm against IBPlib.buildList's return type.
			rois.append(buildList(i.getPath(), ".roi"))
		elif i.isFile() and i.getName().lower().endswith(".roi"):
			rois.append(i.getPath())
	scale = 3.1 # pixels/micrometers
	cutdistance = 50 # distance from cell body in micrometers
	# Subtract the cut distance from each scaled ROI length.
	lengths = (normalized_roi_length(roi, scale, lambda x: x-cutdistance) for roi in rois)
	resultspath = os.path.join(csvPath.getPath(),"regrowth_1.csv")
	with open(resultspath, "w") as csvfile:
		for row in lengths:
			line = "{0}\n".format(row)
			csvfile.write(line)
	IJ.log("Results were saved to {0}".format(resultspath))
test/pydata/test_datadict.py | NS2LPS/pyslave | 1 | 12763534 | from pydata import Data, h5todata
import numpy as np
import os
import h5py
def test_Data(tmpdir):
    """Smoke-test Data: construction, attribute/item access, append, save.

    Saves in both text and HDF5 form into pytest's tmpdir fixture.
    """
    o = Data(x=np.ones(3), y=np.ones(3), a=5, b='hh')
    assert o.b=='hh'
    assert o['a']==5
    o.append(np.ones(5),np.ones(5))
    o.save(os.path.join(tmpdir, 'test.txt'))
    o.save(os.path.join(tmpdir, 'test.h5'))
def test_h5todata(tmpdir):
    """Round-trip a Data object through HDF5 and read it back."""
    o = Data(x=np.ones(3), y=np.ones(3), a=5, b='hh')
    o.save(os.path.join(tmpdir, 'test.h5'))
    # Use a context manager so the HDF5 handle is always released (the
    # original left the file open, which can keep it locked).
    with h5py.File(os.path.join(tmpdir, 'test.h5'), 'r') as f:
        d = h5todata(f['data0000'])
        assert d.a==5
        assert d.x[0]==1
fh_fablib/__init__.py | nico1000/fh-fablib | 0 | 12763535 | from __future__ import unicode_literals
from functools import wraps
from os import chmod, mkdir, getuid
from os.path import dirname, exists, join
import socket
from subprocess import Popen, PIPE, call
import time
import pwd
from fabric.api import env, cd, run, local as run_local, task
from fabric.colors import cyan, red
from fabric.contrib.console import confirm
from fabric.utils import abort, puts
# Library version, exposed both as a comparable tuple and a string.
VERSION = (0, 6, 4)
__version__ = '.'.join(map(str, VERSION))
# Shared command-line flags for the prettier code formatter.
PRETTIER_OPTIONS = (
    '--single-quote --no-bracket-spacing --no-semi --trailing-comma es5'
)
# Default env values for supervisor-managed deployments; command strings
# are %-interpolated against Fabric's env dictionary at run time.
DEFAULTS = {
    'box_restart': ['sctl restart %(box_domain)s:*'],
    'box_check': [
        'PYTHONWARNINGS=ignore venv/bin/flake8 .',
        './node_modules/.bin/eslint *.js %(box_project_name)s/static',
        'venv/bin/python manage.py check',
        './node_modules/.bin/prettier --list-different ' + PRETTIER_OPTIONS +
        ' "%(box_project_name)s/static/**/*.scss"',
    ],
    'box_prettify': [
        './node_modules/.bin/prettier --write ' + PRETTIER_OPTIONS + ' *.js'
        ' "%(box_project_name)s/static/**/*.js"'
        ' "%(box_project_name)s/static/**/*.scss"',
    ],
    'box_python': 'python3',
    'box_test': [
        'venv/bin/python manage.py test',
        # './node_modules/.bin/gulp test',
    ],
    'box_enable_process': [
        'supervisor-create-conf %(box_domain)s wsgi'
        ' > supervisor/conf.d/%(box_domain)s.conf',
        'sctl reload',
    ],
    'box_disable_process': [
        'rm supervisor/conf.d/%(box_domain)s.conf',
        'sctl reload',
    ],
    'box_optimize': [
        'PATH=node_modules/.bin/:$PATH'
        ' find %(box_project_name)s/templates/ -name "*.svg"' \
        ' -type f -exec svgo -i {} --disable=removeViewBox' \
        ' --enable=removeDimensions \;',
        'find %(box_project_name)s/static \( -name "*.jpg" -o -name' \
        ' "*.jpeg" \) -type f -exec convert {} -verbose' \
        ' -sampling-factor 4:2:0 -strip' \
        ' -quality 85 -interlace JPEG -colorspace sRGB {} \;',
        'find %(box_project_name)s/static -name "*.png" -type f' \
        ' -exec convert {} -verbose -strip {} \;',
    ],
    'box_ssl_template': 'feinheit_cache_letsencrypt',
}
# Overrides applied on top of DEFAULTS when init(..., systemd=True).
DEFAULTS_SYSTEMD = {
    'box_unit_template': 'gunicorn',
    'box_restart': [
        'systemctl --user restart %(box_unit_template)s@%(box_domain)s.service', # noqa
    ],
    'box_enable_process': [
        'systemctl --user start %(box_unit_template)s@%(box_domain)s.service',
        'systemctl --user enable %(box_unit_template)s@%(box_domain)s.service',
    ],
    'box_disable_process': [
        'systemctl --user stop %(box_unit_template)s@%(box_domain)s.service',
        'systemctl --user disable %(box_unit_template)s@%(box_domain)s.service', # noqa
    ],
}
def require_env(fn):
    """Decorator: abort unless a deployment environment has been selected.

    Checks for ``env.box_remote`` (set by the per-environment setup
    tasks) and aborts with a message listing the valid environments
    when it is missing.
    """
    @wraps(fn)
    def _dec(*args, **kwargs):
        # box_remote is as good as any value being set from the
        # environment dictionary
        if not env.get('box_remote'):
            abort(red(
                'Environment (one of %s) missing. "fab <env> <command>"'
                % ', '.join(env.box_environments.keys()), bold=True))
        return fn(*args, **kwargs)
    return _dec
def require_services(fn):
    """Decorator: ensure local postgres and redis are running first.

    Before calling *fn*, probes each service's TCP port and, if closed,
    launches the service in the background (logging to tmp/) and probes
    again after a short delay; aborts if it still is not reachable.
    """
    def _service(port, executable, delay):
        # First probe: is something already listening on localhost:port?
        try:
            socket.create_connection(
                ('localhost', port),
                timeout=0.1).close()
        except socket.error:
            step('Launching %s in the background...' % executable)
            call('%(executable)s &> tmp/%(executable)s.log &' % {
                'executable': executable,
            }, shell=True)
            time.sleep(delay)
        # Second probe: verify the service actually came up.
        try:
            socket.create_connection(
                ('localhost', port),
                timeout=0.1).close()
        except socket.error:
            abort(red('Unable to start %s!' % executable, bold=True))
    @wraps(fn)
    def _dec(*args, **kwargs):
        _service(5432, 'postgres', 0.5)
        _service(6379, 'redis-server', 0.1)
        return fn(*args, **kwargs)
    return _dec
# Progress ------------------------------------------------------------------
def step(str):
    """Print *str* as a bold cyan progress heading.

    Note: the parameter name shadows the built-in ``str``.
    """
    puts(cyan('\n%s' % str, bold=True))
def init(fabfile, sentinel=None, min_version=None, systemd=None):
    """Initialize a project fabfile with fh-fablib's tasks and defaults.

    *fabfile* is the caller's module namespace dict; this function
    injects the per-environment setup tasks plus the standard command
    modules into it.  *min_version* guards against an outdated fh-fablib;
    *systemd* (required, True/False) selects systemd vs supervisor
    process-management defaults.  *sentinel* only exists to force the
    other arguments to be passed by keyword.

    Side effects: mutates Fabric's ``env``, monkey-patches this module's
    cd/run/run_local/confirm with env-interpolating wrappers, installs a
    git pre-commit hook and creates a ``tmp`` directory.
    """
    if sentinel is not None:
        abort(red(
            'Pass min_version and systemd as keyword arguments to'
            ' fh_fablib.init() please'
        ))
    if min_version is not None:
        if VERSION < min_version:
            abort(red(
                'fh-fablib update required. Have: %s. Want: %s.' % (
                    '.'.join(map(str, VERSION)),
                    '.'.join(map(str, min_version)),
                ),
            ))
    if systemd is None:
        abort(red(
            'fh_fablib.init() requires either systemd=True or systemd=False,'
            ' depending on whether you want to use systemd for process'
            ' supervision or not.'
        ))
    fabfile['__all__'] = (
        'check',
        'deploy',
        'dev',
        'git',
        'local',
        'server',
    )
    # Refuse to run when invoked as the server's web user.
    if pwd.getpwuid(getuid())[0] == 'www-data':
        abort(red('Stop fab-ing on the server.', bold=True))
    # Set defaults -----------------------------------------------------------
    if systemd:
        for key, value in DEFAULTS_SYSTEMD.items():
            env.setdefault(key, value)
    for key, value in DEFAULTS.items():
        env.setdefault(key, value)
    # Multi-env support ------------------------------------------------------
    def _create_setup_task_for_env(environment):
        # Builds a task that copies box_environments[environment] into env
        # under box_* keys and sets the host list.
        def _setup():
            env['box_environment'] = environment
            for key, value in env.box_environments[environment].items():
                env['box_%s' % key] = value
            env.hosts = env.box_servers
        _setup.__name__ = str(environment)
        _setup.__doc__ = 'Set environment to %s' % environment
        return _setup
    if env.get('box_hardwired_environment'):
        _create_setup_task_for_env(env.box_hardwired_environment)()
    else:
        # Create a task per environment
        for environment in env.box_environments:
            t = _create_setup_task_for_env(environment)
            shortcut = env.box_environments[environment].get('shortcut')
            aliases = (shortcut,) if shortcut else ()
            fabfile[environment] = task(aliases=aliases)(t)
            fabfile['__all__'] += (environment,)
    # Fabric commands with environment interpolation -------------------------
    def interpolate_with_env(fn):
        """Wrapper which extends a few Fabric API commands to fill in values from
        Fabric's environment dictionary"""
        @wraps(fn)
        def _dec(string, *args, **kwargs):
            return fn(string % env, *args, **kwargs)
        return _dec
    g = globals()
    g['cd'] = interpolate_with_env(cd)
    g['run'] = interpolate_with_env(run)
    g['run_local'] = interpolate_with_env(run_local)
    g['confirm'] = interpolate_with_env(confirm)
    # Git pre-commit hook which always runs "fab check" ----------------------
    def ensure_pre_commit_hook_installed():
        """
        Ensures that ``git commit`` fails if ``fab check`` returns any errors.
        """
        p = Popen('git rev-parse --git-dir'.split(), stdout=PIPE)
        git_dir = p.stdout.read().strip()
        project_dir = dirname(git_dir)
        if not any(exists(join(project_dir, name)) for name in (
                'fabfile.py', 'fabfile')):
            # Does not look like a Django project.
            # Additionally, "fab check" wouldn't work anyway.
            return
        pre_commit_hook_path = join(git_dir, 'hooks', 'pre-commit')
        if not exists(pre_commit_hook_path):
            with open(pre_commit_hook_path, 'w') as hook:
                hook.write('#!/bin/sh\nfab check\n')
            chmod(pre_commit_hook_path, 0o755)
    # Run this each time the fabfile is loaded
    ensure_pre_commit_hook_installed()
    if not exists('tmp'):
        mkdir('tmp')
    from fh_fablib import check, deploy, dev, git, local, server
    fabfile.update({
        'check': check,
        'deploy': deploy,
        'dev': dev,
        'git': git,
        'local': local,
        'server': server,
    })
| 1.765625 | 2 |
andes/config/tds.py | lacknc/Andes | 1 | 12763536 | from . import ConfigBase
from ..utils.cached import cached
class Tds(ConfigBase):
    """Configuration for time-domain simulation (TDS).

    Attribute defaults are set in __init__ and their human-readable
    descriptions are provided by ``config_descr``; keyword arguments are
    forwarded to ConfigBase for overriding.
    """
    def __init__(self, **kwargs):
        self.fixt = True                 # use fixed time step size
        self.tstep = 1 / 30              # time step size
        self.method = 'trapezoidal'      # integration method
        self.method_alt = ['euler', 'trapezoidal', 'fwdeuler']  # allowed methods
        self.t0 = 0.0                    # simulation start time
        self.tf = 20                     # simulation end time
        self.deltat = 0.01               # current step size (variable-step mode)
        self.deltatmax = 1               # variable-step upper bound
        self.deltatmin = 0.0002          # variable-step lower bound
        self.maxit = 30                  # max iterations per step
        self.tol = 1e-4                  # iteration error tolerance
        self.disturbance = False
        self.error = 1
        self.qrt = False                 # quasi-real-time pacing
        self.kqrt = 1                    # seconds of wall time per simulated second
        self.compute_flows = True        # post-compute bus injections / line flows
        self.max_cache = 0               # varout cache limit (0 = unlimited)
        super(Tds, self).__init__(**kwargs)
    @cached
    def config_descr(self):
        """Return descriptions of the configurable options (cached)."""
        descriptions = {
            'fixt':
                'use fixed time step size',
            'tstep':
                'time step size',
            'method':
                'time domain integration method',
            't0':
                'starting simulation time',
            'tf':
                'ending simulation time',
            'maxit':
                'maximum iteration number for each integration step',
            'tol':
                'iteration error tolerance',
            'qrt':
                'quasi-real-time simulation speed',
            'kqrt':
                'quasi-rt runs at kqrt seconds per simulated second',
            'compute_flows':
                'post-compute bus injections and line flows at each step',
            'max_cache':
                'maximum allowed steps in varout memory, save to disk if reached, '
                '0 for unlimited memory cache',
        }
        return descriptions
| 2.359375 | 2 |
tests_functional_small/test_ode_explicit_stepping.py | Pressio/pressio4py | 4 | 12763537 | <gh_stars>1-10
import numpy as np
from pressio4py import ode
class MySys:
    """Minimal ODE system with a constant velocity field f(y, t) = 3."""

    def __init__(self):
        pass

    def createVelocity(self):
        """Allocate and return a zeroed velocity vector of size 5."""
        print("create")
        velocity = np.zeros(5)
        return velocity

    def velocity(self, stateIn, time, R):
        """Write the constant velocity 3.0 into the preallocated vector R."""
        R[:] = 3.0
class MyDtSetter:
    """Callable step-size policy that always returns a fixed dt of 1.2."""

    def __init__(self):
        pass

    def __call__(self, step, time):
        print("MyDtSetter")
        dt = 1.2
        return dt
def test_forward_euler_constructor():
    """Smoke-test that a ForwardEuler explicit stepper can be constructed."""
    print("\n")
    print("\ntest_forward_euler_constructor")
    state = np.ones(5)
    sys = MySys()
    scheme = ode.stepscheme.ForwardEuler
    stepper = ode.create_explicit_stepper(scheme, state, sys)
def test_forward_euler_call():
    """One explicit Euler step with dt=2 on f=3 should move 1 -> 7."""
    print("\n")
    print("test_forward_euler_call")
    state = np.ones(5)
    sys = MySys()
    scheme = ode.stepscheme.ForwardEuler
    stepper = ode.create_explicit_stepper(scheme, state, sys)
    print("before step: ", state)
    assert( np.all(state == 1.) )
    # Arguments: state, start time, dt, number of the step being taken.
    stepper(state, 0., 2., 1)
    print("after step: ", state)
    '''
    state = state + dt * f
    where f is MySys velocity
    so we should have:
    '''
    assert( np.all(state == 7.) )
def test_forward_euler_advance_n_steps():
    """advance_n_steps with a dt-callback (dt=1.2) should move 1 -> 4.6."""
    print("\n")
    print("test_forward_euler_advance_n_steps")
    state = np.ones(5)
    sys = MySys()
    scheme = ode.stepscheme.ForwardEuler
    stepper = ode.create_explicit_stepper(scheme, state, sys)
    dt = 1.2  # NOTE: unused; the dt comes from the MyDtSetter callback below
    dtcb = MyDtSetter()
    ode.advance_n_steps(stepper, state, 0., dtcb, 1)
    print("after step: ", state)
    # 1 + 1.2 * 3 = 4.6
    assert( np.all(state == 4.6) )
def test_forward_euler_advance_n_steps_and_observe():
    """The observer should see the initial state (step 0) and the result
    of the single Euler step with dt=1.2 (step 1)."""
    print("\n")
    print("test_forward_euler_advance_n_steps_and_observe")
    state = np.ones(5)
    sys = MySys()
    scheme = ode.stepscheme.ForwardEuler
    stepper = ode.create_explicit_stepper(scheme, state, sys)
    dt = 1.2
    class MyObs:
        """Observer asserting the expected state at each step."""
        def __init__(self):
            pass
        def __call__(self, step, time, state):
            print("I got cal")
            print(state)
            assert(step == 0 or step == 1)
            if (step ==0):
                assert( np.all(state == 1.) )
            if (step ==1):
                # 1 + 1.2 * 3 = 4.6 after one forward-Euler step.
                assert( np.all(state == 4.6) )
    ode.advance_n_steps_and_observe(stepper,state,0.,dt,1, MyObs())
    print("after obs: ", state)
#if __name__ == '__main__':
#test_forward_euler()
| 2.6875 | 3 |
constants.py | dimitrijoy/fact_check_explorer_api | 0 | 12763538 | CLAIMANT_START = 9
CLAIMANT_END = -1
TIME_LIMIT = 5 | 0.890625 | 1 |
rla1000_consts.py | reversinglabs/reversinglabs-a1000-1 | 0 | 12763539 | # --
# File: a1000_consts.py
#
# Copyright (c) ReversingLabs Inc 2016-2018
#
# This unpublished material is proprietary to ReversingLabs Inc.
# All rights reserved.
# Reproduction or distribution, in whole
# or in part, is forbidden except by express written permission
# of ReversingLabs Inc.
#
# --
# --- JSON field names used in A1000 API requests/responses ---
A1000_JSON_BASE_URL = "base_url"
A1000_JSON_TASK_ID = "task_id"
A1000_JSON_API_KEY = "api_key"
A1000_JSON_MALWARE = "malware"
# NOTE(review): A1000_JSON_TASK_ID is assigned twice; this second
# assignment ("id") silently overrides "task_id" above — confirm which
# key the API actually expects and remove the dead duplicate.
A1000_JSON_TASK_ID = "id"
A1000_JSON_VAULT_ID = "vault_id"
A1000_JSON_URL = "url"
A1000_JSON_HASH = "hash"
A1000_JSON_PLATFORM = "platform"
A1000_JSON_POLL_TIMEOUT_MINS = "timeout"
# --- User-facing status / error message templates ---
A1000_ERR_UNABLE_TO_PARSE_REPLY = "Unable to parse reply from device"
A1000_ERR_REPLY_FORMAT_KEY_MISSING = "None '{key}' missing in reply from device"
A1000_ERR_REPLY_NOT_SUCCESS = "REST call returned '{status}'"
A1000_SUCC_REST_CALL_SUCCEEDED = "REST Api call succeeded"
A1000_ERR_REST_API = "REST Api Call returned error, status_code: {status_code}, detail: {detail}"
A1000_TEST_PDF_FILE = "a1000_test_connectivity.pdf"
# Delay between report polls, in seconds.
A1000_SLEEP_SECS = 3
A1000_MSG_REPORT_PENDING = "Report Not Found"
A1000_MSG_MAX_POLLS_REACHED = "Reached max polling attempts. Please use the MD5 or Sha256 of the file as a parameter to <b>get report</b> to query the report status."
# Fields requested from the A1000 report endpoint.
A1000_PARAM_LIST = {
    "fields": [
        "file_type",
        "file_subtype",
        "file_size",
        "extracted_file_count",
        "local_first_seen",
        "local_last_seen",
        "classification_origin",
        "classification_reason",
        "threat_status",
        "trust_factor",
        "threat_level",
        "threat_name",
        "summary"]
    }
# in minutes
A1000_MAX_TIMEOUT_DEF = 10
| 1.773438 | 2 |
daiquiri/core/management/commands/show_databases.py | agy-why/daiquiri | 14 | 12763540 | <reponame>agy-why/daiquiri<filename>daiquiri/core/management/commands/show_databases.py<gh_stars>10-100
import pprint
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command: pretty-print the project's DATABASES setting."""
    def handle(self, *args, **options):
        # indent=4 makes nested connection dicts readable on the console.
        pprint.pprint(settings.DATABASES, indent=4)
| 1.773438 | 2 |
sudoku.py | the-man-with-no-name/Sudoku | 0 | 12763541 | __author__ = "<NAME>"
__license__ = "MIT"
__email__ = "<EMAIL>"
"""
Todo:
[Organized HIGH to LOW priority...]
Organize in Functions better -
Remove Redundant Code & Optimize -
"""
# Modules required
import pygame
import time
import numpy
from typing import List
# Initialize global variables
pygame.init()
SQ_DIM = 480  # window is SQ_DIM x SQ_DIM pixels
top = 48      # y offset (pixels) of the grid's top-left corner
left = 86     # x offset (pixels) of the grid's top-left corner
y = 32        # edge length (pixels) of one cell
screen = pygame.display.set_mode((SQ_DIM,SQ_DIM))
clock = pygame.time.Clock()
COLOR_INACTIVE = pygame.Color(156,156,156)  # idle cell border/text
COLOR_ACTIVE = pygame.Color(255,255,255)    # focused cell
COLOR_TAKEN = pygame.Color(255,89,89)       # conflicting entry
ALLOWED = ['1','2','3','4','5','6','7','8','9']  # digits accepted as input
FONT = pygame.font.Font("font/Roboto-Medium.ttf", 32)
FONT_SMALL = pygame.font.Font("font/Roboto-Medium.ttf", 16)
# Predefined Sudoku game boars
def initialboard(difficulty: int = 3) -> List:
    """Return a randomized starting board for the requested difficulty.

    Each board is a list of ``[(row, col), value]`` givens.  The stored
    template is passed through ``isomorphic_board`` so every call yields
    an equivalent but different-looking puzzle.

    Bug fix: the original ``else`` branch returned ``easy_1`` — a name
    defined only inside the difficulty==1 branch — guaranteeing a
    NameError for unknown difficulties, and the trailing
    ``return hard_1`` was unreachable.  Unknown difficulty values now
    fall back to the easy board.
    """
    easy_1 = [[(0,1),1],[(0,3),9],[(0,4),4],[(0,7),6],
            [(1,1),2],[(1,3),7],[(1,5),6],[(1,6),1],[(1,8),3],
            [(2,0),6],[(2,2),9],[(2,3),1],[(2,6),7],[(2,8),4],
            [(3,2),7],[(3,3),4],
            [(4,0),4],[(4,2),3],[(4,6),8],[(4,8),9],
            [(5,5),8],[(5,6),4],
            [(6,0),9],[(6,2),6],[(6,5),4],[(6,6),2],[(6,8),7],
            [(7,0),2],[(7,2),1],[(7,3),6],[(7,5),5],[(7,7),3],
            [(8,1),7],[(8,4),2],[(8,5),9],[(8,7),4]]
    med_1 = [[(0,0),6],[(0,1),4],[(0,3),9],
            [(1,5),1],[(1,6),3],
            [(2,8),2],
            [(3,1),3],[(3,6),9],[(3,8),6],
            [(4,0),1],[(4,6),7],[(4,7),5],
            [(5,1),2],[(5,4),8],[(5,5),5],[(5,8),1],
            [(6,5),8],
            [(7,7),9],[(7,8),7],
            [(8,0),2],[(8,1),7],[(8,2),9],[(8,7),6],[(8,8),4]]
    hard_1 = [[(0,3),4],
            [(1,5),8],[(1,7),9],[(1,8),6],
            [(2,4),5],[(2,5),3],[(2,7),8],
            [(3,1),4],[(3,2),8],
            [(4,0),2],[(4,4),4],[(4,5),9],[(4,8),1],
            [(5,0),6],[(5,6),5],[(5,8),9],
            [(6,0),4],[(6,3),1],[(6,6),7],
            [(7,1),8],[(7,3),9],[(7,6),4],
            [(8,1),1],[(8,4),7],[(8,7),2]]
    boards = {1: easy_1, 2: med_1, 3: hard_1}
    # Default to the easy board for out-of-range difficulty values.
    return isomorphic_board(boards.get(difficulty, easy_1))
# Creates an isomorphic sudoku board
# Only symbol, row, column permutations implemented
# TODO: Implement block and stack permutations
def isomorphic_board(board: List) -> List:
    """Relabel symbols and shuffle rows/columns within their bands.

    Produces a puzzle equivalent to *board*: one random permutation of
    the nine symbols, plus independent random permutations of the three
    rows and three columns inside each 3x3 band.  Block and stack
    permutations are not implemented.
    """
    permute_symbols = numpy.random.permutation(9)
    row_permutations = [numpy.random.permutation(range(3 * band, 3 * (band + 1))) for band in range(3)]
    col_permutations = [numpy.random.permutation(range(3 * band, 3 * (band + 1))) for band in range(3)]
    mapped = []
    for (row, col), value in board:
        new_row = row_permutations[row // 3].item(row % 3)
        new_col = col_permutations[col // 3].item(col % 3)
        new_value = permute_symbols.item(value - 1) + 1
        mapped.append([(new_row, new_col), new_value])
    return mapped
# Create number boxes and user input boxes based on the initial board chosen
def create_board(taken_positions: List, number_boxes: List, input_boxes: List, board: List, difficulty: int) -> None:
    """Populate UI boxes and the numeric board from a starting puzzle.

    Mutates its arguments in place: given cells become NumBox widgets and
    are written into *board* (a 9x9 array), every remaining cell becomes
    an editable InputBox.  Pixel positions use the module globals
    ``left``, ``top`` and cell size ``y``.
    """
    init_board = initialboard(difficulty)
    for position in init_board:
        pos = position[0]
        taken_positions.append(pos)
        num = position[1]
        number_boxes.append(NumBox(left+pos[1]*(y+1),top+pos[0]*(y+1),y,y,board_coordinates=(pos[0],pos[1]),value=num,text=str(num)))
        board[pos[0],pos[1]] = num
    # Any cell without a given digit gets a user-editable input box.
    for i in range(9):
        for j in range(9):
            if (i,j) not in taken_positions:
                input_boxes.append(InputBox(left+j*(y+1),top+i*(y+1),y,y,board_coordinates=(i,j)))
# Make the sudoku board look nice with borders and such
def borders(screen):
    """Draw the sudoku grid lines onto *screen*.

    Thin gray lines separate individual cells; thick white lines frame
    the board and mark the 3x3 subsquare boundaries.  Geometry derives
    from the module globals ``left`` and ``top``.
    """
    shift = 32   # cell edge length in pixels (matches module global y)
    total = 296  # overall grid span in pixels
    # Horizontal separators between cell rows.
    for i in range(9):
        pygame.draw.line(screen, COLOR_INACTIVE, (left,top+i*(shift+1)-1), (left+total,top+i*(shift+1)-1),4)
    # Outer frame.
    pygame.draw.line(screen, COLOR_ACTIVE, (left-1,top-1), (left+total+2,top-1),4)
    pygame.draw.line(screen, COLOR_ACTIVE, (left,top), (left,top+total+2),4)
    pygame.draw.line(screen, COLOR_ACTIVE, (left+total,top), (left+total,top+total+2),4)
    pygame.draw.line(screen, COLOR_ACTIVE, (left,top+total), (left+total,top+total),4)
    # 3x3 subsquare boundaries.
    pygame.draw.line(screen, COLOR_ACTIVE, (left,top+3*(shift+1)-1), (left+total,top+3*(shift+1)-1),4)
    pygame.draw.line(screen, COLOR_ACTIVE, (left,top+6*(shift+1)-1), (left+total,top+6*(shift+1)-1),4)
    pygame.draw.line(screen, COLOR_ACTIVE, (left+3*(shift+1)-1,top), (left+3*(shift+1)-1,top+total),4)
    pygame.draw.line(screen, COLOR_ACTIVE, (left+6*(shift+1)-1,top), (left+6*(shift+1)-1,top+total),4)
# Check whether the board has Sudoku Properties
# 1) Whole Board is Latin Square
# 2) Each subsquare has a distinct entries
def win(board: List) -> bool:
    """Check whether *board* (a 9x9 numpy array) is a solved sudoku.

    True iff every row and column contains 1..9 exactly once (Latin
    square property) and every 3x3 subsquare also contains 1..9 exactly
    once.
    """
    digits = numpy.arange(1, 10)
    # Latin square: each row and each column must sort to 1..9.
    for idx in range(9):
        row_ok = numpy.array_equal(numpy.sort(board[idx]), digits)
        col_ok = numpy.array_equal(numpy.sort(board[:, idx]), digits)
        if not (row_ok and col_ok):
            return False
    # Each 3x3 subsquare, flattened and sorted, must also be 1..9.
    for block_row in range(0, 9, 3):
        for block_col in range(0, 9, 3):
            block = board[block_row:block_row + 3, block_col:block_col + 3]
            if not numpy.array_equal(numpy.sort(block, axis=None), digits):
                return False
    return True
# Is this a valid number placement, i.e., does it maintain the Latin Square
# property and the subsquare property?
def is_taken(coord: tuple, num: int, board) -> bool:
    '''((int, int), int, numpy.ndarray) -> bool
    Return True if placing `num` at `coord` conflicts with an equal value
    already on `board` -- the same number elsewhere in the same row, the
    same column, or the same 3x3 subsquare.
    0 is the "empty cell" sentinel and never conflicts.
    '''
    if num == 0:
        return False
    row, col = coord
    # Latin square: same number in another row of this column
    for i in range(9):
        if board.item(i, col) == num and row != i:
            return True
    # Latin square: same number in another column of this row
    for j in range(9):
        if board.item(row, j) == num and col != j:
            return True
    # Subsquare: same number in any *other* cell of this 3x3 block.
    # (The original excluded every cell sharing a row or column with
    # `coord`, which only worked because the scans above cover those.)
    startx = row // 3
    starty = col // 3
    for i in range(startx * 3, startx * 3 + 3):
        for j in range(starty * 3, starty * 3 + 3):
            if board.item(i, j) == num and (i, j) != (row, col):
                return True
    return False
def find_first_empty_location(sboard) -> tuple:
    '''(numpy.ndarray) -> (int, int)
    Return the (row, col) of the first 0 cell in row-major order, or
    (-1, -1) if the board has no empty cells.
    (The original annotation claimed -> bool; it always returns a tuple.)
    '''
    for r in range(9):
        for c in range(9):
            if sboard.item(r, c) == 0:
                return (r, c)
    return (-1, -1)
# Solve Sudoku by backtracking
def sudoku_backtracking_solver(sboard) -> bool:
    '''(numpy.ndarray) -> bool
    Solve the Sudoku in place by depth-first backtracking.  Returns True
    once every cell is filled consistently, False when the current partial
    board admits no solution.
    '''
    row, col = find_first_empty_location(sboard)
    # (-1, -1) means no empty cell remains: the board is solved.
    if (row, col) == (-1, -1):
        return True
    for candidate in range(1, 10):
        if is_taken((row, col), candidate, sboard):
            continue
        sboard[row, col] = candidate
        if sudoku_backtracking_solver(sboard):
            return True
        # dead end: undo and try the next candidate
        sboard[row, col] = 0
    return False
# Class defining user input boxes
class InputBox:
    '''An editable Sudoku cell.  The player clicks it to gain focus and
    types a digit; the text is validated against the module-level ALLOWED
    set and mirrored into `value` (0 means empty).'''
    def __init__(self, x, y, w, h, text='', cursor_visible=True, max_string_length=1, board_coordinates=(0,0)):
        '''Create the box at pixel rect (x, y, w, h), mapped to the board
        cell `board_coordinates`.'''
        self.rect = pygame.Rect(x, y, w, h)
        self.color = COLOR_INACTIVE
        self.text = text
        self.txt_surface = FONT.render(text, True, self.color)
        # True while the box has keyboard focus
        self.active = False
        self.cursor_color = (0, 0, 1)
        self.cursor_visible = cursor_visible
        self.max_string_length = max_string_length # set to -1 for no limit
        self.board_coordinates = board_coordinates
        # numeric value of the cell; 0 means empty
        self.value = 0
    def handle_event(self, event):
        '''Process one pygame event: toggle focus on mouse clicks and edit
        the text on key presses while focused.'''
        if event.type == pygame.MOUSEBUTTONDOWN:
            # clicking the box toggles focus; clicking elsewhere drops it
            if self.rect.collidepoint(event.pos):
                self.active = not self.active
            else:
                self.active = False
            self.color = COLOR_ACTIVE if self.active else COLOR_INACTIVE
        if event.type == pygame.KEYDOWN:
            if self.active:
                if event.key == pygame.K_RETURN:
                    print(self.text)
                elif event.key == pygame.K_BACKSPACE:
                    self.text = self.text[:-1]
                elif len(self.text) < self.max_string_length or self.max_string_length == -1:
                    self.text += event.unicode
                # keep `value` in sync with the (validated) text; anything
                # not in ALLOWED (presumably the digits 1-9 -- confirm) is
                # rejected and the cell is cleared
                if(self.text == ''):
                    self.value = 0
                elif(self.text in ALLOWED):
                    self.value = int(self.text)
                else:
                    self.text = ''
                    self.value = 0
                self.txt_surface = FONT.render(self.text, True, self.color)
    def get_attr(self):
        '''() -> ((int, int), int)  Return (board cell, current value).'''
        return (self.board_coordinates,self.value)
    def draw(self, screen):
        '''Blit the text and the box outline onto `screen`.'''
        screen.blit(self.txt_surface, (self.rect.x+6, self.rect.y-2))
        pygame.draw.rect(screen, self.color, self.rect, 2)
# Number boxes from predefined board, not user interactive.
class NumBox:
    '''A fixed, non-interactive cell showing a pre-filled clue digit.'''
    def __init__(self, x, y, w, h, text='', value=0, board_coordinates=(0,0)):
        '''Create the clue box at pixel rect (x, y, w, h) holding `value`
        (rendered from `text`) for board cell `board_coordinates`.'''
        self.rect = pygame.Rect(x, y, w, h)
        self.text = text
        self.color = COLOR_INACTIVE
        self.hint = 0
        self.board_coordinates = board_coordinates
        self.value = value
        self.txt_surface = FONT.render(text, True, self.color)
    def draw(self, screen):
        '''Draw the digit centred in the box, plus the box outline.'''
        surf = self.txt_surface.get_rect()
        surf.center = (self.rect.x+int(self.rect.w/2), (self.rect.y + int(self.rect.h/2)))
        screen.blit(self.txt_surface, surf)
        pygame.draw.rect(screen, self.color, self.rect, 2)
# Messages to inform player
class MessageBox:
    '''A static text box drawn centred inside an outlined rect; base class
    for the status boxes below.'''
    def __init__(self, x, y, w, h, text, font=FONT):
        self.rect = pygame.Rect(x, y, w, h)
        self.text = text
        self.color = COLOR_ACTIVE
        self.hint = 0
        self.font = font
        self.txt_surface = font.render(text, True, self.color)
    def __draw__(self, screen):
        '''Blit the text centred in the box and draw the outline.
        NOTE(review): __draw__ is not a real dunder protocol; subclasses
        simply expose it as draw().'''
        surf = self.txt_surface.get_rect()
        surf.center = (self.rect.x+int(self.rect.w/2), (self.rect.y + int(self.rect.h/2)))
        screen.blit(self.txt_surface, surf)
        pygame.draw.rect(screen, self.color, self.rect, 2)
# Message to indicate whether the move just made was valid
class TextBox(MessageBox):
    '''Feedback box telling the player whether the last move was valid.'''
    def __init__(self, x, y, w, h, text='',font=FONT_SMALL):
        super().__init__(x,y,w,h,text,font)
    def update(self,hint: bool):
        '''Set the message from the hint flag: True -> "Try again",
        False -> "Go!", then re-render the text surface.'''
        if hint:
            self.text="Try again"
        else:
            self.text="Go!"
        self.txt_surface = self.font.render(self.text, True, self.color)
    def draw(self, screen):
        super().__draw__(screen)
# Message to indicate whether the board is properly completed
class WinBox(MessageBox):
    '''Status box showing whether the board is completely and correctly
    filled; also remembers whether the final score was already recorded.'''
    def __init__(self, x, y, w, h, text='',font=FONT_SMALL):
        super().__init__(x,y,w,h,text,font)
        # flips to True once the board first validates
        self.win = False
        # guards against writing the score to the leaderboard twice
        self.score_changed = False
    def update(self,board):
        '''Re-check the board with win() and refresh the message.'''
        if win(board):
            self.text="You Win!"
            self.win = True
        else:
            self.text="Not done"
        self.txt_surface = self.font.render(self.text, True, self.color)
    def draw(self, screen):
        super().__draw__(screen)
class ScoreBox(MessageBox):
    '''Running score display; update() adds a (possibly negative) delta.'''
    def __init__(self, x, y, w, h, text='',font=FONT_SMALL):
        super().__init__(x,y,w,h,text,font)
        # current numeric score
        self.value = 0
    def update(self,move):
        '''Add `move` to the score and re-render the text.'''
        self.value += move
        self.text = str(self.value)
        self.txt_surface = self.font.render(self.text, True, self.color)
    def draw(self, screen):
        super().__draw__(screen)
def text_objects(text, font):
    '''(str, pygame.font.Font) -> (Surface, Rect)
    Render `text` in black with `font` and return the surface together
    with its bounding rect.'''
    rendered = font.render(text, True, pygame.Color(0,0,0))
    return rendered, rendered.get_rect()
def button(msg,x,y,w,h,ic,ac,action=None):
    '''Draw a clickable button labelled `msg` at rect (x, y, w, h) on the
    module-level `screen`.  `ic`/`ac` are the idle and hover colours.
    While hovered, a held left click calls action(int(msg)), so msg must
    be a numeric string (here: the difficulty level).
    NOTE(review): fires every frame while the button is held down, so
    `action` may be invoked repeatedly -- confirm that is intended.'''
    mouse = pygame.mouse.get_pos()
    click = pygame.mouse.get_pressed()
    # hover test: is the pointer inside the button rect?
    if x+w > mouse[0] > x and y+h > mouse[1] > y:
        pygame.draw.rect(screen, ac,(x,y,w,h))
        if click[0] == 1 and action != None:
            difficulty = int(msg)
            action(difficulty)
    else:
        pygame.draw.rect(screen, ic,(x,y,w,h))
    # centred label on top of the rect
    textSurf, textRect = text_objects(msg, FONT_SMALL)
    textRect.center = ( (x+int(w/2)), (y+int(h/2)) )
    screen.blit(textSurf, textRect)
# Get the list of Highscores from the file: highscores.txt
def get_highscores():
    '''() -> list of MessageBox
    Read data/highscores.txt (one integer score per line) and return one
    MessageBox per score, ordered best-first and positioned as a vertical
    list for the intro screen.'''
    space = 10
    height = 20
    # the with-block closes the file; the original also called f.close()
    # inside it, which was redundant
    with open('data/highscores.txt') as f:
        scores = sorted((int(line.rstrip()) for line in f), reverse=True)
    scoreBoxes = []
    # enumerate replaces the original manually-incremented counter
    for i, score in enumerate(scores):
        scoreBoxes.append(MessageBox(200, 330 + space * i + height * (i - 1),
                                     80, height, text="{}".format(score),
                                     font=FONT_SMALL))
    return scoreBoxes
# Update the highscores.txt file
def update_leaderboard(new_score: int) -> None:
    '''(int) -> NoneType
    Record new_score in data/highscores.txt (one score per line).  The
    leaderboard keeps a fixed number of entries: when new_score beats the
    current minimum, the minimum is dropped, new_score takes its place and
    the file is rewritten best-first.  Scores that do not beat the minimum
    are ignored.
    NOTE(review): an empty file never accepts any score -- the board is
    evidently expected to be pre-seeded; confirm before changing.'''
    with open('data/highscores.txt') as f:
        scores = [int(line.rstrip()) for line in f]
    if scores:
        if new_score <= min(scores):
            return
        # drop the current minimum and take the new score instead
        # (equivalent to the original shift-left-then-overwrite loop)
        scores.remove(min(scores))
        scores.append(new_score)
    # 'w' already truncates, so the original's seek(0)/truncate()/close()
    # calls inside the with-block were redundant
    with open('data/highscores.txt', 'w') as f:
        for score in sorted(scores, reverse=True):
            f.write("{}\n".format(score))
# Update user score
def update_score(lastboard,board,Hint,change_to_zero,changed_up_one,scorebox1,screen):
    '''Adjust and redraw the score after one frame.
    +1 the first time a cell is filled by a valid move, -1 while the board
    contains an invalid move (Hint is True), 0 otherwise.  changed_up_one
    is a 9x9 bool matrix remembering which cells already earned their
    point; clearing a cell (change_to_zero) never scores.'''
    if numpy.array_equal(lastboard,board):
        scorebox1.update(0)
    elif (not numpy.array_equal(lastboard,board)) and (not change_to_zero):
        # locate the cell that changed this frame
        (r,c) = matrix_not_equal(lastboard,board)
        if (not Hint) and (not changed_up_one.item(r,c)):
            scorebox1.update(1)
            # Only allow a box to increase the score once
            changed_up_one[r,c] = True
        elif Hint:
            scorebox1.update(-1)
        else:
            scorebox1.update(0)
    else:
        scorebox1.update(0)
    scorebox1.draw(screen)
# Returns lexicographic first place two matrices not equal if the matrices are the same shape
def matrix_not_equal(A, B):
    '''(numpy.ndarray, numpy.ndarray) -> (int, int)
    Return (row, col) of the lexicographically first row and first column
    in which A and B differ.  Returns (-1, -1) when the shapes differ;
    a coordinate stays -1 when no row/column differs.
    (The original kept scanning after a hit and so reported the *last*
    differing row/column, contradicting its own docstring; for the boards
    used here -- at most one changed cell -- the result is identical.)'''
    row = -1
    col = -1
    if A.shape != B.shape:
        return (row, col)
    (nrows, ncols) = A.shape
    for i in range(nrows):
        if not numpy.array_equal(A[i], B[i]):
            row = i
            break
    for j in range(ncols):
        if not numpy.array_equal(A[:, j], B[:, j]):
            col = j
            break
    return (row, col)
def main():
    '''Show the intro/menu screen: title, three difficulty buttons (each
    launches game() with that difficulty), and the current leaderboard.
    Loops until the window is closed.
    NOTE(review): `intro` is never set False -- on QUIT pygame.quit() is
    called but the loop keeps running, so later pygame calls would fail;
    confirm whether the process is expected to exit another way.'''
    intro = True
    while intro:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
            else:
                # redraw the whole menu every event
                screen.fill((0, 0, 0))
                sudoku = MessageBox(140,50,200,50,text="SUDOKU!")
                sudoku.__draw__(screen)
                selectdiff = MessageBox(165,150,150,30,text="Select Difficulty",font=FONT_SMALL)
                selectdiff.__draw__(screen)
                # difficulty buttons; the label doubles as the int passed
                # to game()
                button("1",140,200,40,50,COLOR_INACTIVE,COLOR_ACTIVE,action=game)
                button("2",220,200,40,50,COLOR_INACTIVE,COLOR_ACTIVE,action=game)
                button("3",300,200,40,50,COLOR_INACTIVE,COLOR_ACTIVE,action=game)
                highscores = MessageBox(165,270,150,30,text="Highscores",font=FONT_SMALL)
                scoreBoxes = get_highscores()
                for scoreBox in scoreBoxes:
                    scoreBox.__draw__(screen)
                highscores.__draw__(screen)
                pygame.display.update()
                clock.tick(40)
    return
def game(difficulty):
    '''Run one Sudoku game at the given difficulty until the window is
    closed.  Builds the board, solves a copy for reference (printed to the
    console), then loops: handle input, validate moves, redraw, score, and
    record the final score on the leaderboard once the board validates.'''
    # Initialize board components
    board = numpy.zeros((9,9),dtype=int)
    # Taken[r, c] is True while the value at (r, c) conflicts with another
    Taken = numpy.zeros((9,9),dtype=bool)
    # lastlastboard = numpy.zeros((9,9),dtype=int)
    lastboard = numpy.zeros((9,9),dtype=int)
    number_boxes = []
    taken_positions = []
    input_boxes = []
    changed_up_one = numpy.zeros((9,9),dtype=bool)
    create_board(taken_positions,number_boxes,input_boxes,board,difficulty)
    # solve a copy so the reference solution is available on the console
    sboard = numpy.copy(board)
    sudoku_backtracking_solver(sboard)
    print(sboard)
    # Create Progress Messages
    resetbox1 = WinBox(left,top+310,150,40,text='Not done',font=FONT)
    hintbox1 = TextBox(left,top+360,150,40,text='GO!',font=FONT)
    scorebox1 = ScoreBox(left+170,top+310,100,40,text='0',font=FONT)
    # Run until user asks to quit
    running = True
    while running:
        # snapshot the board so this frame's change can be located
        lastboard = numpy.copy(board)
        change_to_zero = False
        # Did user click window close button?
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            for box in input_boxes:
                box.handle_event(event)
        # Check whether moves were valid
        for box in input_boxes:
            (coord,number) = box.get_attr()
            board[coord[0],coord[1]] = number
            if lastboard[coord[0],coord[1]] != 0 and number == 0:
                change_to_zero = True
            toggle = is_taken(coord,number,board)
            if toggle:
                Taken[coord[0],coord[1]] = True
            else:
                Taken[coord[0],coord[1]] = False
        # Draw the number the user inputed
        screen.fill((0, 0, 0))
        for numbox in number_boxes:
            numbox.draw(screen)
        for box in input_boxes:
            box.draw(screen)
        # Are there any invalid moves on the board?
        Hint = numpy.any(Taken)
        # Update Hint Message
        hintbox1.update(Hint)
        hintbox1.draw(screen)
        # Update user score
        update_score(lastboard,board,Hint,change_to_zero,changed_up_one,scorebox1,screen)
        # Indicate to user whether game is finished
        resetbox1.update(board)
        resetbox1.draw(screen)
        # Edit highscores if user won and score merits leaderboard
        if resetbox1.win and not resetbox1.score_changed:
            new_score = int(scorebox1.text)
            update_leaderboard(new_score)
            resetbox1.score_changed = True
        borders(screen)
        pygame.display.update()
        clock.tick(40)
screen.fill((0, 0, 0))
pygame.display.update() | 2.953125 | 3 |
unsupervised_lensing/models/VAE_Nets.py | DeepLense-Unsupervised/unsupervised-lensing | 8 | 12763542 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Encoder(nn.Module):
    '''Convolutional encoder: maps an input image to the mean and
    log-variance of a 1000-dimensional latent Gaussian.'''
    def __init__(self, no_channels=1):
        super().__init__()
        # three strided/valid convolutions that shrink the image down to a
        # 64-channel 9x9 feature map, followed by two linear heads
        self.conv1 = nn.Conv2d(no_channels, 16, 7, stride=3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 7, stride=3, padding=1)
        self.conv3 = nn.Conv2d(32, 64, 7)
        self.flat = nn.Flatten()
        self.mu = nn.Linear(5184, 1000)   # 5184 = 64 channels * 9 * 9
        self.var = nn.Linear(5184, 1000)
    def forward(self, x):
        '''Return (z_mu, z_var) for a batch of images.'''
        features = F.relu(self.conv3(F.relu(self.conv2(F.relu(self.conv1(x))))))
        flattened = self.flat(features)
        return self.mu(flattened), self.var(flattened)
class Decoder(nn.Module):
    '''Transposed-convolutional decoder: maps a 1000-d latent vector back
    to an image with values in [-1, 1].'''
    def __init__(self, no_channels=1):
        super().__init__()
        self.linear = nn.Linear(1000, 5184)
        self.conv4 = nn.ConvTranspose2d(64, 32, 7)
        self.conv5 = nn.ConvTranspose2d(32, 16, 7, stride=3, padding=1, output_padding=2)
        self.conv6 = nn.ConvTranspose2d(16, no_channels, 6, stride=3, padding=1, output_padding=2)
    def forward(self, x):
        '''Return the reconstructed image batch for latent batch x.'''
        # project back to a 64x9x9 feature map, then upsample in two steps
        feature_map = self.linear(x).reshape(-1, 64, 9, 9)
        upsampled = F.relu(self.conv5(F.relu(self.conv4(feature_map))))
        # tanh keeps output pixel values in [-1, 1]
        return torch.tanh(self.conv6(upsampled))
class VAE(nn.Module):
    '''Variational autoencoder: encoder + reparameterization + decoder.'''
    def __init__(self, enc, dec):
        super().__init__()
        self.enc = enc
        self.dec = dec
    def forward(self, x):
        '''Return (reconstruction, z_mu, z_var) for input batch x.'''
        z_mu, z_var = self.enc(x)
        # reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I)
        sigma = torch.exp(z_var / 2)
        noise = torch.randn_like(sigma)
        latent = noise.mul(sigma).add_(z_mu)
        return self.dec(latent), z_mu, z_var
| 2.828125 | 3 |
test/test_fundmental.py | KunxiongWang/simonsc | 2 | 12763543 | <filename>test/test_fundmental.py
from simonsc.object.csmar_table import csmar_tables, csmar_class_dict
from simonsc import auth
from simonsc.utils import query
from simonsc.api import get_table_data
if __name__ == '__main__':
    # Smoke test: authenticate against the CSMAR service and pull a small
    # sample (20 rows) from every registered table.
    # NOTE(review): credentials are hard-coded in source -- move to config.
    auth("quantresearch", "quantresearch")
    for table in csmar_class_dict:
        data_query = query(csmar_class_dict[table]).limit(20)
        # Execute each query once and print the result (the original
        # called get_table_data twice per table, running every remote
        # query twice and discarding the first result).
        data = get_table_data(data_query)
        print(data)
| 2.125 | 2 |
arduino_iot_rest/models/arduino_devicev2_cert.py | akash73/iot-client-py | 13 | 12763544 | # coding: utf-8
"""
Arduino IoT Cloud API
Provides a set of endpoints to manage Arduino IoT Cloud **Devices**, **Things**, **Properties** and **Timeseries**. This API can be called just with any HTTP Client, or using one of these clients: * [Javascript NPM package](https://www.npmjs.com/package/@arduino/arduino-iot-client) * [Python PYPI Package](https://pypi.org/project/arduino-iot-client/) * [Golang Module](https://github.com/arduino/iot-client-go) # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from arduino_iot_rest.configuration import Configuration
class ArduinoDevicev2Cert(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'ca': 'str',
'compressed': 'ArduinoCompressedv2',
'der': 'str',
'device_id': 'str',
'enabled': 'bool',
'href': 'str',
'id': 'str',
'pem': 'str'
}
attribute_map = {
'ca': 'ca',
'compressed': 'compressed',
'der': 'der',
'device_id': 'device_id',
'enabled': 'enabled',
'href': 'href',
'id': 'id',
'pem': 'pem'
}
def __init__(self, ca=None, compressed=None, der=None, device_id=None, enabled=True, href=None, id=None, pem=None, local_vars_configuration=None): # noqa: E501
"""ArduinoDevicev2Cert - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._ca = None
self._compressed = None
self._der = None
self._device_id = None
self._enabled = None
self._href = None
self._id = None
self._pem = None
self.discriminator = None
if ca is not None:
self.ca = ca
self.compressed = compressed
self.der = der
self.device_id = device_id
self.enabled = enabled
self.href = href
self.id = id
self.pem = pem
@property
def ca(self):
"""Gets the ca of this ArduinoDevicev2Cert. # noqa: E501
The Certification Authority used to sign the certificate # noqa: E501
:return: The ca of this ArduinoDevicev2Cert. # noqa: E501
:rtype: str
"""
return self._ca
@ca.setter
def ca(self, ca):
"""Sets the ca of this ArduinoDevicev2Cert.
The Certification Authority used to sign the certificate # noqa: E501
:param ca: The ca of this ArduinoDevicev2Cert. # noqa: E501
:type: str
"""
self._ca = ca
@property
def compressed(self):
"""Gets the compressed of this ArduinoDevicev2Cert. # noqa: E501
:return: The compressed of this ArduinoDevicev2Cert. # noqa: E501
:rtype: ArduinoCompressedv2
"""
return self._compressed
@compressed.setter
def compressed(self, compressed):
"""Sets the compressed of this ArduinoDevicev2Cert.
:param compressed: The compressed of this ArduinoDevicev2Cert. # noqa: E501
:type: ArduinoCompressedv2
"""
if self.local_vars_configuration.client_side_validation and compressed is None: # noqa: E501
raise ValueError("Invalid value for `compressed`, must not be `None`") # noqa: E501
self._compressed = compressed
@property
def der(self):
"""Gets the der of this ArduinoDevicev2Cert. # noqa: E501
The certificate in DER format # noqa: E501
:return: The der of this ArduinoDevicev2Cert. # noqa: E501
:rtype: str
"""
return self._der
@der.setter
def der(self, der):
"""Sets the der of this ArduinoDevicev2Cert.
The certificate in DER format # noqa: E501
:param der: The der of this ArduinoDevicev2Cert. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and der is None: # noqa: E501
raise ValueError("Invalid value for `der`, must not be `None`") # noqa: E501
self._der = der
@property
def device_id(self):
"""Gets the device_id of this ArduinoDevicev2Cert. # noqa: E501
The unique identifier of the device # noqa: E501
:return: The device_id of this ArduinoDevicev2Cert. # noqa: E501
:rtype: str
"""
return self._device_id
@device_id.setter
def device_id(self, device_id):
"""Sets the device_id of this ArduinoDevicev2Cert.
The unique identifier of the device # noqa: E501
:param device_id: The device_id of this ArduinoDevicev2Cert. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and device_id is None: # noqa: E501
raise ValueError("Invalid value for `device_id`, must not be `None`") # noqa: E501
self._device_id = device_id
@property
def enabled(self):
"""Gets the enabled of this ArduinoDevicev2Cert. # noqa: E501
Whether the certificate is enabled # noqa: E501
:return: The enabled of this ArduinoDevicev2Cert. # noqa: E501
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this ArduinoDevicev2Cert.
Whether the certificate is enabled # noqa: E501
:param enabled: The enabled of this ArduinoDevicev2Cert. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and enabled is None: # noqa: E501
raise ValueError("Invalid value for `enabled`, must not be `None`") # noqa: E501
self._enabled = enabled
@property
def href(self):
"""Gets the href of this ArduinoDevicev2Cert. # noqa: E501
The api reference of this cert # noqa: E501
:return: The href of this ArduinoDevicev2Cert. # noqa: E501
:rtype: str
"""
return self._href
@href.setter
def href(self, href):
"""Sets the href of this ArduinoDevicev2Cert.
The api reference of this cert # noqa: E501
:param href: The href of this ArduinoDevicev2Cert. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and href is None: # noqa: E501
raise ValueError("Invalid value for `href`, must not be `None`") # noqa: E501
self._href = href
@property
def id(self):
"""Gets the id of this ArduinoDevicev2Cert. # noqa: E501
The unique identifier of the key # noqa: E501
:return: The id of this ArduinoDevicev2Cert. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ArduinoDevicev2Cert.
The unique identifier of the key # noqa: E501
:param id: The id of this ArduinoDevicev2Cert. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def pem(self):
"""Gets the pem of this ArduinoDevicev2Cert. # noqa: E501
The certificate in pem format # noqa: E501
:return: The pem of this ArduinoDevicev2Cert. # noqa: E501
:rtype: str
"""
return self._pem
@pem.setter
def pem(self, pem):
"""Sets the pem of this ArduinoDevicev2Cert.
The certificate in pem format # noqa: E501
:param pem: The pem of this ArduinoDevicev2Cert. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and pem is None: # noqa: E501
raise ValueError("Invalid value for `pem`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
pem is not None and len(pem) > 512):
raise ValueError("Invalid value for `pem`, length must be less than or equal to `512`") # noqa: E501
self._pem = pem
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ArduinoDevicev2Cert):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ArduinoDevicev2Cert):
return True
return self.to_dict() != other.to_dict()
| 2.234375 | 2 |
mokapay/config/desktop.py | harpiya/mokapay | 0 | 12763545 | <reponame>harpiya/mokapay
# @Author: <NAME> <developer>
# @Date: 2019-01-21T13:42:26+03:00
# @Email: <EMAIL>
# @Project: Harpiya Kurumsal Yönetim Sistemi
# @Filename: desktop.py
# @Last modified by: developer
# @Last modified time: 2019-01-21T13:54:28+03:00
# @License: MIT License. See license.txt
# @Copyright: Harpiya Yazılım Teknolojileri
from __future__ import unicode_literals
from frappe import _
def get_data():
    """Return the desktop icon definition list for the MokaPay module."""
    moka_module = {
        "module_name": "MokaPay",
        "color": "grey",
        "icon": "octicon octicon-file-directory",
        "type": "module",
        "label": _("Moka Ödeme Sistemi"),
    }
    return [moka_module]
| 1.171875 | 1 |
tutorials/t4/DLL.py | YufeiCui/CSCA48 | 1 | 12763546 | # Provided by Dr. <NAME> & <NAME>. Edited by <NAME>
class DNode(object):
    '''A node of a doubly linked list: holds one element plus references
    to the neighbouring nodes on either side.'''
    def __init__(self, element, prev_node=None, next_node=None):
        '''(DNode, obj, DNode, DNode) -> NoneType
        Create a node holding element, linked to prev_node and next_node.'''
        self._element = element
        self._prev = prev_node
        self._next = next_node
    def set_next(self, next_node):
        '''(DNode, DNode) -> NoneType
        Point this node's next link at next_node.'''
        self._next = next_node
    def set_prev(self, prev_node):
        '''(DNode, DNode) -> NoneType
        Point this node's prev link at prev_node.'''
        self._prev = prev_node
    def set_element(self, element):
        '''(DNode, obj) -> NoneType
        Replace the element stored in this node.'''
        self._element = element
    def get_next(self):
        '''(DNode) -> DNode
        Return the node after this one.'''
        return self._next
    def get_prev(self):
        '''(DNode) -> DNode
        Return the node before this one.'''
        return self._prev
    def get_element(self):
        '''(DNode) -> obj
        Return the element stored in this node.'''
        return self._element
    def __str__(self):
        '''(DNode) -> str
        Return "(prev-id, element, next-id)" using the hex id() of the
        neighbouring node objects.'''
        return "(" + str(hex(id(self._prev))) + ", " + str(self._element) + ", " + str(hex(id(self._next))) + ")"
class DoubleLinkedList(object):
    '''A doubly linked list using dummy head and tail sentinel nodes.
    Supports O(1) insertion and removal at both ends.'''
    def __init__(self):
        '''(DoubleLinkedList) -> NoneType
        Create an empty list: the head and tail sentinels point directly
        at each other and the size is 0.'''
        self._size = 0
        # head and tail are dummy nodes that never hold user data
        self._head = DNode(None, None, None)
        self._tail = DNode(None, None, None)
        self._head.set_next(self._tail)
        self._tail.set_prev(self._head)
    def is_empty(self):
        '''(DoubleLinkedList) -> bool
        Return True iff no item is stored in this DLL.'''
        return self._size == 0
    def size(self):
        '''(DoubleLinkedList) -> int
        Return the number of items in this DLL.'''
        return self._size
    def add_first(self, element):
        '''(DoubleLinkedList, obj) -> NoneType
        Insert element at the front, right after the head sentinel.'''
        # new node sits between head and the current first node
        node = DNode(element, self._head, self._head.get_next())
        self._head.get_next().set_prev(node)
        self._head.set_next(node)
        self._size += 1
    def add_last(self, element):
        '''(DoubleLinkedList, obj) -> NoneType
        Insert element at the end, right before the tail sentinel.'''
        # new node sits between the current last node and tail
        node = DNode(element, self._tail.get_prev(), self._tail)
        self._tail.get_prev().set_next(node)
        self._tail.set_prev(node)
        self._size += 1
    def remove_first(self):
        '''(DoubleLinkedList) -> obj
        Remove the first item and return its element, or None if the list
        is empty.'''
        element = None
        if not self.is_empty():
            node = self._head.get_next()
            # unlink: head now points past the removed node
            self._head.set_next(node.get_next())
            node.get_next().set_prev(self._head)
            self._size -= 1
            # clear the removed node's links (garbage-collection hygiene)
            node.set_next(None)
            node.set_prev(None)
            element = node.get_element()
        return element
    def remove_last(self):
        '''(DoubleLinkedList) -> obj
        Remove the last item and return its element, or None if the list
        is empty.'''
        element = None
        if not self.is_empty():
            node = self._tail.get_prev()
            # unlink: tail now points past the removed node
            self._tail.set_prev(node.get_prev())
            node.get_prev().set_next(self._tail)
            self._size -= 1
            # clear the removed node's links (garbage-collection hygiene)
            node.set_next(None)
            node.set_prev(None)
            element = node.get_element()
        return element
    def __str__(self):
        '''(DoubleLinkedList) -> str
        Return "(a, b, c)" listing the items front to back; "()" when
        empty.'''
        # collect the items and join once instead of the original repeated
        # string concatenation, which was quadratic in the list length
        items = []
        cur = self._head.get_next()
        while cur is not self._tail:
            items.append(str(cur.get_element()))
            cur = cur.get_next()
        return "(" + ", ".join(items) + ")"
if __name__ == "__main__":
node_1 = DNode("A")
node_2 = DNode("B", node_1)
node_3 = DNode("C", node_1, node_2)
print(node_1)
print(node_2)
print(node_3)
print(str(hex(id(node_1))))
print(str(hex(id(node_2))))
dll = DoubleLinkedList()
print(dll)
dll.add_first("A")
dll.add_first("B")
dll.add_last("C")
dll.add_last("D")
print(dll)
print(dll.remove_last())
print(dll.remove_last())
print(dll.remove_last())
print(dll.remove_last())
print(dll)
| 4.21875 | 4 |
wthpython/spacial_nums/spacial_num.py | Nobodylesszb/python_study | 0 | 12763547 | <reponame>Nobodylesszb/python_study
# CPython caches the small integers -5..256 as singletons, so two
# independent literals with value 256 are the very same object.
a = 256
b = 256
print(a is b)
"""
output:True
"""
# 257 lies outside the cached range.  NOTE(review): at module level the
# compiler's constant folding may still make c and d share one object, so
# whether their ids match is an implementation detail.
c = 257
d = 257
print(id(c),id(d)) | 2.859375 | 3 |
code/18SrRNA/pipeline_mapping/parse_seqs.py | DraceniY/Chapevo | 0 | 12763548 | import os , sys , glob, argparse , itertools
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import pandas as pd
import numpy as np
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
import subprocess
# Module-level setup, executed at import time.
# accession_table maps RefSeq accessions to taxonomy ids; the version
# suffix (".1" etc.) is stripped so accessions match file-name prefixes.
accession_table = pd.read_csv("../refseq_download.txt", sep='\t', header=0)
for i in range(len(accession_table)):
    accession_table.loc[i,'refseq'] = accession_table.loc[i,'refseq'].split('.')[0]
# taxid_dict: taxonomy id (int) -> organism name, read from a two-column
# whitespace-separated file.
taxid_dict = {}
with open("../../../data/taxonomy/taxid.txt", 'r') as file:
    for line in file:
        current_line = line.split()
        current_id = int(current_line[0])
        current_name = current_line[1]
        taxid_dict[current_id] = current_name
def load_sequences(seqdir):
    """
    Collect every per-accession 18S fasta under `seqdir`, rename each
    record to a 10-character taxonomy-based id, and write all records to
    refseq_18S.fasta (one record per organism name).

    NOTE(review): `seqdir` is only used to list file-name prefixes; the
    files themselves are re-opened from the hard-coded 'result/' directory
    below, so calling this with another directory (e.g. 'result_new')
    still reads 'result/' -- confirm this is intended.
    """
    list_seqs = glob.glob(seqdir+'/*_18S_output.fasta')
    records = []
    list_added = []
    for i in list_seqs:
        # accession prefix, e.g. "GCF_000001405" from the file name
        prefix = i.split('/')[-1].split('_')[0:2]
        prefix2 = "_".join(prefix)
        # all candidate taxonomy ids recorded for this accession
        taxid = accession_table[accession_table['refseq']==prefix2][['taxid', 'taxid2', 'species_taxid']].values
        taxid2 = list(set(list(taxid[0])) )
        good_taxid = []
        names = []
        for j in taxid2:
            # keep only ids we can resolve to an organism name
            if j in taxid_dict.keys():
                current_name = taxid_dict.get(j)
                names.append(current_name)
                good_taxid.append(j)
        names = list(set(names))
        current_name = names[0]
        # build a fixed 10-character record id: "TAX" + taxid, padded
        current_short = "TAX" + str(good_taxid[0]) + "xxxxxxxxxx"
        current_short = current_short[0:10]
        # current_short = str(current_name.split('_')[0][0]) + str(current_name.split('_')[1][0:3]) + str(good_taxid[0])
        current_fname = 'result/' + prefix2 + '_18S_output.fasta'
        if os.path.exists(current_fname) and os.path.getsize(current_fname) > 0:
            current_fasta = SeqIO.parse(open(current_fname, mode='r'), 'fasta')
            for current_seq in current_fasta:
                current_seq.id = current_short
                current_seq.description = "Taxid:"+str(good_taxid[0]) + " " + "Organism:"+current_name + " " + "Type:18S_rRNA"
                # keep only the first record per organism name
                if current_name not in list_added:
                    records.append(current_seq)
                    list_added.append(current_name)
    SeqIO.write(records , 'refseq_18S.fasta' , "fasta")
if __name__ == '__main__':
    # Collect the 18S sequences (prefixes listed from result_new/) into
    # refseq_18S.fasta -- see the NOTE in load_sequences about 'result/'.
    load_sequences('result_new')
| 2.390625 | 2 |
turtlebot3_visual_servoing/src/navigateObstacle.py | shriarul5273/MSCV-Multi-Sensor-Fusion-and-Tracking | 0 | 12763549 | <reponame>shriarul5273/MSCV-Multi-Sensor-Fusion-and-Tracking
#!/usr/bin/env python
#import
import rospy
import actionlib
from move_base_msgs.msg import MoveBaseAction,MoveBaseGoal
from geometry_msgs.msg import Pose,Quaternion
# initlize the global variables
global GoalPose,goal
GoalPose = Pose()
# intlize the move base goal
goal = MoveBaseGoal()
# action call to the move_base clinet to make the robot move to the goal.
client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
# subscriber callback to initlize the goal pose to the call the move base
def callback(msg):
GoalPose.orientation.x = msg.orientation.x
GoalPose.orientation.y = msg.orientation.y
GoalPose.orientation.z = msg.orientation.z
GoalPose.orientation.w = msg.orientation.w
GoalPose.position.x = msg.position.x
GoalPose.position.y = msg.position.y
GoalPose.position.z = msg.position.z
try :
goal.target_pose.pose.position.x = GoalPose.position.x
goal.target_pose.pose.position.y = GoalPose.position.y
goal.target_pose.pose.position.z = GoalPose.position.z
goal.target_pose.pose.orientation.x = GoalPose.orientation.x
goal.target_pose.pose.orientation.y = GoalPose.orientation.y
goal.target_pose.pose.orientation.z = GoalPose.orientation.z
goal.target_pose.pose.orientation.w = GoalPose.orientation.w
client.send_goal(goal)
except rospy.ROSException as e:
print(e)
def main():
    '''Block until the move_base action server becomes available.'''
    client.wait_for_server()
# Initialize the ROS node, subscribe to /GoalPose, and spin until shutdown.
if __name__ == '__main__':
    rospy.init_node('navigateObstacle')
    sub = rospy.Subscriber('/GoalPose',Pose,callback)
    try:
        main()
        rospy.spin()
    except rospy.ROSException as e:
print(e) | 2.1875 | 2 |
copycat/replacement.py | jalanb/co.py.cat | 27 | 12763550 | from .workspace_structure import WorkspaceStructure
class Replacement(WorkspaceStructure):
    """A workspace structure tying an object in the initial string to the
    corresponding object in the modified string via a relation."""
    def __init__(self, object_from_initial, object_from_modified, relation):
        # super() replaces the explicit WorkspaceStructure.__init__(self)
        # call; the module already uses Python 3 relative imports, and the
        # class has a single base, so behavior is unchanged.
        super().__init__()
        self.object_from_initial = object_from_initial
        self.object_from_modified = object_from_modified
        self.relation = relation
| 2.59375 | 3 |