max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
app/module_user/view.py | B02902008/TaipeiWater | 0 | 12761351 | <filename>app/module_user/view.py
from flask import request
from . import blue_user
from app.module_database import db_operation as db_op
import uuid
import json
def select_user_with_account(usr, pwd, db, cursor):
    """Look up a user by username/password; return (user_id, is_helper) or (None, None)."""
    # WARNING(review): SQL built by string interpolation is injectable if
    # usr/pwd are untrusted; parameterize if db_op.sql_execute supports it.
    sql = ("SELECT id, help FROM users WHERE username='%s' "
           "AND password=Password('%s')" % (usr, pwd))
    rows = db_op.sql_execute(db, cursor, sql, False)
    if len(rows) != 1:
        return None, None
    return rows[0][0], rows[0][1] == 1
def select_user_with_token(tkn, db, cursor):
    """Resolve an unexpired session token to (user_id, is_helper), or (None, None)."""
    # WARNING(review): string-built SQL -- injectable with an untrusted token.
    sql = ("SELECT id, help FROM users WHERE token='%s' "
           "AND tokenExpire>=NOW()" % tkn)
    rows = db_op.sql_execute(db, cursor, sql, False)
    if len(rows) != 1:
        return None, None
    return rows[0][0], rows[0][1] == 1
def select_user_setting(id, db, cursor):
    """Fetch (view_type, view_status, view_range) for a user id, or (None, None, None)."""
    rows = db_op.sql_execute(db, cursor,
                             "SELECT * FROM user_setting WHERE id=" + id, False)
    if len(rows) != 1:
        return None, None, None
    row = rows[0]
    # Columns 1..3 hold the three preference values (column 0 is the id).
    return row[1], row[2], row[3]
def update_setting(id, vt, vs, vr, db, cursor):
    """Persist the three display preferences for a user id (commits)."""
    sql = ("UPDATE user_setting SET view_type='%s', view_status='%s', "
           "view_range=%s WHERE id=%s" % (vt, vs, vr, id))
    db_op.sql_execute(db, cursor, sql, True)
def update_token(id, tkn, db, cursor):
    """Store a fresh token for the user, valid for seven days (commits)."""
    sql = ("UPDATE users SET token='%s', "
           "tokenExpire=DATE_ADD(NOW(), INTERVAL 7 DAY) WHERE id=%s" % (tkn, id))
    db_op.sql_execute(db, cursor, sql, True)
def clear_token(id, db, cursor):
    """Invalidate the stored token for the user: empty token + epoch expiry."""
    sql = "UPDATE users SET token='', tokenExpire=TIMESTAMP(0) WHERE id=%s" % id
    db_op.sql_execute(db, cursor, sql, True)
def check_account(usr, db, cursor):
    """Return True when the username is still free, False when taken."""
    sql = "SELECT COUNT(*) FROM users WHERE username='" + usr + "'"
    result = db_op.sql_execute(db, cursor, sql, False)
    # COUNT(*) == 0 means the name is available; the hand-rolled
    # if/return-False/return-True ladder collapses to one comparison.
    return result[0][0] == 0
def insert_user(usr, pwd, helper, db, cursor):
    """Create a user row (password hashed by MySQL) plus a default settings row."""
    # NOTE(review): helper arrives as a request string, so any non-empty
    # value (even 'false') is truthy here -- confirm the client sends an
    # empty value for "not a helper".
    sql = ("INSERT INTO users (username,password,help) VALUES ('" + usr +
           "',PASSWORD('" + pwd + "')," + ('1' if helper else '0') + ")")
    db_op.sql_execute(db, cursor, sql, True)
    # The settings row relies entirely on column defaults; it is correlated
    # with the user row by insertion order / matching auto-increment ids.
    sql = "INSERT INTO user_setting () VALUES ()"
    db_op.sql_execute(db, cursor, sql, True)
def auth_user(id, tkn, db, cursor):
    """Return True when the (id, token) pair matches exactly one user row."""
    # WARNING(review): string-built SQL -- injectable with untrusted input.
    sql = "SELECT COUNT(*) FROM users WHERE id=" + id + " AND token='" + tkn + "'"
    result = db_op.sql_execute(db, cursor, sql, False)
    # Direct comparison replaces the if/return-False/return-True ladder.
    return result[0][0] == 1
@blue_user.route('/login', methods=['POST'])
def login():
    """Authenticate with username/password and issue a fresh session token.

    Returns a JSON envelope: {"success": bool, "msg": <error text or payload>}.
    """
    db, cursor = db_op.db_connect()
    if db is None or cursor is None:
        return json.dumps({"success": False, "msg": "資料庫錯誤"})
    # 'is_helper' instead of 'help' avoids shadowing the builtin help().
    uid, is_helper = select_user_with_account(
        request.values['username'], request.values['password'], db, cursor)
    if uid is None or is_helper is None:
        db_op.db_close(db)
        return json.dumps({"success": False, "msg": "帳號或密碼錯誤"})
    view_type, view_status, view_range = select_user_setting(str(uid), db, cursor)
    if view_type is None or view_status is None or view_range is None:
        db_op.db_close(db)
        return json.dumps({"success": False, "msg": "無法載入使用者偏好設定"})
    # Rotate the session token on every successful login.
    token = str(uuid.uuid1())
    update_token(str(uid), token, db, cursor)
    db_op.db_close(db)
    return json.dumps({"success": True,
                       "msg": {"uid": uid, "help": is_helper, "token": token,
                               "view_type": view_type, "view_status": view_status,
                               "view_range": view_range}})
@blue_user.route('/token', methods=['POST'])
def token():
    """Refresh a session: validate an unexpired token and rotate it.

    Mirrors login() but authenticates by token instead of credentials.
    """
    db, cursor = db_op.db_connect()
    if db is None or cursor is None:
        return json.dumps({"success": False, "msg": "資料庫錯誤"})
    # 'is_helper'/'new_token' avoid shadowing builtin help() and this function.
    uid, is_helper = select_user_with_token(request.values['token'], db, cursor)
    if uid is None or is_helper is None:
        db_op.db_close(db)
        return json.dumps({"success": False, "msg": "登入逾期,請重新登入"})
    view_type, view_status, view_range = select_user_setting(str(uid), db, cursor)
    if view_type is None or view_status is None or view_range is None:
        db_op.db_close(db)
        return json.dumps({"success": False, "msg": "無法載入使用者偏好設定"})
    new_token = str(uuid.uuid1())
    update_token(str(uid), new_token, db, cursor)
    db_op.db_close(db)
    return json.dumps({"success": True,
                       "msg": {"uid": uid, "help": is_helper, "token": new_token,
                               "view_type": view_type, "view_status": view_status,
                               "view_range": view_range}})
@blue_user.route('/register', methods=['POST'])
def register():
    """Create a new account when the requested username is not taken."""
    db, cursor = db_op.db_connect()
    if db is None or cursor is None:
        return json.dumps({"success": False, "msg": "資料庫錯誤"})
    username = request.values['username']
    if check_account(username, db, cursor):
        # Name is free -- create the user and its default settings row.
        insert_user(username, request.values['password'],
                    request.values['helper'], db, cursor)
        db_op.db_close(db)
        return json.dumps({"success": True, "msg": "註冊成功,請進行登入"})
    db_op.db_close(db)
    return json.dumps({"success": False, "msg": "使用者已存在"})
@blue_user.route('/logout', methods=['POST'])
def logout():
    """Invalidate the caller's session token.

    NOTE(review): always reports success, even when auth_user fails --
    presumably intentional so logout is idempotent; confirm.
    """
    db, cursor = db_op.db_connect()
    if db is None or cursor is None:
        return json.dumps({"success": False, "msg": "資料庫錯誤"})
    # Only clear the token when the (uid, token) pair is currently valid.
    if auth_user(str(request.values['uid']), request.values['token'], db, cursor):
        clear_token(str(request.values['uid']), db, cursor)
    db_op.db_close(db)
    return json.dumps({"success": True, "msg": ""})
@blue_user.route('/setting', methods=['POST'])
def setting():
    """Persist the caller's display preferences after token authentication."""
    db, cursor = db_op.db_connect()
    if db is None or cursor is None:
        return json.dumps({"success": False, "msg": "資料庫錯誤"})
    uid = str(request.values['uid'])
    if not auth_user(uid, request.values['token'], db, cursor):
        db_op.db_close(db)
        return json.dumps({"success": False, "msg": "無法更新使用者偏好設定"})
    update_setting(uid, request.values['view_type'], request.values['view_status'],
                   str(request.values['view_range']), db, cursor)
    db_op.db_close(db)
    return json.dumps({"success": True, "msg": "使用者偏好設定已更新"})
| 2.5 | 2 |
tf_optimizees/logreg_classifier.py | justanothercoder/LSTM-Optimizer-TF | 1 | 12761352 | <reponame>justanothercoder/LSTM-Optimizer-TF<gh_stars>1-10
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_digits, fetch_mldata
from sklearn.preprocessing import StandardScaler
from sklearn import utils
class LogregClassifier:
    """Multinomial logistic-regression "optimizee" built on TF1 graph APIs.

    Builds a 10-class softmax classifier with L2 weight decay and exposes
    the tensors needed to train it with a pluggable optimizer.
    """

    def __init__(self, lambd=1e-4):
        # L2 regularization strength applied to all non-bias variables.
        self.lambd = lambd

    def build(self, optimizer):
        """Assemble the full graph; return a dict of the key ops/tensors."""
        x, y = self.inputs()
        pred = self.inference(x)
        loss, acc = self.loss(pred, y)
        train_op = self.train_op(loss, optimizer)
        self.ops = {
            'x': x, 'y': y,
            'pred': pred,
            #'loss': self.ema.average(loss),
            #'acc': self.ema.average(acc),
            'loss': loss,
            'acc': acc,
            'train_op': train_op
        }
        return self.ops

    def inputs(self):
        """Placeholders for a batch of flattened samples and int labels.

        NOTE(review): requires prepare_data() to have run first so that
        self.X exists (feature count comes from self.X.shape[1]).
        """
        x = tf.placeholder(tf.float32, shape=[None, self.X.shape[1]])
        y = tf.placeholder(tf.int32, shape=[None])
        return x, y

    def inference(self, x):
        """Single dense layer producing 10 class logits."""
        with tf.variable_scope('logreg_scope') as self.scope:
            pred = tf.layers.dense(x, 10)
        return pred

    def loss(self, logits, y):
        """Cross-entropy + L2 loss, accuracy, and an EMA over both metrics."""
        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits), axis=-1)
        # L2 penalty over every non-bias variable created in the scope.
        loss += tf.add_n([
            tf.nn.l2_loss(v)
            for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope.name)
            if 'bias' not in v.name
        ]) * self.lambd
        p = tf.cast(tf.argmax(tf.nn.softmax(logits), axis=1), tf.int32)
        acc = tf.reduce_mean(tf.cast(tf.equal(p, y), tf.float32))
        # Smoothed metrics; the EMA update is attached to train_op below.
        self.ema = tf.train.ExponentialMovingAverage(decay=0.95)
        self.average_op = self.ema.apply([loss, acc])
        return loss, acc

    def train_op(self, loss, optimizer='adam'):
        """Minimize loss over scope variables, then update the metric EMAs."""
        if optimizer == 'adam':
            optimizer = tf.train.AdamOptimizer(1e-3)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            all_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.scope.name)
            train_op = optimizer.minimize(loss, var_list=all_vars)
        # Run the EMA update strictly after the optimizer step.
        with tf.control_dependencies([train_op]):
            train_op = tf.group(self.average_op)
        return train_op

    def prepare_data(self, dataset_name):
        """Load 'digits' or 'mnist', shuffle, standardize; split mnist 50k/rest.

        NOTE(review): for 'digits' no train/val split is made, so
        batch_iterator (which reads self.X_train) would fail -- confirm.
        """
        self.dataset_name = dataset_name
        if dataset_name == 'digits':
            dataset = load_digits(n_class=10)
        elif dataset_name == 'mnist':
            dataset = fetch_mldata('MNIST original', data_home='/srv/hd1/data/vyanush/')
        self.X, self.Y = dataset.data, dataset.target
        self.X, self.Y = utils.shuffle(self.X, self.Y)
        self.X = StandardScaler().fit_transform(self.X.astype(np.float32))
        if dataset_name == 'mnist':
            self.X_train = self.X[:50000]
            self.Y_train = self.Y[:50000]
            self.X_val = self.X[50000:]
            self.Y_val = self.Y[50000:]

    def batch_iterator(self, n_epochs, batch_size):
        """Yield shuffled (X, Y) minibatches for n_epochs over the train split.

        The trailing partial batch of each epoch is dropped.
        """
        for epoch in range(n_epochs):
            indices = np.arange(self.X_train.shape[0])
            np.random.shuffle(indices)
            for pos in range(0, self.X_train.shape[0] - batch_size + 1, batch_size):
                ind = indices[pos: pos + batch_size]
                yield self.X_train[ind], self.Y_train[ind]
| 2.1875 | 2 |
venv/lib/python2.7/site-packages/boxsdk/auth/redis_managed_oauth2.py | LockScreen/Backend | 1 | 12761353 | <reponame>LockScreen/Backend
# coding: utf-8
from __future__ import unicode_literals
from redis import StrictRedis
from redis.lock import Lock
from uuid import uuid4
from boxsdk import JWTAuth, OAuth2
class RedisManagedOAuth2Mixin(OAuth2):
    """
    Box SDK OAuth2 subclass.
    Allows for storing auth tokens in redis.

    :param unique_id:
        An identifier for this auth object. Auth instances which wish to share tokens must use the same ID.
    :type unique_id:
        `unicode`
    :param redis_server:
        An instance of a Redis server, configured to talk to Redis.
    :type redis_server:
        :class:`Redis`
    """
    # NOTE(review): the uuid4() default below is evaluated ONCE at class
    # definition time, so every instance created without an explicit
    # unique_id in a given process shares the same ID (and therefore the
    # same tokens) -- confirm this sharing is intended.
    def __init__(self, unique_id=uuid4(), redis_server=None, *args, **kwargs):
        self._unique_id = unique_id
        self._redis_server = redis_server or StrictRedis()
        # Redis-backed lock so concurrent clients don't refresh simultaneously.
        refresh_lock = Lock(redis=self._redis_server, name='{0}_lock'.format(self._unique_id))
        super(RedisManagedOAuth2Mixin, self).__init__(*args, refresh_lock=refresh_lock, **kwargs)
        if self._access_token is None:
            self._update_current_tokens()

    def _update_current_tokens(self):
        """
        Get the latest tokens from redis and store them.
        """
        # hvals returns an empty list when the hash is missing, hence the
        # (None, None) fallback.
        self._access_token, self._refresh_token = self._redis_server.hvals(self._unique_id) or (None, None)

    @property
    def unique_id(self):
        """
        Get the unique ID used by this auth instance. Other instances can share tokens with this instance
        if they share the ID with this instance.
        """
        return self._unique_id

    def _get_tokens(self):
        """
        Base class override.
        Gets the latest tokens from redis before returning them.
        """
        self._update_current_tokens()
        return super(RedisManagedOAuth2Mixin, self)._get_tokens()

    def _store_tokens(self, access_token, refresh_token):
        """
        Base class override.
        Saves the refreshed tokens in redis.
        """
        super(RedisManagedOAuth2Mixin, self)._store_tokens(access_token, refresh_token)
        self._redis_server.hmset(self._unique_id, {'access': access_token, 'refresh': refresh_token})
class RedisManagedOAuth2(RedisManagedOAuth2Mixin):
    """Concrete OAuth2 flavor whose tokens are stored in redis (see mixin)."""
class RedisManagedJWTAuth(RedisManagedOAuth2Mixin, JWTAuth):
    """
    JWT Auth subclass which uses Redis to manage access tokens.
    """
    def _auth_with_jwt(self, sub, sub_type):
        """
        Base class override. Returns the access token in a tuple to match the OAuth2 interface.

        The second element is None because JWT auth has no refresh token.
        """
        return super(RedisManagedJWTAuth, self)._auth_with_jwt(sub, sub_type), None
| 2.640625 | 3 |
workshops/test/test_landing_page.py | r-gaia-cs/swc-amy | 0 | 12761354 | <filename>workshops/test/test_landing_page.py
from datetime import datetime, timedelta
from django.core.urlresolvers import reverse
from ..models import Event, Site
from .base import TestBase
class TestLandingPage(TestBase):
    "Tests for the workshop landing page"

    def setUp(self):
        """Create a site plus upcoming, past, and in-progress events.

        NOTE(review): uses naive datetime.now(); if the Event model stores
        timezone-aware datetimes (USE_TZ=True) Django will warn -- confirm.
        """
        # Create a test site
        test_site = Site.objects.create(domain='example.com',
                                        fullname='Test Site')
        # Create one new event for each day in the next 10 days
        for t in range(1,11):
            event_start = datetime.now() + timedelta(days=t)
            Event.objects.create(start=event_start,
                                 slug='upcoming_{0}'.format(t),
                                 site=test_site,
                                 admin_fee=100)
        # Create one new event for each day from 10 days ago to
        # 3 days ago
        for t in range(3,11):
            event_start = datetime.now() + timedelta(days=-t)
            Event.objects.create(start=event_start,
                                 slug='past_{0}'.format(t),
                                 site=test_site,
                                 admin_fee=100)
        # Create an event that started yesterday and ends
        # tomorrow
        event_start = datetime.now() + timedelta(days=-1)
        event_end = datetime.now() + timedelta(days=1)
        Event.objects.create(start=event_start,
                             end=event_end,
                             slug='ends_tomorrow',
                             site=test_site,
                             admin_fee=100)
        # Create an event that ends today
        event_start = datetime.now() + timedelta(days=-1)
        event_end = datetime.now()
        Event.objects.create(start=event_start,
                             end=event_end,
                             slug='ends_today',
                             site=test_site,
                             admin_fee=100)
        # Create an event that starts today
        event_start = datetime.now()
        event_end = datetime.now() + timedelta(days=1)
        Event.objects.create(start=event_start,
                             end=event_end,
                             slug='starts_today',
                             site=test_site,
                             admin_fee=100)
        self._setUpUsersAndLogin()

    def test_has_upcoming_events(self):
        """Test that the landing page is passed some
        upcoming_events in the context.
        """
        response = self.client.get(reverse('index'))
        # This will fail if the context variable doesn't exist
        upcoming_events = response.context['upcoming_events']
        # There are 10 upcoming events
        assert len(upcoming_events) == 10
        # They should all start with upcoming
        assert all([e.slug[:8] == 'upcoming' for e in upcoming_events])
| 2.484375 | 2 |
setup/install_windows_deps.py | nesp-tsr/tsx | 3 | 12761355 | # Shapely installed via pip doesn't have GEOS dependencies
# Instead we download a custom Python wheel which does have the dependencies
# (see https://stackoverflow.com/questions/13144158/python-geos-and-shapely-on-windows-64)
from urllib import urlretrieve
from subprocess import call
import os
wheels = [
'https://download.lfd.uci.edu/pythonlibs/l8ulg3xw/GDAL-2.2.4-cp27-cp27m-win_amd64.whl',
'https://download.lfd.uci.edu/pythonlibs/l8ulg3xw/Fiona-1.7.13-cp27-cp27m-win_amd64.whl',
'https://download.lfd.uci.edu/pythonlibs/l8ulg3xw/Shapely-1.6.4.post1-cp27-cp27m-win_amd64.whl'
]
# Make sure we can install wheels
call(['pip', 'install', 'wheel'])
# Download wheel
print("Install custom packages")
for url in wheels:
print("Downloading: %s" % url)
filename = url.split("/")[-1]
# Download it
urlretrieve(url, filename)
# Install it
call(['pip', 'install', filename])
# Clean up
os.remove(filename)
print("Done")
| 2.703125 | 3 |
src/climsoft_api/api/observationinitial/schema.py | faysal-ishtiaq/climsoft-api | 0 | 12761356 | <filename>src/climsoft_api/api/observationinitial/schema.py
import datetime
from typing import List
import climsoft_api.api.obselement.schema as obselement_schema
import climsoft_api.api.station.schema as station_schema
from climsoft_api.api.schema import BaseSchema, Response
from pydantic import constr, Field
class CreateObservationInitial(BaseSchema):
    """Request schema for creating an initial observation record.

    obsDatetime is accepted as a string here; the read schema
    (ObservationInitial) narrows it to a real datetime.
    """
    recordedFrom: constr(max_length=255) = Field(title="Recorded From")
    describedBy: int = Field(title="Described By")
    obsDatetime: str = Field(title="Obs Datetime")
    qcStatus: int = Field(title="QC Status")
    acquisitionType: int = Field(title="Acquisition Type")
    obsLevel: constr(max_length=255) = Field(title="Obs Level")
    obsValue: constr(max_length=255) = Field(title="Obs Value")
    flag: constr(max_length=255) = Field(title="Flag")
    period: int = Field(title="Period")
    qcTypeLog: str = Field(title="QC Type Log")
    dataForm: constr(max_length=255) = Field(title="Data Form")
    capturedBy: constr(max_length=255) = Field(title="Captured By")
    mark: bool = Field(title="Mark")
    temperatureUnits: constr(max_length=255) = Field(title="Temperature Units")
    precipitationUnits: constr(max_length=255) = Field(title="Precipitation Units")
    cloudHeightUnits: constr(max_length=255) = Field(title="Cloud Height Units")
    visUnits: constr(max_length=255) = Field(title="Vis Units")
    dataSourceTimeZone: int = Field(title="Data Source Timezone")

    class Config:
        # Map the camelCase API names onto snake_case ORM attribute names.
        fields = {
            "recordedFrom": "recorded_from",
            "describedBy": "described_by",
            "obsDatetime": "obs_datetime",
            "qcStatus": "qc_status",
            "acquisitionType": "acquisition_type",
            "obsLevel": "obs_level",
            "qcTypeLog": "qc_type_log",
            "dataForm": "data_form",
            "capturedBy": "captured_by",
            "temperatureUnits": "temperature_units",
            "precipitationUnits": "precipitation_units",
            "cloudHeightUnits": "cloud_height_units",
            "visUnits": "vis_units",
            "dataSourceTimeZone": "data_source_timezone"
        }
class UpdateObservationInitial(BaseSchema):
    """Request schema for updating an observation's mutable attributes.

    The identifying columns (recordedFrom, describedBy, obsDatetime, ...)
    are deliberately absent -- they come from the URL/path, not the body.
    """
    obsLevel: constr(max_length=255) = Field(title="Obs Level")
    obsValue: constr(max_length=255) = Field(title="Obs Value")
    flag: constr(max_length=255) = Field(title="Flag")
    period: int = Field(title="Period")
    qcTypeLog: str = Field(title="QC Type Log")
    dataForm: constr(max_length=255) = Field(title="Data Form")
    capturedBy: constr(max_length=255) = Field(title="Captured By")
    mark: bool = Field(title="Mark")
    temperatureUnits: constr(max_length=255) = Field(title="Temperature Units")
    precipitationUnits: constr(max_length=255) = Field(title="Precipitation Units")
    cloudHeightUnits: constr(max_length=255) = Field(title="Cloud Height Units")
    visUnits: constr(max_length=255) = Field(title="Vis Units")
    dataSourceTimeZone: int = Field(title="Data Source Timezone")

    class Config:
        # NOTE(review): this map also lists names that are not fields on
        # this model (recordedFrom, describedBy, ...) -- apparently copied
        # from CreateObservationInitial; presumably harmless, but confirm.
        fields = {
            "recordedFrom": "recorded_from",
            "describedBy": "described_by",
            "obsDatetime": "obs_datetime",
            "qcStatus": "qc_status",
            "acquisitionType": "acquisition_type",
            "obsLevel": "obs_level",
            "qcTypeLog": "qc_type_log",
            "dataForm": "data_form",
            "capturedBy": "captured_by",
            "temperatureUnits": "temperature_units",
            "precipitationUnits": "precipitation_units",
            "cloudHeightUnits": "cloud_height_units",
            "visUnits": "vis_units",
            "dataSourceTimeZone": "data_source_timezone"
        }
class ObservationInitial(CreateObservationInitial):
    """Read schema: obsDatetime is a real datetime when loaded from the ORM."""
    obsDatetime: datetime.datetime = Field(title="Obs Datetime")

    class Config:
        orm_mode = True
        allow_population_by_field_name = True
class ObservationInitialResponse(Response):
    """API response wrapping a list of observations."""
    result: List[ObservationInitial] = Field(title="Result")
class ObservationInitialWithChildren(ObservationInitial):
    """Read schema that also embeds the related element and station."""
    obselement: obselement_schema.ObsElement = Field(title="Obs Element")
    station: station_schema.Station = Field(title="Station")

    class Config:
        orm_mode = True
        allow_population_by_field_name = True
        # Same camelCase -> snake_case mapping as the base schemas.
        fields = {
            "recordedFrom": "recorded_from",
            "describedBy": "described_by",
            "obsDatetime": "obs_datetime",
            "qcStatus": "qc_status",
            "acquisitionType": "acquisition_type",
            "obsLevel": "obs_level",
            "qcTypeLog": "qc_type_log",
            "dataForm": "data_form",
            "capturedBy": "captured_by",
            "temperatureUnits": "temperature_units",
            "precipitationUnits": "precipitation_units",
            "cloudHeightUnits": "cloud_height_units",
            "visUnits": "vis_units",
            "dataSourceTimeZone": "data_source_timezone"
        }
class ObservationInitialWithChildrenResponse(Response):
    """API response wrapping observations with embedded element/station."""
    result: List[ObservationInitialWithChildren] = Field(title="Result")
class ObservationInitialInputGen(CreateObservationInitial):
    """Create schema variant that can be populated by field name (for generators)."""
    class Config:
        allow_population_by_field_name = True
class ObservationInitialQueryResponse(ObservationInitialResponse):
    """Paginated query response: results plus paging metadata."""
    limit: int = Field(title="Limit")   # page size requested
    page: int = Field(title="Page")     # current page number
    pages: int = Field(title="Pages")   # total number of pages
| 2.140625 | 2 |
django_orm/single_model_orm/users_app/models.py | gfhuertac/coding_dojo_python | 0 | 12761357 | from django.db import models
# Create your models here.
class User(models.Model):
    """Basic user record with audit timestamps.

    NOTE(review): email_address is a plain CharField -- an EmailField would
    add format validation, but switching requires a migration; confirm.
    """
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    email_address = models.CharField(max_length=255)
    age = models.IntegerField()
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on save
| 2.625 | 3 |
tests/viewcode/test_viewcode.py | speedyleion/sphinx-c-doc | 7 | 12761358 | <gh_stars>1-10
"""
View code is basically a post processing of a parsed document tree.
In order to more easily test this it generates an entire sphinx project and
then the resultant html files are analyzed to ensure they have the right
content.
For all of these tests warnigns are treated as errors so that any warnings
from bad logic can more easily be seen in the test output
"""
import re
import os
from sphinx.cmd.build import main
from bs4 import BeautifulSoup
SCRIPT_DIR = os.path.dirname(__file__)
def test_viewcode_of_sphinx_project(tmp_path):
    """
    Tests the insertion of hyperlinks between documentation and code.

    This isn't ideal to have all asserts in one function, but to keep the
    overall test run times down it is done this way.

    Builds the fixture project into tmp_path, then checks four things:
    doc -> source links, links for nested/unknown members, source -> doc
    back links, and that exactly one viewcode link exists on a page where
    only one construct has a resolvable source file.
    """
    source_dir = os.path.join(SCRIPT_DIR, "..", "assets")
    # With sphinx 3 it will throw a warning for duplicate declarations, even with no
    # index usage, so allow warnings in this test.
    main(
        [
            "-a",
            "-E",
            "-D",
            "exclude_patterns=[]",
            "-D",
            "master_doc=viewcode_index",
            source_dir,
            str(tmp_path),
        ]
    )
    file_name = tmp_path / "example.html"
    with file_name.open() as f:
        contents = f.read()
    # Looking for tags of the form
    #
    # <a class="reference internal" href="_modules/example.c.html#c.MY_COOL_MACRO"><span class="viewcode-link"><span class="pre">[source]</span></span></a>
    chosen_links = (
        "_modules/example.c.html#c.MY_COOL_MACRO",
        "_modules/example.c.html#c.members_documented_with_napoleon.two.nested_two",
    )
    # NOTE(review): BeautifulSoup is constructed without an explicit parser;
    # bs4 picks a default and emits a warning -- consider "html.parser".
    soup = BeautifulSoup(contents)
    for href in chosen_links:
        tag = soup.find("a", {"href": href})
        assert "[source]" == tag.text
    # Links from a page in a subdirectory use relative "../" hrefs.
    file_name = tmp_path / "sub_dir" / "file_2.html"
    with file_name.open() as f:
        contents = f.read()
    chosen_links = (
        "../_modules/file_2.c.html#c.unknown_member.foo",
        "../_modules/file_2.c.html#c.file_level_variable",
    )
    soup = BeautifulSoup(contents)
    for href in chosen_links:
        tag = soup.find("a", {"href": href})
        assert "[source]" == tag.text
    # Test the back links
    file_name = tmp_path / "_modules" / "example.c.html"
    with file_name.open() as f:
        contents = f.read()
    chosen_links = (
        "../example.html#c.members_documented_with_napoleon.two.nested_two",
        "../example.html#c.MY_COOL_MACRO",
    )
    soup = BeautifulSoup(contents)
    for href in chosen_links:
        tag = soup.find("a", {"href": href})
        assert "[docs]" == tag.text
    # Test normal C constructs elsewhere in docs
    file_name = tmp_path / "viewcode.html"
    with file_name.open() as f:
        contents = f.read()
    chosen_links = (
        "_modules/example.c.html#c.napoleon_documented_function",
        # One needs to use noindex in order to avoid sphinx warning and once
        # one uses noindex then the permalinks are no longer generated :(
        # "#c.napoleon_documented_function"
    )
    soup = BeautifulSoup(contents)
    for href in chosen_links:
        tag = soup.find("a", {"href": href})
        assert "[source]" == tag.text
    # Ensure only the one function that actually had a source file to be able to link to creates a link
    link_count = len(re.findall("viewcode-link", contents))
    assert link_count == 1
| 2.6875 | 3 |
tests/func_misc_test.py | pustotnik/raven | 0 | 12761359 | # coding=utf-8
#
# pylint: disable = wildcard-import, unused-wildcard-import
# pylint: disable = missing-docstring, invalid-name
# pylint: disable = unused-argument, no-member, attribute-defined-outside-init
# pylint: disable = too-many-lines, too-many-branches, too-many-statements
"""
Copyright (c) 2020, <NAME>. All rights reserved.
license: BSD 3-Clause License, see LICENSE for more details.
"""
import os
import fnmatch
import shutil
import pytest
from zm import utils
from zm.features import ToolchainVars
from zm.testing import loadFromJson
from tests.func_utils import *
@pytest.mark.usefixtures("unsetEnviron")
class TestParams(object):
    """Functional tests: buildroot selection and toolchain env-var handling."""

    @pytest.fixture(params = getZmExecutables(), autouse = True)
    def allZmExe(self, request):
        # Run every test once per available zenmake executable flavor.
        self.zmExe = zmExes[request.param]

    @pytest.fixture(params = [joinpath('c', '02-simple'), joinpath('cpp', '04-complex')])
    def project(self, request, tmpdir):
        # Copy the sample project into tmpdir; dump diagnostics on failure.
        def teardown():
            printErrorOnFailed(self, request)
        request.addfinalizer(teardown)
        setupTest(self, request, tmpdir)
        return request.param

    def testBuildRootInCLI(self, project):
        """-o on the command line must override the default build dir."""
        env = { 'ZENMAKE_TESTING_MODE' : '1' }
        cmdLine = ['build', '-o', '_bld']
        assert runZm(self, cmdLine, env)[0] == 0
        checkBuildResults(self, cmdLine, resultExists = True, fakeBuild = True)
        assert self.confPaths.buildroot == joinpath(self.confPaths.buildconfdir, '_bld')

    def testBuildRootInEnv(self, project, monkeypatch):
        """The BUILDROOT environment variable must select the build dir."""
        monkeypatch.setenv('BUILDROOT', '_bld_') # for checkBuildResults
        env = { 'BUILDROOT' : '_bld_', 'ZENMAKE_TESTING_MODE' : '1' }
        cmdLine = ['build']
        assert runZm(self, cmdLine, env)[0] == 0
        checkBuildResults(self, cmdLine, resultExists = True, fakeBuild = True)
        assert self.confPaths.buildroot == joinpath(self.confPaths.buildconfdir, '_bld_')

    @pytest.mark.skipif(PLATFORM != 'linux',
                        reason = "It's enough to test on linux only")
    def testToolchainVars(self, project):
        """CC/CXX and *FLAGS from the environment must reach the build config."""
        # Fixture dir name ('c' or 'cpp') -> zenmake language ('c'/'cxx').
        projectLang = os.path.split(project)[-2].replace('p', 'x')
        # Per-language, per-toolchain env values and the flags to inject.
        fixture = {
            'c' : {
                'gcc': {
                    'sysenvval' : 'gcc',
                    'compflags' : '',
                    'linkflags' : '',
                    'ldflags' : '-Wl,-rpath,.',
                },
                'clang': {
                    'sysenvval' : 'clang',
                    'compflags' : '-O1 -g',
                    'linkflags' : '-Wl,-rpath,. ',
                    'ldflags' : '',
                },
                'clang-path': {
                    # Toolchain given as an absolute path, not a bare name.
                    'sysenvval' : shutil.which('clang'),
                    'compflags' : '-O1 -g',
                    'linkflags' : '-Wl,-rpath,. ',
                    'ldflags' : '',
                },
            },
            'cxx': {
                'g++': {
                    'sysenvval' : 'g++',
                    'compflags' : '-O2 -Wall',
                    'linkflags' : '-Wl,-rpath,. -Wl,--as-needed',
                    'ldflags' : '-fsanitize=address',
                },
                'clang++': {
                    'sysenvval' : 'clang++',
                    'compflags' : '-O3 -Wall -Wextra',
                    'linkflags' : '-Wl,--as-needed -fsanitize=address',
                    'ldflags' : '-Wl,-rpath,.',
                },
            },
        }

        def formExpectedFlags(flags):
            # De-duplicate while keeping the LAST occurrence of each flag.
            flags = utils.uniqueListWithOrder(reversed(flags))
            flags.reverse()
            return flags

        env = { 'ZENMAKE_TESTING_MODE' : '1' }
        cmdLine = ['build']
        sysEnvToolVar = ToolchainVars.sysVarToSetToolchain(projectLang)
        cfgEnvToolVar = ToolchainVars.cfgVarToSetToolchain(projectLang)
        compFlagsName = projectLang.upper() + 'FLAGS'
        # invalid name: the build must fail cleanly.
        toolchain = 'invalid'
        env[sysEnvToolVar] = toolchain
        assert runZm(self, cmdLine, env)[0] != 0
        prjfixture = fixture[projectLang]
        for toolchain, info in prjfixture.items():
            env[sysEnvToolVar] = info['sysenvval']
            env[compFlagsName] = info['compflags']
            env['LINKFLAGS'] = info['linkflags']
            env['LDFLAGS'] = info['ldflags']
            assert runZm(self, ['distclean'])[0] == 0
            assert runZm(self, cmdLine, env)[0] == 0
            targets = obtainBuildTargets(self, cmdLine)
            checkBuildTargets(targets, resultExists = True, fakeBuild = True)
            confManager = processConfManagerWithCLI(self, cmdLine)
            buildout = confManager.root.confPaths.buildout
            # Collect the per-task JSON files, skipping waf bookkeeping.
            paths = []
            patterns = '.* c4che config.log'.split()
            for root, dirs, files in os.walk(buildout):
                ignore = set()
                for pattern in patterns:
                    for name in fnmatch.filter(dirs, pattern):
                        dirs.remove(name) # don't visit sub directories
                    for name in fnmatch.filter(files, pattern):
                        ignore.add(name)
                paths += [os.path.join(root, x) for x in files if x not in ignore]
            for path in paths:
                with open(path, 'r') as f:
                    data = loadFromJson(f.read())
                zmTaskName = data['tgen-name']
                usedEnv = data['env']
                zmtasks = data['zmtasks']
                taskParams = zmtasks[zmTaskName]
                features = taskParams['features']
                targetKind = getTargetPattern(usedEnv, features)[1]
                # check toolchain
                assert usedEnv[cfgEnvToolVar] == [shutil.which(info['sysenvval'])]
                isLink = data['is-link']
                if not isLink:
                    # check CFLAGS/CXXFLAGS
                    sysEnvFlags = env[compFlagsName].split()
                    bconfFlags = utils.toList(taskParams.get(compFlagsName.lower(), []))
                    expectedFlags = formExpectedFlags(bconfFlags + sysEnvFlags)
                    if targetKind == 'shlib':
                        # Waf adds this flag itself
                        expectedFlags = ['-fPIC'] + expectedFlags
                    assert usedEnv.get(compFlagsName, []) == expectedFlags
                else:
                    # check LINKFLAGS/LDFLAGS
                    for flagsName in ('linkflags', 'ldflags'):
                        sysEnvFlags = env[flagsName.upper()].split()
                        bconfFlags = utils.toList(taskParams.get(flagsName, []))
                        expectedFlags = formExpectedFlags(bconfFlags + sysEnvFlags)
                        if targetKind == 'shlib' and flagsName == 'linkflags':
                            # Waf adds this flag itself
                            expectedFlags = ['-shared'] + expectedFlags
                        assert usedEnv.get(flagsName.upper(), []) == expectedFlags
| 1.75 | 2 |
GoogleCodeJam/2018/Qual/C-GoGopher/C.py | eltrai/algsContests | 0 | 12761360 | <reponame>eltrai/algsContests<filename>GoogleCodeJam/2018/Qual/C-GoGopher/C.py<gh_stars>0
#!/usr/bin/env python3
import sys
def readint():
    """Read one line from stdin and return it as an int."""
    line = input()
    return int(line)
def readints():
    """Read one line of whitespace-separated integers from stdin.

    Returns an iterable of ints; callers tuple-unpack it (e.g. I, J).
    """
    return map(int, input().split())
def readline():
    """Read one raw line from stdin as a string."""
    text = input()
    return str(text)
def newMatrix():
    """Return a fresh 3x3 grid of zeros (independent row lists)."""
    return [[0, 0, 0] for _ in range(3)]
def matrixFull(m):
    """Return True when every cell of the 3x3 grid m is non-zero (filled).

    Replaces the hand-rolled nested-loop scan with the builtin all().
    """
    return all(cell != 0 for row in m for cell in row)
def run():
    """Play one "Go, Gopher!" session against the interactive judge.

    Strategy: repeatedly aim at the center (2+offset, 2) of a 3x3 block;
    once that block is fully dug, move the target 3 rows further.
    Judge replies: (-1, *) = protocol error -> exit; (0, 0) = orchard
    done; otherwise (I, J) is the cell actually prepared.
    """
    A = readint()  # required cell count from the judge (unused by this strategy)
    square = newMatrix()
    offset = 0
    print("2 2")
    while True:
        I, J = readints()
        if I == -1:
            # Judge rejected an exchange -- abort the whole program.
            sys.exit()
        elif I == 0:
            # Orchard prepared; this test case is finished.
            return
        else:
            # Record the dug cell relative to the current 3x3 block.
            square[I - offset - 1][J - 1] = 1
        if matrixFull(square):
            # Current block complete: start a fresh block 3 rows down.
            offset += 3
            square = newMatrix()
        print("%d %d" % (2 + offset, 2))
# One interactive session per test case.
T = readint()
for case in range(T):
    run()
| 3.265625 | 3 |
love_pet_lifetime/pet_purchase/apps.py | DCCop/love-pet-lifetime | 0 | 12761361 | from django.apps import AppConfig
class PetPurchaseConfig(AppConfig):
    """Django application configuration for the pet_purchase app."""
    # Dotted module path Django uses to locate this application.
    name = 'pet_purchase'
| 1.289063 | 1 |
2D_LJ_MD.py | michael-cowan/Molecular-Dynamics | 7 | 12761362 | <gh_stars>1-10
"""
2D Molecular Dynamics simulation that models Lennard-Jones particles with periodic boundaries
Author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
from time import time
# All values taken as estimates to represent Argon atoms in the gaseous phase
eps = 1.65E-21       # J/molec  -- LJ well depth
sigma = 3.40E-10     # m        -- LJ diameter
kb = 1.38064852E-23  # J/(molec*K)
temp = 100           # K
m = 6.63E-26         # kg/molec
t = 1E-14            # s        -- integration timestep
box = 1E-8           # m        -- simulation box side length
n = 225              # molecules (must be a perfect square for lattice())
atom = 96            # atom whose trajectory will be plotted

# Dimensionless (reduced LJ) parameters
temp_dless = kb * temp / eps
m_dless = 1.0
t_dless = t / (sigma * np.sqrt(m / eps))
box_dless = box / sigma
half = round(box_dless / 2.0, 3)  # half box length, used for minimum image
dt = t_dless
cut = 2.  # cut-off r length used in LJ force calc
cut_potential = 4.0 * ((cut ** -12) - (cut ** -6))  # energy shift at the cutoff
def lattice():
    """Place the n atoms on an evenly spaced square lattice inside the box.

    Requires n to be a perfect square. Returns a (2, n) array of reduced
    positions, shifted so the lattice is centered in the periodic box.

    Fix: the original compared integer counters against the float
    np.sqrt(n) (including a float modulo), which is fragile; an integer
    side length makes the loop bounds exact.
    """
    side = int(round(np.sqrt(n)))  # atoms per row; n must be side**2
    space = np.linspace(0, box_dless, side + 1)[:-1]
    diff = (box_dless - space[-1]) / 2.0  # centering shift
    pos = np.zeros([2, n])
    i = 0
    for row in range(side):
        for col in range(side):
            pos[0, i] = space[col] + diff  # x varies fastest, as before
            pos[1, i] = space[row] + diff
            i += 1
    return pos
#Initializes velocities, positions, & "old" positions
def initialize():
    """Initial positions (lattice), velocities, and previous positions.

    Velocities are drawn uniformly, shifted to zero total momentum, and
    rescaled so the kinetic temperature per direction equals T*. "Old"
    positions are back-extrapolated one step for the Verlet integrator.
    Element-wise Python loops are replaced with equivalent vectorized
    numpy operations (same arithmetic, same RNG draws).
    """
    pos = lattice()                       # atoms start on a square lattice
    vel = np.random.random([2, n]) - 0.5  # uniform velocities in [-0.5, 0.5)
    # Remove center-of-mass momentum so the box does not drift.
    vel[0] -= vel[0].sum() * m_dless / n
    vel[1] -= vel[1].sum() * m_dless / n
    # Scale each component so <v*^2> matches the reduced temperature
    # (normalized by 2N degrees of freedom, as in the original).
    norm_x = (vel[0] ** 2).sum() / (2 * n)
    norm_y = (vel[1] ** 2).sum() / (2 * n)
    vel[0] *= np.sqrt(temp_dless / norm_x)
    vel[1] *= np.sqrt(temp_dless / norm_y)
    # Previous positions implied by the current velocities (for Verlet).
    old_pos = pos - vel * dt
    return pos, vel, old_pos
#LJ Pairwise additive force
def force(pos):
f = np.zeros([2,n])
potential = 0
d = np.zeros(2)
for i in xrange(n - 1):
for j in xrange(i + 1, n):
d = np.zeros(2)
for k in xrange(2):
d[k] = pos[k, i] - pos[k, j]
if d[k] > half: #Central Image Algorithm
d[k] = d[k] - box_dless
elif d[k] < -half:
d[k] = d[k] + box_dless
r = np.sqrt(np.dot(d, d))
if r < cut:
potential += 4.0 * ((r ** -12) - (r ** -6))
#f=(48/r^2)*(1/r^12 - 1/2r^6)
ff = (r ** -2) * (r ** -12) - (0.5 * (r ** -6))
f[:, i] = f[:, i] + (d * ff)
f[:, j] = f[:, j] - (d * ff)
return 48. * f, (potential / (2 * n)) + cut_potential
#Calculates the kinetic energy based on velocities
def kin(vel):
v2 = 0
for i in xrange(n):
v2 += np.dot(vel[:, i], vel[:, i]) #v_r = sqrt(vx^2 + vy^2) so v_r^2 = vx^2 + vy^2
return (0.5 * m_dless * v2) / (2 * n) #Returns kinetic energy per atom of system
#Verlet algorithm used to determine new positions from forces
def verlet(old_pos, pos, f, check):
for i in xrange(n):
for k in range(2):
if check[k, i] == 1:
old_pos[k, i] -= box_dless
elif check[k, i] == 2:
old_pos[k, i] += box_dless
new_pos = 2 * pos - old_pos + ((1 / m_dless) * f * (dt ** 2))
vel=(new_pos-old_pos)/(2.0*dt) #Finite difference method to find velocity
kinetic = kin(vel)
return new_pos, pos, kinetic
#Velocity-Verlet algorithm used to determine new positions and velocities from force
def velocity_verlet(pos, vel, f):
new_pos = pos + vel * dt + 0.5 * f * (dt ** 2)
new_pos, check = periodic(new_pos)
f_n, potential = force(new_pos)
new_vel = vel + 0.5 * (f + f_n) * dt
return new_pos, new_vel, f_n, potential
#Second Order Runge-Kutta algorithm
def runge_kutta(pos, vel, f):
    """One RK2 (midpoint) step; returns (new_pos, new_vel).

    The velocity is advanced with the force evaluated at the half-step
    position; the position update uses the initial force.
    """
    half_pos = pos + 0.5 * vel * dt
    new_pos = pos + dt * (vel + (0.5 / m_dless) * f * dt)
    # NOTE(review): half_potential is computed but never used or returned --
    # confirm whether the caller should receive it like the other integrators.
    half_f, half_potential = force(half_pos)
    new_vel = vel + (dt / m_dless) * half_f
    return new_pos, new_vel
#Converts coordinates to create periodic boundaries
def periodic(pos):
    """Wrap coordinates into [0, box_dless) and record which atoms crossed.

    Returns (pos, check) where check[axis, atom] is 1 when the coordinate
    was wrapped down, 2 when wrapped up, 0 otherwise.
    """
    crossed = np.zeros([2, n])
    for atom_idx in xrange(n):
        for axis in range(2):
            if pos[axis, atom_idx] > box_dless:
                pos[axis, atom_idx] -= box_dless
                crossed[axis, atom_idx] = 1
            if pos[axis, atom_idx] < 0:
                pos[axis, atom_idx] += box_dless
                crossed[axis, atom_idx] = 2
    return pos, crossed
#Calculates the radial distribution function
def rdf(pos, box_dless):
    """Radial distribution function g(r) of the current configuration.

    Pair distances use the minimum image in each axis; counts are binned in
    shells of width dr and normalised by the ideal-gas shell population.
    Returns (shell radii, g(r)) with the last (partial) bin dropped.
    """
    area = box_dless ** 2
    r_max = half
    num_den = n / area
    dr = 0.3
    hist = np.zeros(int(r_max / dr))
    for i in xrange(n - 1):
        for j in xrange(i + 1, n):
            r2 = np.zeros(2)
            for k in xrange(2):
                a = abs(pos[k, i] - pos[k, j])
                if a > r_max:
                    a = box_dless - a
                r2[k] = a ** 2
            r = np.sqrt(r2.sum())
            if r < r_max:
                # each pair contributes to both atoms' counts (factor 2);
                # distances landing exactly in the last bin are folded back
                if int(r / dr) == int(r_max / dr):
                    hist[int(r / dr)-1] += 2
                else:
                    hist[int(r / dr)] += 2
    for x in xrange(len(hist)):
        shell_area = np.pi * (((dr * (x + 1)) ** 2) - ((dr * x) ** 2))
        hist[x] = hist[x] / (n * num_den * shell_area)
    # NOTE(review): linspace's third argument is a float here; newer NumPy
    # versions require an int -- confirm intended number of bins.
    ddr = np.linspace(dr, r_max, r_max / dr)
    return ddr[:-1], hist[:-1]
#Runs simulation with verlet algorithm
def sim_verlet(steps, blocks):
    """Run `steps` position-Verlet steps grouped into `blocks` averaging blocks.

    Returns (final positions, tracked-atom trajectory, per-block mean total
    energy, block-averaged g(r), shell radii).
    """
    pos, vel, old_pos = initialize()
    pos, check = periodic(pos)
    f, potential = force(pos)
    kinetic = kin(vel)
    e_tot_0 = potential + kinetic
    gr_avg = ddr = 1
    print 'Total Energy of System (Per Atom)'.rjust(25)
    print 'Initial: '+str(round(e_tot_0, 6)).rjust(15)
    study = np.zeros([2, (blocks * (steps / blocks)) + 1])
    study[:, 0] = pos[:, atom]
    en = np.zeros(blocks + 1)
    en[0] = e_tot_0
    for s in xrange(blocks):
        e_tot = 0
        for x in xrange(steps / blocks):
            pos, old_pos, kinetic = verlet(old_pos, pos, f, check)
            pos, check = periodic(pos)
            # energy accumulates the potential of the previous configuration;
            # the new force/potential are evaluated afterwards
            e_tot += potential + kinetic
            study[:, (s * (steps / blocks)) + (x + 1)] = pos[:, atom]
            f, potential = force(pos)
        en[s + 1] = e_tot / (steps / blocks)
        ddr, gr = rdf(pos, box_dless)
        if s == 0:
            gr_avg = np.zeros(len(ddr))
        gr_avg += gr
        print 'Block ' + str(s+1) + ': ' + str(round(en[s + 1], 6)).rjust(15)
    gr_avg /= blocks
    return pos, study, en, gr_avg, ddr
#Runs simulation with velocity-verlet algorithm
def sim_vel_verlet(steps, blocks):
    """Run `steps` velocity-Verlet steps grouped into `blocks` averaging blocks.

    Returns (final positions, tracked-atom trajectory, per-block mean total
    energy, block-averaged g(r), shell radii).
    """
    pos, vel, old_pos = initialize()
    f, potential = force(pos)
    kinetic = kin(vel)
    e_tot_0 = potential + kinetic
    gr_avg = ddr = 1
    print 'Total Energy of System (Per Atom)'.rjust(25)
    print 'Initial: '+str(round(e_tot_0, 6)).rjust(15)
    study = np.zeros([2, (blocks * (steps / blocks)) + 1])
    study[:, 0] = pos[:, atom]
    en = np.zeros(blocks + 1)
    en[0] = e_tot_0
    for s in xrange(blocks):
        e_tot = 0
        for x in xrange(steps / blocks):
            pos, vel, f, potential = velocity_verlet(pos, vel, f)
            study[:, (s * (steps / blocks)) + (x + 1)] = pos[:, atom]
            kinetic = kin(vel)
            e_tot += potential + kinetic
        en[s + 1] = e_tot / (steps / blocks)
        ddr, gr = rdf(pos, box_dless)
        if s == 0:
            gr_avg = np.zeros(len(ddr))
        gr_avg += gr
        print 'Block ' + str(s+1) + ': ' + str(round(en[s + 1], 6)).rjust(15)
    gr_avg /= blocks
    return pos, study, en, gr_avg, ddr
#Runs simulation with Runge-Kutta algorithm
def sim_runge_kutta(steps, blocks):
    """Run `steps` RK2 steps grouped into `blocks` averaging blocks.

    Returns (final positions, tracked-atom trajectory, per-block mean total
    energy, block-averaged g(r), shell radii).
    """
    pos, vel, old_pos = initialize()
    study = np.zeros([2, (blocks * (steps / blocks)) + 1])
    study[:, 0] = pos[:, atom]
    f, potential = force(pos)
    kinetic = kin(vel)
    e_tot_0 = potential + kinetic
    gr_avg = ddr = 1
    print 'Total Energy of System (Per Atom)'.rjust(25)
    print 'Initial: '+str(round(e_tot_0, 6)).rjust(15)
    en = np.zeros(blocks + 1)
    en[0] = e_tot_0
    for s in xrange(blocks):
        e_tot = 0
        for x in xrange(steps / blocks):
            pos, vel = runge_kutta(pos, vel, f)
            pos, check = periodic(pos)
            study[:, (s * (steps / blocks)) + (x + 1)] = pos[:, atom]
            f, potential = force(pos)
            kinetic = kin(vel)
            e_tot += potential + kinetic
        en[s + 1] = e_tot / (steps / blocks)
        ddr, gr = rdf(pos, box_dless)
        if s == 0:
            gr_avg = np.zeros(len(ddr))
        gr_avg += gr
        print 'Block ' + str(s+1) + ': ' + str(round(en[s + 1], 6)).rjust(15)
    gr_avg /= blocks
    return pos,study,en,gr_avg,ddr
#Adds labels to bar graph
def autolabel(rects, ax):
    """Annotate each bar in *rects* with its height, centred just above it."""
    for bar in rects:
        h = bar.get_height()
        label = format(round(h, 3), '.3f')
        x_mid = bar.get_x() + bar.get_width() / 2.
        ax.text(x_mid, 1.01 * h, label, ha = 'center', va = 'bottom')
#Writes the results to the text file "fid"
def pr(fid, name, pos, study, en, gr_avg, ddr):
    """Append one algorithm's results to the already-open file *fid*."""
    fid.write('\n\n' + name + '\n\n')
    sections = (
        ('Positions:\n', pos),
        ('\nStudy:\n', study),
        ('\nEnergy:\n', en),
        ('\nRDF:\n', gr_avg),
        ('\nr*:\n', ddr),
    )
    for header, payload in sections:
        fid.write(header)
        fid.write(str(payload))
#Wrapped function to run a simulation for verlet, velocity-verlet, and Runge-Kutta algorithms
def run(steps = 500, blocks = 100):
    """Run all three integrators, write results to sim_results.txt, and plot
    runtimes, g(r), energy fluctuation, and final configurations."""
    fid = open('sim_results.txt','w')
    print '\n' + '-' * 35 + '\n\n'
    print 'Total Steps per Simulation: '+str(steps)+'\n\n'
    print '-' * 35 + '\n\n'
    print 'Running Verlet Algorithm.\n\n'
    ### Verlet ###
    start_verlet = time()
    pos1, study1, en1, gr_avg1, ddr1 = sim_verlet(steps, blocks)
    stop_verlet = time()
    pr(fid, 'Verlet Algorithm', pos1, study1, en1, gr_avg1, ddr1)
    print '\nVerlet complete.'
    print '\n\n' + '-' * 35 + '\n\n' + 'Running Velocity Verlet Algorithm.\n\n'
    ### Velocity-Verlet ###
    start_vel_verlet = time()
    pos2, study2, en2, gr_avg2, ddr = sim_vel_verlet(steps, blocks)
    stop_vel_verlet = time()
    pr(fid, 'Velocity Verlet Algorithm', pos2, study2, en2, gr_avg2, ddr)
    print '\nVelocity Verlet complete.'
    print '\n\n' + '-' * 35 + '\n\n' + 'Running Runge-Kutta Algorithm.\n\n'
    ### Runge-Kutta ###
    start_runge_kutta = time()
    # NOTE(review): ddr is overwritten here; the g(r) plot below uses this
    # last ddr for both the velocity-Verlet and Runge-Kutta curves -- confirm
    # the shell radii are identical across runs.
    pos3, study3, en3, gr_avg3, ddr = sim_runge_kutta(steps, blocks)
    stop_runge_kutta = time()
    pr(fid, 'Runge-Kutta Algorithm', pos3, study3, en3, gr_avg3, ddr)
    print '\nRunge-Kutta complete.'
    #Calculates runtime per step for each simulation
    time_verlet = (stop_verlet - start_verlet) / steps
    time_vel_verlet = (stop_vel_verlet - start_vel_verlet) / steps
    time_runge_kutta = (stop_runge_kutta - start_runge_kutta) / steps
    #Creates bar chart for runtimes
    figt = plt.figure('Time Per Step', figsize = (13,11))
    axt = figt.add_subplot(111)
    rects = axt.bar([1, 3, 5], [time_verlet, time_vel_verlet, time_runge_kutta], align = 'center')
    autolabel(rects, axt)
    axt.set_ylabel('average time per step (seconds)')
    axt.set_ylim(0, 1)
    axt.set_xticks([1, 3, 5])
    axt.set_xticklabels(['Verlet', 'Velocity Verlet', 'Runge-Kutta'])
    #figt.savefig('compare_time2.png')
    #Adds runtimes to the text file "fid"
    fid.write('\n\nTimes:\n')
    fid.write('Verlet: ' + str(time_verlet))
    fid.write('\nVelocity Verlet: ' + str(time_vel_verlet))
    fid.write('\nRunge-Kutta: ' + str(time_runge_kutta))
    fid.close()
    #Plots the radial distribution function versus radial distance for each simulation
    figr = plt.figure('Radial Distribution Function', figsize=(13, 11))
    axr = figr.add_subplot(111)
    axr.plot(ddr1, gr_avg1, ddr, gr_avg2, ddr, gr_avg3)
    axr.legend(['Verlet Algorithm', 'Velocity Verlet Algorithm', 'Runge-Kutta Algorithm'],
               shadow = True, bbox_to_anchor = (1.02, 1.05), ncol = 3)
    axr.set_xlabel('r*')
    axr.set_ylabel('g(r)')
    #figr.savefig('compare_rdf2.png')
    #Plots the average energy (for each blocks) for all simulations
    fige = plt.figure('Energy Fluctuation', figsize = (13, 11))
    axe = fige.add_subplot(111)
    x = range(len(en1))
    axe.plot(x, en1, x, en2, x, en3)
    axe.legend(['Verlet Algorithm', 'Velocity Verlet Algorithm', 'Runge-Kutta Algorithm'],
               shadow = True, bbox_to_anchor = (1.02, 1.05), ncol = 3)
    axe.set_xlabel('Block')
    axe.set_ylabel('average total dimensionless energy (per atom)')
    #fige.savefig('compare_energy2.png')
    #Plots the final position of atoms in verlet sim and the trajectory of atom tracked
    fig = plt.figure('Verlet MD', figsize = (13, 11))
    ax = fig.add_subplot(111)
    ax.set_ylim(0, box_dless)
    ax.set_xlim(0, box_dless)
    ppos = np.array([np.delete(pos1[0], atom), np.delete(pos1[1], atom)])
    ax.plot(ppos[0, :], ppos[1, :], 'o', color = 'blue')
    ax.plot(pos1[0, atom], pos1[1, atom], 'o', study1[0], study1[1], color = 'green')
    ax.set_xticks([])
    ax.set_yticks([])
    #fig.savefig('verlet_sim.png')
    #Plots the final position of atoms in velocity-verlet sim and the trajectory of atom tracked
    fig2 = plt.figure('Velocity-Verlet MD', figsize = (13, 11))
    ax2 = fig2.add_subplot(111)
    ax2.set_ylim(0, box_dless)
    ax2.set_xlim(0, box_dless)
    ppos = np.array([np.delete(pos2[0], atom), np.delete(pos2[1], atom)])
    ax2.plot(ppos[0, :], ppos[1, :], 'o', color = 'blue')
    ax2.plot(pos2[0, atom], pos2[1, atom], 'o', study2[0], study2[1], color = 'green')
    ax2.set_xticks([])
    ax2.set_yticks([])
    #fig2.savefig('velocity_verlet_sim.png')
    #Plots the final position of atoms in Runge-Kutta sim and the trajectory of atom tracked
    fig3 = plt.figure('Runge-Kutta MD', figsize = (13, 11))
    ax = fig3.add_subplot(111)
    ax.set_ylim(0, box_dless)
    ax.set_xlim(0, box_dless)
    ppos = np.array([np.delete(pos3[0], atom), np.delete(pos3[1], atom)])
    ax.plot(ppos[0, :], ppos[1, :], 'o', color = 'blue')
    ax.plot(pos3[0, atom], pos3[1, atom], 'o', study3[0], study3[1], color = 'green')
    ax.set_xticks([])
    ax.set_yticks([])
    #fig3.savefig('runge_kutta_sim.png')
    plt.show()
if __name__ == '__main__':
    #Sample run of program; 50 steps split into 5 blocks
    run(50, 5)
| 2.75 | 3 |
osc_bge/school/migrations/0004_auto_20181130_1342.py | jisuhan3201/osc-bge | 0 | 12761363 | <gh_stars>0
# Generated by Django 2.0.9 on 2018-11-30 13:42
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds SchoolPhotos/SchoolTypes models, renames
    # School.contacts to phone, and drops several obsolete columns.

    dependencies = [
        ('school', '0003_auto_20181128_1859'),
    ]

    operations = [
        migrations.CreateModel(
            name='SchoolPhotos',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True, null=True)),
                ('updated_at', models.DateTimeField(auto_now=True, null=True)),
                ('photo', models.ImageField(blank=True, null=True, upload_to='school')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='SchoolTypes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True, null=True)),
                ('updated_at', models.DateTimeField(auto_now=True, null=True)),
                ('type', models.CharField(max_length=80, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.RenameField(
            model_name='school',
            old_name='contacts',
            new_name='phone',
        ),
        migrations.RemoveField(
            model_name='college',
            name='detail_type',
        ),
        migrations.RemoveField(
            model_name='secondary',
            name='detail_type',
        ),
        migrations.RemoveField(
            model_name='secondary',
            name='entry_requirement',
        ),
        migrations.RemoveField(
            model_name='secondary',
            name='toefl_requirement',
        ),
        migrations.RemoveField(
            model_name='secondary',
            name='total_fee',
        ),
    ]
| 1.78125 | 2 |
python/graphscope/nx/algorithms/tests/forward/test_cycles.py | luoxiaojian/GraphScope-1 | 2 | 12761364 | <filename>python/graphscope/nx/algorithms/tests/forward/test_cycles.py
import networkx.algorithms.tests.test_cycles
import pytest
from networkx.algorithms.tests.test_cycles import TestMinimumCycles
from graphscope.nx.utils.compat import import_as_graphscope_nx
from graphscope.nx.utils.compat import with_graphscope_nx_context
import_as_graphscope_nx(networkx.algorithms.tests.test_cycles,
decorators=pytest.mark.usefixtures("graphscope_session"))
from graphscope.nx.algorithms import minimum_cycle_basis
def assert_basis_equal(a, b):
    """Assert that two cycle bases contain the same cycles, ignoring order."""
    normalized_a = sorted(a)
    normalized_b = sorted(b)
    assert normalized_a == normalized_b
@pytest.mark.usefixtures("graphscope_session")
@with_graphscope_nx_context(TestMinimumCycles)
class TestMinimumCycles:
    def test_weighted_diamond(self):
        """Override the upstream networkx test: graphscope returns a different
        but equally valid minimum cycle basis for the weighted diamond."""
        mcb = minimum_cycle_basis(self.diamond_graph, weight="weight")
        # in graphscope.nx, answer is [[1, 2, 3, 4], [2, 3, 4]] and it's correct too.
        assert_basis_equal([sorted(c) for c in mcb], [[1, 2, 3, 4], [2, 3, 4]])
| 2.453125 | 2 |
tests/neural/beer2vec/test_beer2vec_load.py | ZXVentures/brewgorithm_devkit | 1 | 12761365 | from brewgorithm import beer2vec, beer_emb, word_weighter
import numpy as np
import unittest
from sklearn.metrics.pairwise import cosine_similarity
class TestBeer2vec(unittest.TestCase):
    def test_most_similar_test(self):
        """Embedding regression test: the beer closest (after the top hit) to
        the average vector of 'apricot peach fruity' should be Delirium
        Tremens, with known metadata and 'fruity' among its nearest words."""
        beers = beer2vec.get_beer2vec()
        embeddings = beer_emb.embed_doc("apricot peach fruity", word_weighter.is_beer_related)
        emb = np.average(embeddings, axis=0)
        sims = cosine_similarity([emb], [beer['vector'] for beer in beers]).reshape(-1)
        candidates = []
        for i, sim in enumerate(sims):
            candidates.append((sim, i))
        # take the second-best match (the best is assumed to be a trivial hit)
        result = [x for x in sorted(candidates, key=lambda i: i[0], reverse=True)[:2]][1]
        self.assertEqual(beers[result[1]]['BeerNamePlain'].strip(), "delirium tremens")
        self.assertEqual(float(beers[result[1]]['Alcohol']), 8.5)
        self.assertEqual(int(beers[result[1]]['OverallPctl']), 93)
        desc = [a[0] for a in beer_emb.most_similar(positive=[beers[result[1]]['vector']], negative=[])]
        self.assertIn("fruity", desc)
if __name__ == '__main__':
    # allow running this test module directly
    unittest.main()
| 2.765625 | 3 |
smda/utility/ElfFileLoader.py | williballenthin/smda | 0 | 12761366 | <reponame>williballenthin/smda<filename>smda/utility/ElfFileLoader.py<gh_stars>0
import logging
import io
LOGGER = logging.getLogger(__name__)
LIEF_AVAILABLE = False
try:
    import lief
    lief.logging.disable()
    LIEF_AVAILABLE = True
# Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are no
# longer swallowed; any other failure still just disables LIEF support.
except Exception:
    LOGGER.warning("LIEF not available, will not be able to parse data from ELF files.")
class ElfFileLoader(object):
    """Parses ELF binaries via LIEF and maps them into a flat memory image."""

    @staticmethod
    def isCompatible(data):
        """Return True if LIEF is usable and *data* starts with the ELF magic."""
        if not LIEF_AVAILABLE:
            return False
        # check for ELF magic
        return data[:4] == b"\x7FELF"

    @staticmethod
    def getBaseAddress(binary):
        """Infer the image base as min(virtual_address - file offset) over sections."""
        elffile = lief.parse(bytearray(binary))
        base_addr = 0
        # sentinel ensures min() has something to compare against
        candidates = [0xFFFFFFFFFFFFFFFF]
        for section in elffile.sections:
            if section.virtual_address:
                candidates.append(section.virtual_address - section.offset)
        if len(candidates) > 1:
            base_addr = min(candidates)
        return base_addr

    @staticmethod
    def mapBinary(binary):
        """Lay the binary out as it would appear in memory.

        Prefers section headers; falls back to program segments when no
        sections are present. Gaps are zero-filled; everything before the
        first mapped section/segment is copied verbatim from the file.
        Returns the mapped image as bytes.
        """
        elffile = lief.parse(bytearray(binary))
        base_addr = ElfFileLoader.getBaseAddress(binary)
        mapped_binary = b""
        LOGGER.debug("Assuming base address 0x%x for inference of reference counts (based on ELF header)", base_addr)
        if elffile.sections:
            max_virt_section_offset = 0
            min_raw_section_offset = 0xFFFFFFFFFFFFFFFF
            for section in elffile.sections:
                if section.virtual_address:
                    max_virt_section_offset = max(max_virt_section_offset, section.size + section.virtual_address)
                    min_raw_section_offset = min(min_raw_section_offset, section.virtual_address)
            # copy header region, then splice each section at its RVA
            if max_virt_section_offset:
                mapped_binary = bytearray([0] * (max_virt_section_offset - base_addr))
                mapped_binary[0:min_raw_section_offset] = binary[0:min_raw_section_offset]
                for section in elffile.sections:
                    if section.virtual_address:
                        rva = section.virtual_address - base_addr
                        mapped_binary[rva:rva + section.size] = section.content
        elif elffile.segments:
            max_virt_segment_offset = 0
            min_raw_segment_offset = 0xFFFFFFFFFFFFFFFF
            for segment in elffile.segments:
                if segment.virtual_address:
                    max_virt_segment_offset = max(max_virt_segment_offset, segment.physical_size + segment.virtual_address)
                    min_raw_segment_offset = min(min_raw_segment_offset, segment.virtual_address)
            # copy header region, then splice each segment at its RVA
            if max_virt_segment_offset:
                mapped_binary = bytearray([0] * (max_virt_segment_offset - base_addr))
                mapped_binary[0:min_raw_segment_offset] = binary[0:min_raw_segment_offset]
                for segment in elffile.segments:
                    if segment.virtual_address:
                        rva = segment.virtual_address - base_addr
                        mapped_binary[rva:rva + segment.physical_size] = segment.content
        return bytes(mapped_binary)

    @staticmethod
    def getBitness(binary):
        """Return 64/32 for x86-64/i386 ELF files, 0 for anything else."""
        # TODO add machine types whenever we add more architectures
        elffile = lief.parse(bytearray(binary))
        machine_type = elffile.header.machine_type
        if machine_type == lief.ELF.ARCH.x86_64:
            return 64
        elif machine_type == lief.ELF.ARCH.i386:
            return 32
        return 0

    @staticmethod
    def mergeCodeAreas(code_areas):
        """Sort [start, end] areas and merge pairs that are exactly adjacent.

        BUGFIX/cleanup: the previous version built a `result` list that was
        never returned (dead code); the merged list itself was always the
        return value. The dead accumulator has been removed.
        """
        merged = sorted(code_areas)
        index = 0
        while index < len(merged) - 1:
            this_area = merged[index]
            next_area = merged[index + 1]
            if this_area[1] != next_area[0]:
                index += 1
            else:
                merged = merged[:index] + [[this_area[0], next_area[1]]] + merged[index + 2:]
        return merged

    @staticmethod
    def getCodeAreas(binary):
        """Return merged [start, end) virtual ranges of executable sections."""
        # TODO add machine types whenever we add more architectures
        elffile = lief.parse(bytearray(binary))
        code_areas = []
        for section in elffile.sections:
            # SHF_EXECINSTR = 4
            if section.flags & 0x4:
                section_start = section.virtual_address
                section_size = section.size
                # NOTE(review): assumes section.alignment is non-zero -- confirm
                # LIEF never reports 0 here, otherwise this divides by zero.
                if section_size % section.alignment != 0:
                    section_size += section.alignment - (section_size % section.alignment)
                section_end = section_start + section_size
                code_areas.append([section_start, section_end])
        return ElfFileLoader.mergeCodeAreas(code_areas)
| 2.421875 | 2 |
DeepLearning/bo-deeplog/bo_training.py | jadsonjs/DataScience | 0 | 12761367 | <reponame>jadsonjs/DataScience
#python
import numpy as np
from numpy import array
#matploy for graphic
import matplotlib.pyplot as plt
# my modules
from csv_module import load_traning_files
from csv_module import load_traning_output_files
from csv_module import load_test_files
from csv_module import load_test_output_files
from csv_module import load_validation_files
from csv_module import load_validation_output_files
from array_module import reshape_input
# my modules for deep learning funcions
from bo_module import buildLSTMModel1
from bo_module import buildLSTMModel2
from bo_module import buildLSTMModel3
from bo_module import traningLSTM1
from bo_module import traningLSTM2
# ==================== input values ====================
#linux
#base_directory = '/home/jadson/git/deeplearning/data'
#macos
base_directory = '/Users/jadson/git/deeplearning/data'
training_directory = base_directory+'/training'
test_directory = base_directory+'/tests'
validation_directory = base_directory+'/validation'
model_file = base_directory+'/lstm_model.h5'
# size of the data set
traning_samples = 600
test_samples = 200
validation_samples = 142
timesteps = 100
features = 1
classes = 10
#configuration of lstm
lstm_layer_size = 100
dence_layer_size = 10
batch_size= 16
epochs=100
# ======================================================
# ============= LOAD THE TRANING DATA FROM THE /data/training ===============
print(' ------- loading traning data ------ ')
x_train = load_traning_files(training_directory, traning_samples)
# LSTM expects (samples, timesteps, features)
x_train3d = reshape_input(x_train, traning_samples, timesteps, features) # 256 samples x 100 timesteps x 1 features
print(' ------- loading traning output data ------ ')
y_train = load_traning_output_files(training_directory, traning_samples) # 256 samples x 10 classes
# ============= LOAD THE TEST DATA FROM THE /data/tests ===============
print(' ------- loading test data ------ ')
x_test = load_test_files(test_directory, test_samples) # 100 timesteps x 1 feature
x_test3d = reshape_input(x_test, test_samples, timesteps, features) # 10 samples x 100 timesteps x 1 feature
print(' ------- loading test output data ------ ')
y_test = load_test_output_files(test_directory, test_samples) # 10 classes x 10 samples
# ============= LOAD THE TEST DATA FROM THE /data/tests ===============
print(' ------- loading test data ------ ')
x_validation = load_validation_files(validation_directory, validation_samples) # 100 timesteps x 1 feature
x_validation3d = reshape_input(x_validation, validation_samples, timesteps, features) # 10 samples x 100 timesteps x 1 feature
print(' ------- loading test output data ------ ')
y_validation = load_validation_output_files(validation_directory, validation_samples) # 10 classes x 10 samples
# ==================== data analysis ===============
#print('--- X_train ---')
print( np.array(x_train) )
print( np.array(x_train).ndim )
#print(len(x_train))
#print('--- y_train ---')
#print(y_train)
#print(len(y_train))
# Make a histogram with 10 classes of the `labels` data
plt.hist( np.array(y_train), 10)
# Show the plot
plt.show()
# Make a histogram with 237 urls of the `labels` data
plt.hist( np.array(x_train), 237)
# Show the plot
plt.show()
# ==================== build model ===============
model = buildLSTMModel3(lstm_layer_size, dence_layer_size, timesteps, features)
traningLSTM1(model, batch_size, epochs, np.array(x_train3d), np.array(y_train), np.array(x_validation3d), np.array(y_validation), np.array(x_test3d), np.array(y_test) )
# ==================== Save the Model ===============
# save model to single file
# sudo pip install h5py
model.save(model_file)
| 2.359375 | 2 |
mean_teacher/cli.py | coma2441/LP-DeepSSL | 1 | 12761368 | # Copyright (c) 2018, Curious AI Ltd. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.`
#
# Changes were made by
# Authors: <NAME>, <NAME>, <NAME>, <NAME>. 2018.
import re
import argparse
import logging
from . import architectures, datasets
LOG = logging.getLogger('main')
__all__ = ['parse_cmd_args', 'parse_dict_args']
def create_parser():
    """Build the argparse parser with all training hyper-parameters."""
    parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
    # --- data loading ---
    parser.add_argument('--dataset', metavar='DATASET', default='cifar10',
                        choices=datasets.__all__,
                        help='dataset: ' +
                            ' | '.join(datasets.__all__) +
                            ' (default: imagenet)')
    parser.add_argument('--train-subdir', type=str, default='train+val',
                        help='the subdirectory inside the data directory that contains the training data')
    parser.add_argument('--eval-subdir', type=str, default='test',
                        help='the subdirectory inside the data directory that contains the evaluation data')
    parser.add_argument('--label-split', default=10, type=int, metavar='FILE',
                        help='list of image labels (default: based on directory structure)')
    parser.add_argument('--exclude-unlabeled', default=False, type=str2bool, metavar='BOOL',
                        help='exclude unlabeled examples from the training set')
    parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
                        help='number of data loading workers (default: 2)')
    # --- optimisation schedule ---
    parser.add_argument('--epochs', default=180, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                        help='manual epoch number (useful on restarts)')
    parser.add_argument('-b', '--batch-size', default=100, type=int,
                        metavar='N', help='mini-batch size (default: 256)')
    parser.add_argument('--labeled-batch-size', default=None, type=int,
                        metavar='N', help="labeled examples per minibatch (default: no constrain)")
    parser.add_argument('--lr', '--learning-rate', default=0.05, type=float,
                        metavar='LR', help='max learning rate')
    parser.add_argument('--initial-lr', default=0.0, type=float,
                        metavar='LR', help='initial learning rate when using linear rampup')
    parser.add_argument('--lr-rampup', default=0, type=int, metavar='EPOCHS',
                        help='length of learning rate rampup in the beginning')
    parser.add_argument('--lr-rampdown-epochs', default=210, type=int, metavar='EPOCHS',
                        help='length of learning rate cosine rampdown (>= length of training)')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--nesterov', default=True, type=str2bool,
                        help='use nesterov momentum', metavar='BOOL')
    parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)')
    # --- mean-teacher / consistency loss ---
    parser.add_argument('--ema-decay', default=0.999, type=float, metavar='ALPHA',
                        help='ema variable decay rate (default: 0.999)')
    parser.add_argument('--consistency', default=None, type=float, metavar='WEIGHT',
                        help='use consistency loss with given weight (default: None)')
    parser.add_argument('--consistency-type', default="mse", type=str, metavar='TYPE',
                        choices=['mse', 'kl'],
                        help='consistency loss type to use')
    parser.add_argument('--consistency-rampup', default=5, type=int, metavar='EPOCHS',
                        help='length of the consistency loss ramp-up')
    parser.add_argument('--logit-distance-cost', default=-1, type=float, metavar='WEIGHT',
                        help='let the student model have two outputs and use an MSE loss between the logits with the given weight (default: only have one output)')
    # --- checkpointing / evaluation / misc ---
    parser.add_argument('--checkpoint-epochs', default=10, type=int,
                        metavar='EPOCHS', help='checkpoint frequency in epochs, 0 to turn checkpointing off (default: 1)')
    parser.add_argument('--evaluation-epochs', default=1, type=int,
                        metavar='EPOCHS', help='evaluation frequency in epochs, 0 to turn evaluation off (default: 1)')
    parser.add_argument('--print-freq', '-p', default=100, type=int,
                        metavar='N', help='print frequency (default: 10)')
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('-e', '--evaluate', type=str2bool,
                        help='evaluate model on evaluation set')
    parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                        help='use pre-trained model')
    parser.add_argument('--gpu-id', type=str, default='0',
                        help='gpu id')
    parser.add_argument('--dfs-k', type=int, default=50,
                        help='diffusion k')
    parser.add_argument('--fully-supervised', default=False, type=str2bool, metavar='BOOL',
                        help='is fully-supervised')
    parser.add_argument('--isL2', default=True, type=str2bool, metavar='BOOL',
                        help='is l2 normalized features')
    parser.add_argument('--num-labeled', type=int, default=1000,
                        help='number of labeled instances')
    parser.add_argument('--test-mode', type=str, default='',
                        help='number of labeled instances')
    parser.add_argument('--isMT', default=False, type=str2bool, metavar='BOOL',
                        help='is combined with mean teacher')

    return parser
def parse_commandline_args():
    """Parse sys.argv with the full training argument parser."""
    parser = create_parser()
    return parser.parse_args()
def parse_dict_args(**kwargs):
    """Parse keyword arguments as if they were command-line flags,
    e.g. parse_dict_args(batch_size=32) behaves like '--batch-size 32'."""
    def to_cmdline_kwarg(key, value):
        # single-letter keys become short flags; underscores become dashes
        if len(key) == 1:
            key = "-{}".format(key)
        else:
            key = "--{}".format(re.sub(r"_", "-", key))
        value = str(value)
        return key, value

    kwargs_pairs = (to_cmdline_kwarg(key, value)
                    for key, value in kwargs.items())
    # flatten [(flag, value), ...] into [flag, value, flag, value, ...]
    cmdline_args = list(sum(kwargs_pairs, ()))
    LOG.info("Using these command line args: %s", " ".join(cmdline_args))
    return create_parser().parse_args(cmdline_args)
def str2bool(v):
    """Parse a human-friendly boolean string; raise for anything else."""
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def str2epochs(v):
    """Parse a comma-separated, strictly increasing list of positive epochs.

    Returns [] for the empty string. Raises argparse.ArgumentTypeError on
    non-integer tokens or when the values are not strictly increasing.
    """
    try:
        if len(v) == 0:
            epochs = []
        else:
            epochs = [int(string) for string in v.split(",")]
    # BUGFIX: narrowed from a bare `except:`; AttributeError is kept so that
    # non-string inputs still produce the same argparse error as before.
    except (ValueError, AttributeError, TypeError):
        raise argparse.ArgumentTypeError(
            'Expected comma-separated list of integers, got "{}"'.format(v))
    # pairwise check only: a single epoch is accepted without a bound check
    if not all(0 < epoch1 < epoch2 for epoch1, epoch2 in zip(epochs[:-1], epochs[1:])):
        raise argparse.ArgumentTypeError(
            'Expected the epochs to be listed in increasing order')
    return epochs
| 1.9375 | 2 |
notebook/pnud_textanalysis/embedding.py | dlegor/textmining_pnud | 0 | 12761369 | <gh_stars>0
import pandas as pd
import numpy as np
import dask
from typing import Union,List,Any
import spacy
sp_nlp=spacy.load('es_core_news_md')
tokens_aux=sp_nlp('algo',disable=['entity','ner'])
nan_sentence=sp_nlp('Sin Comentario')
vector_aux=tokens_aux.vector
def vector_representation(str_vec:str)->np.ndarray:
    """spaCy document vector for *str_vec*; falls back to the precomputed
    vector of 'algo' when the text yields an empty vector."""
    tokens=sp_nlp(str_vec,disable=['entity','ner'])
    if len(tokens.vector)==0:
        # no usable token vectors: return the module-level fallback
        return vector_aux
    else:
        return tokens.vector
def vector_courpus(frame: Union[pd.DataFrame, pd.Series, List[str]]) -> Any:
    """Embed a corpus of documents as an (n_docs, dim) array, in parallel via dask.

    Accepts a single-column DataFrame, a Series, or a list of strings.

    BUGFIX: the signature previously read ``frame=Union[...]``, which bound
    the Union object as a *default value* instead of a type annotation, so
    calling without an argument crashed with NameError. It is now a proper
    annotated (required) parameter.
    """
    if isinstance(frame, pd.DataFrame):
        assert frame.shape[1] == 1, "The Shape must be (n_rows,1)"
        # BUGFIX: DataFrame has no .to_list(); extract the single column first.
        frame_int = frame.iloc[:, 0].to_list()
    elif isinstance(frame, pd.Series):
        frame_int = frame.to_list()
    else:
        frame_int = frame
    L = [dask.delayed(vector_representation)(x) for x in frame_int]
    L_out = dask.compute(*L)
    return np.asarray(L_out)
def _similarity_pair(sentence1:str,sentence2:str)->float:
    """Cosine similarity between two sentences via spaCy doc vectors.

    Blank/near-empty inputs are replaced by the placeholder 'Sin Comentario'
    so that they still produce a comparable vector.
    """
    if len(sentence1.strip())<3:
        sentence1='Sin Comentario'
    if len(sentence2.strip())<3:
        sentence2='Sin Comentario'
    s1=sp_nlp(sentence1)
    s2=sp_nlp(sentence2)
    if s1.vector.size==0:
        if s2.vector.size==0:
            # both vectors empty: treat as identical
            return 1.0
        else:
            # BUGFIX: this value was computed but never returned, so the
            # function silently fell through and returned None
            return s2.similarity(nan_sentence)
    else:
        return s1.similarity(s2)
| 2.28125 | 2 |
presigned-url.py | MarcoPolo/hydra-booster-infra | 0 | 12761370 | from sys import argv
import boto3
# unfortunately the AWS CLI cannot generate presigned S3 URLs for PutObject requests,
# so we have to do it with a proper AWS SDK
url = boto3.client('s3').generate_presigned_url(
ClientMethod='put_object',
Params={'Bucket': argv[1], 'Key': argv[2]},
ExpiresIn=3600
)
print(url)
| 2.5 | 2 |
Lintcode/G_Practice/Tag_LinkedList/134. LRU Cache.py | ctc316/algorithm-python | 0 | 12761371 | <filename>Lintcode/G_Practice/Tag_LinkedList/134. LRU Cache.py<gh_stars>0
class Node:
    """Doubly linked list node holding one cache entry (key -> val)."""
    def __init__(self, key="", val=-1, prev=None, next=None):
        self.key = key
        self.val = val
        self.prev = prev
        self.next = next

class LRUCache:
    """
    @param: capacity: An integer
    """
    def __init__(self, capacity):
        # key -> Node; the list keeps most-recently-used at head,
        # least-recently-used at tail
        self.capacity = capacity
        self.mapping = {}
        self.head = None
        self.tail = None

    """
    @param: key: An integer
    @return: An integer
    """
    def get(self, key):
        if key not in self.mapping:
            return -1
        node = self.mapping[key]
        self.__moveToHead(node)
        return node.val

    """
    @param: key: An integer
    @param: value: An integer
    @return: nothing
    """
    def set(self, key, value):
        if key in self.mapping:
            # existing key: update in place and mark as most recently used
            node = self.mapping[key]
            node.val = value
            self.__moveToHead(node)
            return
        if len(self.mapping) >= self.capacity:
            self.__removeTail()
        new_node = Node(key, value, None, self.head)
        self.mapping[key] = new_node
        if self.head:
            self.head.prev = new_node
        self.head = new_node
        if self.tail is None:
            self.tail = self.head

    def __moveToHead(self, node):
        if node is self.head:
            return
        # unlink from current position
        if node.prev:
            node.prev.next = node.next
        if node.next:
            node.next.prev = node.prev
        if node is self.tail:
            self.tail = node.prev
        # relink at head
        self.head.prev = node
        node.next = self.head
        # BUGFIX: prev was left pointing at the old neighbour, which later
        # corrupted eviction (__removeTail follows tail.prev)
        node.prev = None
        self.head = node

    def __removeTail(self):
        # BUGFIX: the original left self.head pointing at the evicted node
        # when the list had a single element (capacity == 1), so subsequent
        # evictions removed the wrong entry and leaked stale nodes.
        if self.tail is None:
            return
        evicted = self.tail
        del self.mapping[evicted.key]
        self.tail = evicted.prev
        if self.tail:
            self.tail.next = None
        else:
            self.head = None
| 3.46875 | 3 |
services/django_server/mydata/local_settings-example.py | aapris/MyData | 0 | 12761372 | <gh_stars>0
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*'] # TODO: to env -- '*' accepts any Host header; restrict in production
TIME_ZONE = 'Europe/Helsinki' # TODO: to env
# weeks start on Monday
FIRST_DAY_OF_WEEK = 1
MONTH_DAY_FORMAT = 'j F '
| 1.234375 | 1 |
simulations/simul_abm.py | wazaahhh/pgames | 0 | 12761373 | <reponame>wazaahhh/pgames<filename>simulations/simul_abm.py<gh_stars>0
import os
import numpy as np
from datetime import datetime
# parameter sweep: every combination of M, d and s is run once
constants = {}
variables = {"M" : [9],
             'd' : list(np.linspace(0.7,1,4)),
             's' : [0]} # + list(np.linspace(0.005,0.060,12))
# full sweep (kept for reference):
#variables = {"M" : [1,2,3,5,7,9],
#             'd' : list(np.linspace(0.1,1,10)),
#             's' : [0] + list(np.linspace(0.005,0.060,12))}
variableNames = ['M','d','s']
def formatCommand(variableNames, values, configFile="pgame.cfg"):
    """Build the './abm' command line for one parameter combination."""
    flag_str = ""
    for i in range(len(variableNames)):
        flag_str += "-%s %s " % (variableNames[i], values[i])
    return "./abm " + flag_str + " -c %s" % configFile
def simul(configFile="pgame.cfg",dryRun=False):
    """Sweep all (M, d, s) combinations and launch ./abm once per combination.

    With dryRun=True the commands are only printed, not executed.
    """
    counter = 0
    for i,ix in enumerate(variables['M']):
        for j,jx in enumerate(variables['d']):
            for k,kx in enumerate(variables['s']):
                counter += 1
                command = formatCommand(variableNames,[ix,jx,kx],configFile=configFile)
                print counter,ix,jx,kx, "command: ", command, " " , datetime.now()
                if dryRun:
                    continue
                # NOTE(review): os.system runs through the shell; values come
                # from the hard-coded sweep above, so injection is not a
                # concern here, but keep it that way.
                os.system(command)
tests/test_attributes.py | pmeier/pytorch_testing_utils | 0 | 12761374 | <reponame>pmeier/pytorch_testing_utils
import itertools
import pytest
import torch
import pytorch_testing_utils as ptu
from .marks import skip_if_cuda_not_available
def test_assert_tensor_dtype_equal(subtests):
    """Equal dtypes must be accepted; any mismatch must raise AssertionError."""
    dtypes = (
        torch.float32,
        torch.float64,
        torch.float16,
        torch.uint8,
        torch.int8,
        torch.int16,
        torch.int32,
        torch.int64,
        torch.bool,
    )

    def make_pair(d1, d2):
        return torch.empty(1, dtype=d1), torch.empty(1, dtype=d2)

    # identical dtypes -> must not raise
    for dtype in dtypes:
        first, second = make_pair(dtype, dtype)
        with subtests.test(dtype1=dtype, dtype2=dtype):
            ptu.assert_tensor_dtype_equal(first, second)

    # every ordered pair of distinct dtypes -> must raise
    for dtype1, dtype2 in itertools.permutations(dtypes, 2):
        first, second = make_pair(dtype1, dtype2)
        with subtests.test(dtype1=dtype1, dtype2=dtype2):
            with pytest.raises(AssertionError):
                ptu.assert_tensor_dtype_equal(first, second)
def test_assert_tensor_layout_equal(subtests):
    """ptu.assert_tensor_layout_equal accepts matching layouts, rejects differing ones."""
    layouts = (torch.strided, torch.sparse_coo)
    # Identical layouts on both tensors must pass.
    for layout in layouts:
        first = torch.empty(1, layout=layout)
        second = torch.empty(1, layout=layout)
        with subtests.test(layout1=layout, layout2=layout):
            ptu.assert_tensor_layout_equal(first, second)
    # Every ordered pair of distinct layouts must raise.
    for layout_a, layout_b in itertools.permutations(layouts, 2):
        first = torch.empty(1, layout=layout_a)
        second = torch.empty(1, layout=layout_b)
        with subtests.test(layout1=layout_a, layout2=layout_b):
            with pytest.raises(AssertionError):
                ptu.assert_tensor_layout_equal(first, second)
@skip_if_cuda_not_available
def test_assert_tensor_device_equal(subtests):
    """ptu.assert_tensor_device_equal accepts matching devices, rejects differing ones.

    Runs over the CPU plus every visible CUDA device; skipped entirely when
    CUDA is unavailable.
    """
    devices = (
        torch.device("cpu"),
        *[
            torch.device("cuda", ordinal)
            for ordinal in range(torch.cuda.device_count())
        ],
    )
    # Identical device on both tensors must pass.
    for device in devices:
        tensor1 = torch.empty(1, device=device)
        tensor2 = torch.empty(1, device=device)
        with subtests.test(device1=device, device2=device):
            ptu.assert_tensor_device_equal(tensor1, tensor2)
    # Every ordered pair of distinct devices must raise.
    for device1, device2 in itertools.permutations(devices, 2):
        tensor1 = torch.empty(1, device=device1)
        tensor2 = torch.empty(1, device=device2)
        with subtests.test(device1=device1, device2=device2):
            with pytest.raises(AssertionError):
                ptu.assert_tensor_device_equal(tensor1, tensor2)
# FIXME: Enable this when ptu.assert_tensor_memory_format_equal is implemented.
# def test_assert_tensor_memory_format_equal(subtests):
# memory_formats = (
# torch.contiguous_format,
# torch.channels_last,
# torch.preserve_format,
# )
#
# for memory_format in memory_formats:
# tensor1 = torch.empty(1, memory_format=memory_format)
# tensor2 = torch.empty(1, memory_format=memory_format)
# with subtests.test(memory_format1=memory_format, memory_format2=memory_format):
# ptu.assert_tensor_memory_format_equal(tensor1, tensor2)
#
# for memory_format1, memory_format2 in itertools.permutations(memory_formats, 2):
# tensor1 = torch.empty(1, memory_format=memory_format1)
# tensor2 = torch.empty(1, memory_format=memory_format2)
# with subtests.test(
# memory_format1=memory_format1, memory_format2=memory_format2
# ):
# with pytest.raises(AssertionError):
# ptu.assert_tensor_memory_format_equal(tensor1, tensor2)
| 2.1875 | 2 |
minigest/magazzino/serializers/inventario_prodotto.py | ctrlmaniac/minigest | 0 | 12761375 | <reponame>ctrlmaniac/minigest<filename>minigest/magazzino/serializers/inventario_prodotto.py
from rest_framework import serializers
from ..models import InventarioProdotto
class InventarioProdottoSerializer(serializers.ModelSerializer):
    """DRF serializer for one InventarioProdotto (inventory-product) row."""
    # Read-only convenience field exposing the related product's name.
    prodotto_nome = serializers.CharField(source="prodotto.nome", read_only=True)
    class Meta:
        model = InventarioProdotto
        fields = [
            "id",
            "inventario",
            "prodotto",
            "prodotto_nome",
            "quantita",
            "totale_acquisti",
            "venduto",
            "giacenza_precedente",
        ]
        # "venduto" is computed/maintained elsewhere, so it is not writable here.
        read_only_fields = ["prodotto_nome", "venduto"]
| 1.828125 | 2 |
src/folderconstants.py | imperial-qore/TranAD | 31 | 12761376 | # Data folders
# Directory that processed data sets are written to.
output_folder = 'processed'
# Directory that raw input data is read from.
data_folder = 'data'
| 1.023438 | 1 |
tests/mkldnnpy_bench_tests/test_conv_bench.py | mingxiaoh/chainer-v3 | 0 | 12761377 | <reponame>mingxiaoh/chainer-v3
import chainer.links as L
import numpy as np
import time
from chainer import Variable
# Benchmark configuration: single-image batch, niter iterations with the
# first n_dry iterations treated as warm-up and excluded from the averages.
batch = 1
total_backward = 0
total_forward = 0
count = 0
niter = 13
n_dry = 3
# Dummy input batch of shape (batch, 3, 224, 224), filled with a constant.
data = np.ndarray((batch, 3, 224, 224), dtype=np.float32)
data.fill(333.33)
# Upstream gradient for the backward pass; matches the conv output shape.
y_grad = np.ones((batch, 64, 112, 112), dtype=np.float32)
# 3->64 channel 7x7 convolution, stride 2, pad 3.
conv = L.Convolution2D(3, 64, 7, stride=2, pad=3)
x = Variable(data)
for i in range(niter):
    print("iter:", i)
    # Time the forward pass.
    start = time.time()
    y = conv(x)
    end = time.time()
    if i > n_dry - 1:
        count += 1
        total_forward += (end-start)*1000
    # Time the backward pass with a fixed upstream gradient.
    y.grad = y_grad
    start = time.time()
    y.backward()
    end = time.time()
    if i > n_dry - 1:
        total_backward += (end-start)*1000
# Report mean per-iteration timings in milliseconds (warm-up excluded).
print("Average Forward: ", total_forward/count, "ms")
print("Average Backward: ", total_backward/count, "ms")
print("Average Total: ", (total_forward + total_backward)/count, "ms")
| 2.96875 | 3 |
indy_node/test/anon_creds/test_revoc_def_static_validation.py | brentzundel/indy-node | 2 | 12761378 | import copy
import json
import random
import pytest
from indy_common.constants import REVOC_TYPE, TAG, TAG_LIMIT_SIZE
from plenum.common.constants import GENERAL_LIMIT_SIZE, REQNACK, REJECT
from plenum.common.types import OPERATION
from plenum.common.util import randomString
from plenum.test.helper import sdk_sign_request_from_dict, sdk_send_signed_requests, sdk_get_replies
@pytest.fixture(scope="module", params=['lt', 'eq', 'gt'])
def _lt_eq_gt(request):
    # Parametrizes whether the generated field value is shorter than,
    # equal to, or longer than the allowed size limit.
    return request.param
@pytest.fixture(scope="module", params=[REVOC_TYPE, TAG])
def _res_field_size(request, _lt_eq_gt):
    """Yield (expected reply op, field name, field size) for one test case.

    Only a field longer than its limit is expected to fail static
    validation (REQNACK); sizes at or below the limit draw a REJECT.
    """
    _field = request.param
    _expected = REQNACK if _lt_eq_gt == 'gt' else REJECT
    # TAG has its own limit; every other field uses the general one.
    _valid_size = TAG_LIMIT_SIZE if _field == TAG else GENERAL_LIMIT_SIZE
    if _lt_eq_gt == 'lt':
        return _expected, _field, random.randint(0, _valid_size - 1)
    if _lt_eq_gt == 'eq':
        return _expected, _field, _valid_size
    return _expected, _field, random.randint(_valid_size + 1, 2 * _valid_size)
@pytest.fixture(scope="module")
def revoc_def_req(looper,
                  sdk_wallet_steward,
                  build_revoc_def_by_default,
                  _res_field_size):
    """Build a signed revocation-definition request whose chosen field is
    resized per _res_field_size; returns (expected reply op, signed request)."""
    _expected, _field, _size = _res_field_size
    _req = copy.deepcopy(build_revoc_def_by_default)
    # Overwrite the chosen operation field with a random string of the chosen size.
    _req[OPERATION][_field] = randomString(_size)
    return _expected, sdk_sign_request_from_dict(looper, sdk_wallet_steward, _req['operation'])
def test_revoc_def_static_validation_on_field_size(revoc_def_req,
                                                   looper,
                                                   txnPoolNodeSet,
                                                   sdk_pool_handle,
                                                   sdk_wallet_steward):
    """Send the prepared request and check the pool replies with the expected op."""
    _expected, _req = revoc_def_req
    results = sdk_send_signed_requests(sdk_pool_handle, [json.dumps(_req)])
    # Only the first node's reply is inspected; all nodes should agree.
    _reply = sdk_get_replies(looper, results)[0][1]
    assert _expected == _reply['op']
| 1.929688 | 2 |
sHAM_package/sHAM/old/uecqs.py | giosumarin/sHAM | 7 | 12761379 | <gh_stars>1-10
import numpy as np
from numpy import errstate, inf
from sHAM import weightsharing, uweightsharing, stochastic
import gc
def ECSQ(weights_to_quantize, k, wanted_clusters, lambd=0.5, tr=0.001):
    """Entropy-constrained scalar quantization of a list of weight matrices.

    weights_to_quantize -- list of 2-D weight arrays (flattened jointly)
    k                   -- initial number of quantization levels
    wanted_clusters     -- stop once at most this many non-empty levels remain
    lambd               -- weight of the -log(p) entropy penalty in the cost
    tr                  -- stop when the cost improves by no more than this

    Returns (c, idx_layers): the surviving level centers as a column vector
    and, per input matrix, an integer array of level indices.
    """
    J_last = inf
    # Flatten all layers into one 1-D vector of weights.
    stacked = [np.hstack(w) for w in weights_to_quantize]
    vect_weights = np.concatenate(stacked, axis=None)
    dim = len(vect_weights)
    # Initialize k levels from equal-size chunks of the sorted weights:
    # c = chunk means (centers), p = chunk mass fractions (probabilities).
    w_split = np.array_split(np.sort(vect_weights), k)
    c = np.array([np.mean(w) for w in w_split]).reshape(-1,1)
    p = np.array([w.size/dim for w in w_split]).reshape(-1,1)
    dim_weights = [w.shape for w in weights_to_quantize]
    idx_layers = [np.zeros_like(w, dtype='int16') for w in weights_to_quantize]
    stacked_idx = [np.hstack(idx) for idx in idx_layers]
    vect_idx = np.concatenate(stacked_idx, axis=None)
    # Initial assignment: each weight belongs to the chunk it was split into.
    shift = 0
    for i in range(len(w_split)):
        vect_idx[shift:shift+len(w_split[i])] = i
        shift += len(w_split[i])
    while True:
        J = 0
        # Assignment step: pick the level minimizing squared distance plus
        # the entropy penalty -lambd*log(p); log(0) -> -inf is silenced so
        # empty levels are never chosen.
        for i, elem in enumerate(vect_weights):
            with errstate(divide='ignore'):
                j_t = np.square(np.abs(elem-c)) - lambd*np.log(p)
            l = np.argmin(j_t)
            vect_idx[i] = l
            J += j_t[l]/dim
        # Update step: recompute each level's center and mass; mark empty
        # levels with -inf so they can be pruned afterwards.
        for i in range(len(c)):
            c[i] = np.mean(vect_weights[vect_idx == i]) if len(vect_idx[vect_idx == i]) != 0 else -inf
            p[i] = len(vect_idx[vect_idx == i])/dim
        # Stop on small cost improvement or once few enough levels survive.
        if J_last - J <= tr or wanted_clusters >= len(c[c!=-inf]):
            break
        J_last = J
    # Re-number the surviving levels densely: every index above a pruned
    # (empty) level is shifted down by one.
    new_vect_idx = np.copy(vect_idx)
    for i_c in range(len(c)):
        if c[i_c] == -inf:
            new_vect_idx[vect_idx >= i_c] -= 1
    c = (c[(c != -inf)].reshape(-1,1))
    print(len(c))
    # Un-flatten the index vector back into one array per input matrix.
    idx_layers = []
    for row, col in dim_weights:
        idx_layers.append((new_vect_idx[:row*col]).reshape(row,col))
        new_vect_idx = new_vect_idx[row*col:]
    return c, idx_layers
class uECQS_NN(uweightsharing.uWeightsharing_NN):
    """Quantizes a network's dense-layer weights with the ECSQ routine above."""
    def __init__(self, model, clusters_for_dense_layers, wanted_clusters, index_first_dense, tr=0.001, lamb=0.5, apply_compression_bias=False, div=None):
        # NOTE(review): model presumably exposes the Keras get_weights/set_weights
        # API (see apply_uECQS) -- confirm against callers.
        self.model = model
        # Initial number of quantization levels k handed to ECSQ.
        self.clusters = clusters_for_dense_layers
        self.index_first_dense = index_first_dense
        self.lamb = lamb
        self.tr = tr
        self.wanted_clusters = wanted_clusters
        # div is the stride over the weight list starting at index_first_dense;
        # presumably div == 1 includes bias arrays and div == 2 skips them.
        if div:
            self.div=div
        else:
            self.div = 1 if apply_compression_bias else 2
    def apply_uECQS(self, list_trainable=None, untrainable_per_layers=None):
        """Run ECSQ on the selected weight arrays and write the quantized
        weights back into the model via recompose_weight."""
        if not list_trainable:
            list_weights = self.model.get_weights()
        else:
            # Trainable variables were passed in directly (e.g. TF tensors).
            list_weights=[]
            for w in (list_trainable):
                list_weights.append(w.numpy())
        d = self.index_first_dense
        # Select every self.div-th weight array starting from the first dense layer.
        weights_to_quantize = [list_weights[i] for i in range (d, len(list_weights), self.div)]
        self.centers, self.idx_layers = ECSQ(weights_to_quantize, self.clusters, lambd=self.lamb, tr=self.tr, wanted_clusters=self.wanted_clusters)
        # ECSQ may prune empty levels, so record the surviving count.
        self.clusters = len(self.centers)
        if not list_trainable:
            self.untrainable_per_layers = 0
            self.model.set_weights(self.recompose_weight(list_weights))
        else:
            self.untrainable_per_layers = untrainable_per_layers
            self.model.set_weights(self.recompose_weight(list_weights, True, untrainable_per_layers))
        gc.collect()
| 1.578125 | 2 |
directory/web/migrations/0001_initial.py | ocwc/directory | 0 | 12761380 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the directory models
    (Country, the expertise lookup tables, Person and Region).

    Do not edit by hand except to resolve conflicts; generate follow-up
    migrations instead.
    """
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Country',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
                ('name', models.CharField(max_length=255)),
                ('iso_code', models.CharField(max_length=6)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='GeneralExpertise',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
                ('name', models.CharField(max_length=255)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='MOOCExpertise',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
                ('name', models.CharField(max_length=255)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='OERExpertise',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
                ('name', models.CharField(max_length=255)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='OpenAccessExpertise',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
                ('name', models.CharField(max_length=255)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Person is the central profile model; it links to every lookup table.
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('email', models.CharField(max_length=255)),
                ('alternative_contact', models.CharField(max_length=255, blank=True)),
                ('slug', models.SlugField(max_length=255)),
                ('job_title', models.CharField(max_length=255, blank=True)),
                ('institution', models.CharField(max_length=255, blank=True)),
                ('is_member', models.CharField(choices=[(0, "Don't know"), (1, 'Yes'), (2, 'No')], max_length=10, verbose_name='Open Education Consortium member?')),
                ('city', models.CharField(max_length=255, blank=True)),
                ('state_province', models.CharField(max_length=255, blank=True)),
                ('language_native', models.TextField(blank=True, verbose_name='Native/near native level')),
                ('language_business', models.TextField(blank=True, verbose_name='Business level')),
                ('language_conversational', models.TextField(blank=True, verbose_name='Conversational')),
                ('general_expertise_other', models.TextField(max_length=255, blank=True, verbose_name='Other, please indicate')),
                ('oer_expertise_other', models.TextField(blank=True, verbose_name='Other, please indicate:')),
                ('openacess_expertise_other', models.TextField(blank=True, verbose_name='Other, please indicate:')),
                ('mooc_expertise_other', models.TextField(blank=True)),
                ('discipline', models.TextField(blank=True, verbose_name='If you have expertise with open education in a particular discipline, please indicate:')),
                ('personal_statement', models.TextField(blank=True)),
                ('external_links', models.TextField(blank=True)),
                ('pub_date', models.DateTimeField(auto_now_add=True)),
                ('mod_date', models.DateTimeField(auto_now=True)),
                ('visible', models.BooleanField(default=True)),
                ('country', models.ForeignKey(to='web.Country', null=True)),
                ('general_expertise', models.ManyToManyField(to='web.GeneralExpertise', verbose_name='Open Education - General', null=True)),
                ('mooc_expertise', models.ManyToManyField(to='web.MOOCExpertise', verbose_name='If you have expertise with open education in a particular discipline, please indicate:', null=True)),
                ('oer_expertise', models.ManyToManyField(to='web.OERExpertise', verbose_name='Open Educational Resources', null=True)),
                ('openacess_expertise', models.ManyToManyField(to='web.OpenAccessExpertise', verbose_name='MOOCs', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Region',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
                ('name', models.CharField(max_length=255)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='person',
            name='region',
            field=models.ManyToManyField(to='web.Region', verbose_name='Please select the geographic regions in which you have professional experience:*'),
            preserve_default=True,
        ),
    ]
| 1.796875 | 2 |
StRoot/StSpinPool/StChargedPionAnalysisMaker/scripts/plots/old.py | xiaohaijin/RHIC-STAR | 2 | 12761381 | import os
import math
from glob import glob
import ROOT
import analysis
# Local locations of the Run 5 / Run 6 charged-pion trees and histogram files.
run5_tree_dir = '/Users/kocolosk/data/run5/tree'
run5_hist_dir = '/Users/kocolosk/data/run5/hist'
run6_tree_dir = '/Users/kocolosk/data/run6/tree'
run6_hist_dir = '/Users/kocolosk/data/run6/hist'
# Relative-luminosity scaler inputs shipped with StTamuRelLum.
run5_scalar_path = os.environ['STAR'] + '/StRoot/StSpinPool/StTamuRelLum/inputs/run5.txt'
run6_scalar_path = os.environ['STAR'] + '/StRoot/StSpinPool/StTamuRelLum/inputs/run6.txt'
def spin2006_asymmetries():
"""asymmetries for charged pion production shown at SPIN 2006"""
asym_plus = analysis.AsymmetryGenerator('asym_plus')
asym_minus = analysis.AsymmetryGenerator('asym_minus')
runlist = analysis.asym.golden_runlist_c
scalar_path = os.environ['STAR'] + '/StRoot/StSpinPool/StTamuRelLum/inputs/run5.txt'
scalars = analysis.ScalarCounts(scalar_path)
polarizations = analysis.Polarizations.Online
theory = analysis.asym.theoryCurves()
plusGraphs = [ theory.getGraph('plus',key) for key in ('std','zero','max','min') ]
minusGraphs = [ theory.getGraph('minus',key) for key in ('std','zero','max','min') ]
## systematic uncertainties
baseline = -0.1
syst_x = [3.0, 5.0, 7.0, 9.0]
syst = {'plus': [7.3, 8.4, 7.5, 5.1], 'minus': [5.7, 6.0, 5.7, 7.1] }
systGraph = {'plus': ROOT.TGraph(len(syst_x)+3), 'minus': ROOT.TGraph(len(syst_x)+3) }
for charge in ('plus','minus'):
systGraph[charge].SetPoint(0, 3.0, baseline)
systGraph[charge].SetPoint(5, 9.0, baseline)
systGraph[charge].SetPoint(6, 3.0, baseline)
for i,val in enumerate(syst[charge]):
systGraph[charge].SetPoint(i+1, syst_x[i], baseline + (val/1000.0))
## generate the asymmetries
allFiles = glob(run5_hist_dir + '/chargedPions_*.hist.root')
for fname in allFiles:
run = analysis.getRun(fname)
if runlist is None or run in runlist:
print fname, run
tfile = ROOT.TFile(fname)
mgr = analysis.HistogramManager(tfile,['pt'])
try:
bin7 = scalars[str(run) + '-5-7']
bin8 = scalars[str(run) + '-5-8']
bin9 = scalars[str(run) + '-5-9']
except KeyError:
print run, 'is not in the scalars database'
continue
uu = bin7.uu + bin8.uu + bin9.uu
ud = bin7.ud + bin8.ud + bin9.ud
du = bin7.du + bin8.du + bin9.du
dd = bin7.dd + bin8.dd + bin9.dd
try:
pol = polarizations[bin7.fill]
except KeyError:
print fill, 'has no online polarization values'
asym_plus.FillFromHistogramManager(mgr, 'jetpatch', 1, uu, ud, du, dd, pol.py, pol.pb)
asym_minus.FillFromHistogramManager(mgr, 'jetpatch', -1, uu, ud, du, dd, pol.py, pol.pb)
tfile.Close()
## fun with graphics
h1 = asym_plus.GetAsymmetry('ll')
g1 = ROOT.TGraphErrors(h1)
h2 = asym_minus.GetAsymmetry('ll')
g2 = ROOT.TGraphErrors(h2)
## set numbers to exactly the prelim result
h1.SetBinContent(1, -0.0125)
h1.SetBinContent(2, 0.0297)
h1.SetBinContent(3, 0.0155)
h1.SetBinContent(4, -0.0371)
h1.SetBinError(1, 0.0054)
h1.SetBinError(2, 0.0135)
h1.SetBinError(3, 0.0263)
h1.SetBinError(4, 0.0477)
h2.SetBinContent(1, -0.0048)
h2.SetBinContent(2, -0.0247)
h2.SetBinContent(3, -0.0551)
h2.SetBinContent(4, 0.0140)
h2.SetBinError(1, 0.0056)
h2.SetBinError(2, 0.0142)
h2.SetBinError(3, 0.0278)
h2.SetBinError(4, 0.0512)
g1 = ROOT.TGraphErrors(h1)
g2 = ROOT.TGraphErrors(h2)
## ignore bin width errors
for gr in (g1,g2):
for point in range(gr.GetN()):
gr.SetPointError(point, 0.0, gr.GetErrorY(point))
line = ROOT.TLine(2.0, 0.0, 10.0, 0.0)
line.SetLineStyle(2)
latex = ROOT.TLatex()
#leg = ROOT.TLegend(0.13, 0.65, 0.35, 0.88)
#leg.SetFillStyle(0)
#leg.SetBorderSize(0)
#leg.AddEntry(plusGraphs[0],' GRSV-STD', 'l')
#leg.AddEntry(plusGraphs[1],' #Delta G = 0', 'l')
#leg.AddEntry(plusGraphs[2],' #Delta G = G', 'l')
#leg.AddEntry(plusGraphs[3],' #Delta G = -G', 'l')
bg = ROOT.TH1D(h1)
bg.Reset()
bg.SetYTitle(' A_{LL}')
bg.GetYaxis().SetRangeUser(-0.11, 0.11)
## pi-plus
c1 = ROOT.TCanvas('c1','A_{LL} for #pi^{+}', 1060, 800)
bg.SetXTitle('#pi^{+} P_{T} (GeV/c)')
bg.DrawCopy()
g1.SetMarkerSize(0.9);
g1.SetMarkerStyle(21)
g1.Draw('p')
[ g.Draw('l') for g in plusGraphs ]
systGraph['plus'].SetLineColor(1)
systGraph['plus'].SetFillColor(15)
systGraph['plus'].Draw('fl')
line.Draw('same')
#leg.Draw('p')
latex.DrawLatex(2.3,0.12," #vec{p} + #vec{p} #rightarrow #pi^{+} + X at #sqrt{s}=200 GeV \
-1< #eta^{#pi}< 1 ")
latex.DrawLatex(2.6,-0.07,"2005 STAR Preliminary");
## pi-minus
c2 = ROOT.TCanvas('c2','A_{LL} for #pi^{-}', 1060, 800)
bg.SetXTitle('#pi^{-} P_{T} (GeV/c)')
bg.DrawCopy()
g2.SetMarkerSize(0.9);
g2.SetMarkerStyle(20)
g2.Draw('p')
[ g.Draw('l') for g in minusGraphs ]
systGraph['minus'].SetLineColor(1)
systGraph['minus'].SetFillColor(15)
systGraph['minus'].Draw('fl')
line.Draw('same')
#leg.Draw('p')
latex.DrawLatex(2.3,0.12," #vec{p} + #vec{p} #rightarrow #pi^{-} + X at #sqrt{s}=200 GeV \
-1< #eta^{#pi}< 1 ")
latex.DrawLatex(2.6,-0.07,"2005 STAR Preliminary")
## add the new predictions
from analysis.asym import theoryCurves
plusGraphs2 = [
theoryCurves(analysis.asym.werner_plus_dss_cteqm5_std, analysis.xsec.werner_plus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_plus_dss_cteqm5_zero, analysis.xsec.werner_plus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_plus_dss_cteqm5_max, analysis.xsec.werner_plus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_plus_dss_cteqm5_min, analysis.xsec.werner_plus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_plus_dss_cteqm5_gsc, analysis.xsec.werner_plus_dss_cteqm5_pt).getGraph()
]
minusGraphs2 = [
theoryCurves(analysis.asym.werner_minus_dss_cteqm5_std, analysis.xsec.werner_minus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_minus_dss_cteqm5_zero, analysis.xsec.werner_minus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_minus_dss_cteqm5_max, analysis.xsec.werner_minus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_minus_dss_cteqm5_min, analysis.xsec.werner_minus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_minus_dss_cteqm5_gsc, analysis.xsec.werner_minus_dss_cteqm5_pt).getGraph()
]
leg = ROOT.TLegend(0.13, 0.65, 0.35, 0.88)
leg.SetFillStyle(0)
leg.SetBorderSize(0)
leg.AddEntry(plusGraphs2[0],' GRSV-STD', 'l')
leg.AddEntry(plusGraphs2[1],' #Delta G = 0', 'l')
leg.AddEntry(plusGraphs2[2],' #Delta G = G', 'l')
leg.AddEntry(plusGraphs2[3],' #Delta G = -G', 'l')
leg.AddEntry(plusGraphs2[4],' GS Set C', 'l')
for grList in (plusGraphs2, minusGraphs2):
grList[1].SetLineColor(ROOT.kBlue)
grList[2].SetLineColor(ROOT.kRed)
grList[3].SetLineColor(ROOT.kGreen)
grList[4].SetLineColor(ROOT.kMagenta)
for gr in grList:
gr.SetLineWidth(3)
for grList in (plusGraphs, minusGraphs):
for gr in grList:
gr.SetLineStyle(2)
c1.cd()
leg.Draw('p')
[ g.Draw('l') for g in plusGraphs2 ]
latex.DrawLatex(4.8, 0.09, 'solid: DSS dashed: mod. KKP')
c2.cd()
leg.Draw('p')
[ g.Draw('l') for g in minusGraphs2 ]
latex.DrawLatex(4.8, 0.09, 'solid: DSS dashed: mod. KKP')
raw_input('wait here:')
c1.Print('.gif')
c2.Print('.gif')
def asymmetries_for_publication_run5(runlist=None):
"""final results for inclusive asymmetries from Run 5"""
asym_plus = analysis.AsymmetryGenerator('asym_plus', key='pt')
asym_minus = analysis.AsymmetryGenerator('asym_minus', key='pt')
scalar_path = os.environ['STAR'] + '/StRoot/StSpinPool/StTamuRelLum/inputs/run5.txt'
scalars = analysis.ScalarCounts(scalar_path)
polarizations = analysis.Polarizations.Final
#theory = analysis.asym.theoryCurves()
#plusGraphs = [ theory.getGraph('plus',key) for key in ('std','zero','max','min') ]
#minusGraphs = [ theory.getGraph('minus',key) for key in ('std','zero','max','min') ]
from analysis.asym import theoryCurves
plusGraphs = [
theoryCurves(analysis.asym.werner_plus_dss_cteqm5_std, analysis.xsec.werner_plus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_plus_dss_cteqm5_zero, analysis.xsec.werner_plus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_plus_dss_cteqm5_max, analysis.xsec.werner_plus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_plus_dss_cteqm5_min, analysis.xsec.werner_plus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_plus_dss_cteqm5_gsc, analysis.xsec.werner_plus_dss_cteqm5_pt).getGraph()
]
minusGraphs = [
theoryCurves(analysis.asym.werner_minus_dss_cteqm5_std, analysis.xsec.werner_minus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_minus_dss_cteqm5_zero, analysis.xsec.werner_minus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_minus_dss_cteqm5_max, analysis.xsec.werner_minus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_minus_dss_cteqm5_min, analysis.xsec.werner_minus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_minus_dss_cteqm5_gsc, analysis.xsec.werner_minus_dss_cteqm5_pt).getGraph()
]
for grList in (plusGraphs, minusGraphs):
grList[1].SetLineStyle(3)
grList[1].SetLineColor(ROOT.kBlue)
grList[2].SetLineStyle(4)
grList[2].SetLineColor(ROOT.kRed)
grList[3].SetLineStyle(2)
grList[3].SetLineColor(ROOT.kGreen)
grList[4].SetLineStyle(5)
grList[4].SetLineColor(ROOT.kMagenta)
for gr in grList:
gr.SetLineWidth(3)
## systematic uncertainties
baseline = -0.1
syst_x = [3.0, 5.0, 7.0, 9.0]
## preliminary result
#syst = {'plus': [7.3, 8.4, 7.5, 5.1], 'minus': [5.7, 6.0, 5.7, 7.1] }
## assuming pT dependence in PID background
#syst = {'plus': [2.26, 2.99, 4.59, 10.26], 'minus': [1.05, 1.66, 7.07, 12.41] }
syst = {}
tmp = systematic_uncertainty_run5('plus')
syst['plus'] = [1000*elem for elem in tmp]
tmp = systematic_uncertainty_run5('minus')
syst['minus'] = [1000*elem for elem in tmp]
systGraph = {'plus': ROOT.TGraph(len(syst_x)+3), 'minus': ROOT.TGraph(len(syst_x)+3) }
for charge in ('plus','minus'):
systGraph[charge].SetPoint(0, 3.0, baseline)
systGraph[charge].SetPoint(5, 9.0, baseline)
systGraph[charge].SetPoint(6, 3.0, baseline)
for i,val in enumerate(syst[charge]):
systGraph[charge].SetPoint(i+1, syst_x[i], baseline + (val/1000.0))
## generate the asymmetries
allFiles = glob(run5_hist_dir + '/chargedPions_*.hist.root')
for fname in allFiles:
run = analysis.getRun(fname)
if runlist is None or run in runlist:
print fname, run
tfile = ROOT.TFile(fname)
mgr = analysis.HistogramManager(tfile,['pt', 'pt_near', 'pt_away'])
try:
bin7 = scalars[str(run) + '-5-7']
bin8 = scalars[str(run) + '-5-8']
bin9 = scalars[str(run) + '-5-9']
except KeyError:
print run, 'is not in the scalars database'
continue
uu = bin7.uu + bin8.uu + bin9.uu
ud = bin7.ud + bin8.ud + bin9.ud
du = bin7.du + bin8.du + bin9.du
dd = bin7.dd + bin8.dd + bin9.dd
try:
pol = polarizations[bin7.fill]
except KeyError:
print bin7.fill, 'has no final polarization values'
continue
#asym_plus.FillFromHistogramManager(mgr, 'alltrigs', 1, uu, ud, du, dd, pol.py, pol.pb)
#asym_minus.FillFromHistogramManager(mgr, 'alltrigs', -1, uu, ud, du, dd, pol.py, pol.pb)
asym_plus.FillFromHistogramManager(mgr, 'jetpatch', 1, uu, ud, du, dd, pol.py, pol.pb)
asym_minus.FillFromHistogramManager(mgr, 'jetpatch', -1, uu, ud, du, dd, pol.py, pol.pb)
tfile.Close()
## fun with graphics
h1 = asym_plus.GetAsymmetry('ll')
g1 = ROOT.TGraphErrors(h1)
h2 = asym_minus.GetAsymmetry('ll')
g2 = ROOT.TGraphErrors(h2)
## ignore bin width errors
for gr in (g1,g2):
for point in range(gr.GetN()):
gr.SetPointError(point, 0.0, gr.GetErrorY(point))
line = ROOT.TLine(2.0, 0.0, 10.0, 0.0)
line.SetLineStyle(2)
latex = ROOT.TLatex()
leg = ROOT.TLegend(0.13, 0.62, 0.40, 0.89)
leg.SetFillStyle(0)
leg.SetBorderSize(0)
leg.AddEntry(plusGraphs[0],' GRSV-std', 'l')
leg.AddEntry(plusGraphs[2],' GRSV #Delta g = g input', 'l')
leg.AddEntry(plusGraphs[1],' GRSV #Delta g = 0 input', 'l')
leg.AddEntry(plusGraphs[3],' GRSV #Delta g = -g input', 'l')
leg.AddEntry(plusGraphs[4],' GS Set C', 'l')
bg = ROOT.TH1D(h1)
bg.Reset()
bg.SetYTitle(' A_{LL}')
bg.GetYaxis().SetRangeUser(-0.08, 0.06)
## combo plot
c3 = ROOT.TCanvas('c3', 'A_{LL} combined', 0, 0, 800, 350)
#c3 = ROOT.TCanvas('c3', 'A_{LL} combined', 0, 0, 350, 600)
c3.Draw()
titlepad = ROOT.TPad('titlepad', '', 0.0, 0.9, 1.0, 1.0)
leftpad = ROOT.TPad('leftpad','', 0.0, 0.0, 0.5, 1.0)
leftpad.SetLeftMargin(0.14)
leftpad.SetRightMargin(0.02)
#leftpad = ROOT.TPad('top','', 0.0, 0.45, 1.0, 0.9)
#leftpad.SetTopMargin(0.14)
#leftpad.SetRightMargin(0.02)
rightpad = ROOT.TPad('rightpad','', 0.5, 0.0, 1.0, 1.0)
rightpad.SetLeftMargin(0.11)
rightpad.SetRightMargin(0.05)
#rightpad = ROOT.TPad('bottom','', 0.0, 0.0, 1.0, 0.45)
#rightpad.SetLeftMargin(0.11)
#rightpad.SetRightMargin(0.05)
for pad in (titlepad, leftpad, rightpad):
pad.Draw()
pad.SetFillColor(10)
pad.SetBorderMode(0)
pad.SetFillStyle(4000) ## make it transparent
#leg2 = ROOT.TLegend(0.16, 0.62, 0.54, 0.90)
leg2 = ROOT.TLegend(0.16, 0.12, 0.6, 0.45)
leg2.SetFillStyle(0)
leg2.SetBorderSize(0)
leg2.AddEntry(plusGraphs[0],' GRSV-std', 'l')
leg2.AddEntry(plusGraphs[2],' GRSV #Deltag = g input', 'l')
leg2.AddEntry(plusGraphs[1],' GRSV #Deltag = 0 input', 'l')
leg2.AddEntry(plusGraphs[3],' GRSV #Deltag = -g input', 'l')
leg2.AddEntry(plusGraphs[4],' GS Set C', 'l')
titlepad.cd()
latex.SetTextSize(0.7)
latex.SetTextAlign(21)
latex.DrawLatex(0.5,0.3,"STAR #vec{p} + #vec{p} #rightarrow #pi + X at #sqrt{s}=200 GeV \
|#eta_{#pi}|<1.0")
leftpad.cd()
bg.SetXTitle('')
bg.SetYTitle('A_{LL}')
bg.GetYaxis().SetTitleSize(0.05)
bg.GetYaxis().SetTitleOffset(1.22)
bg.DrawCopy()
g2.SetMarkerSize(0.9);
g2.SetMarkerStyle(20)
g2.Draw('p')
[ g.Draw('l') for g in minusGraphs ]
systGraph['minus'].SetLineColor(1)
systGraph['minus'].SetFillColor(12)
systGraph['minus'].Draw('fl')
line.Draw('same')
leg2.Draw('p')
latex.SetTextSize(0.2)
latex.SetTextAlign(21)
#latex.DrawLatex(4.0,-0.075,'#pi^{-}')
latex.DrawLatex(3.1,0.03,'#pi^{-}')
rightpad.cd()
bg.SetXTitle('#pi P_{T} [GeV/c]')
bg.SetYTitle('')
bg.DrawCopy()
g1.SetMarkerSize(0.9);
g1.SetMarkerStyle(21)
g1.Draw('p')
[ g.Draw('l') for g in plusGraphs ]
systGraph['plus'].SetLineColor(1)
systGraph['plus'].SetFillColor(12)
systGraph['plus'].Draw('fl')
line.Draw('same')
#latex.DrawLatex(4.0,-0.075,'#pi^{+}')
latex.DrawLatex(3.1,0.03,'#pi^{+}')
'pi-plus fit to pol0'
h1.Fit('pol0', 'N', '', 2.0, 10.0)
'pi-minus fit to pol0'
h2.Fit('pol0', 'N', '', 2.0, 10.0)
for h in (h1,h2):
print h.GetName()
for i in range(h.GetNbinsX()):
print 'y=% .2e, stat=%.2e' % (h.GetBinContent(i+1), h.GetBinError(i+1))
raw_input('wait here:')
c3.Print('.eps')
def jet_correlations_run5(runlist=None):
    """3-D deta-dphi plot -- possible paper plot at one time. \
    Also plots the uncorrected pion momentum fraction for near-side
    and away-side
    """
    # Tweak global ROOT style for the lego plot; partially restored below.
    style = ROOT.TStyle(ROOT.gStyle)
    style.SetOptStat(0)
    style.SetLabelOffset(-0.01,'xy')
    style.SetLabelSize(0.035,'xy')
    style.SetTitleOffset(1.2,'y')
    style.cd()
    h1 = None
    h2 = None
    h3 = None
    ## silly hack
    # Keep the first TFile open so the cloned histograms stay valid.
    keepMeOpen = []
    # Sum dphi_deta, z and z_away histograms over all (selected) runs.
    allFiles = glob(run5_hist_dir + '/chargedPions_*.hist.root')
    for fname in allFiles:
        run = analysis.getRun(fname)
        if runlist is None or run in runlist:
            print fname, run
            tfile = ROOT.TFile(fname)
            mgr = analysis.HistogramManager(tfile,['dphi_deta', 'z', 'z_away'])
            if h1 is None:
                h1 = mgr['anyspin']['jetpatch'].tracks_sum['dphi_deta'].Clone()
                h2 = mgr['anyspin']['jetpatch'].tracks_sum['z'].Clone()
                h3 = mgr['anyspin']['jetpatch'].tracks_sum['z_away'].Clone()
                keepMeOpen.append(tfile)
            else:
                h1.Add(mgr['anyspin']['jetpatch'].tracks_sum['dphi_deta'])
                h2.Add(mgr['anyspin']['jetpatch'].tracks_sum['z'])
                h3.Add(mgr['anyspin']['jetpatch'].tracks_sum['z_away'])
    # Lego view of the pion-jet correlation in (dphi, deta).
    c1 = ROOT.TCanvas('c1')
    c1.SetLogz()
    h1.SetXTitle('#phi pion - #phi jet')
    h1.SetYTitle('#eta pion - #eta jet')
    h1.GetYaxis().SetRangeUser(-2.0, 0.4)
    h1.DrawCopy('lego2')
    #reset some styles
    style.SetLabelOffset(0.005,'xy')
    style.SetLabelSize(0.04,'xy')
    style.SetTitleOffset(1,'y')
    # <z> vs pT from Gaussian slice fits (FitSlicesY => "<name>_1" is the mean).
    c2 = ROOT.TCanvas('c2')
    h2.FitSlicesY()
    fig3c_mean = ROOT.gDirectory.Get('%s_1' % (h2.GetName(),))
    fig3c_mean.SetTitle('Uncorrected pion momentum fraction')
    fig3c_mean.SetXTitle('#pi p_{T} [GeV/c]')
    fig3c_mean.SetYTitle('< z >')
    fig3c_mean.SetAxisRange(0,1,'y')
    fig3c_mean.SetMarkerStyle(21)
    h3.FitSlicesY()
    fig3c_away_mean = ROOT.gDirectory.Get('%s_1' % (h3.GetName(),))
    fig3c_away_mean.SetMarkerStyle(25)
    fig3c_away_mean.SetMarkerColor(ROOT.kRed)
    fig3c_away_mean.SetLineColor(ROOT.kRed)
    leg = ROOT.TLegend(0.75,0.8,0.89,0.89)
    leg.AddEntry(fig3c_mean,'dR < 0.4','p')
    leg.AddEntry(fig3c_away_mean,'dR > 1.5','p')
    fig3c_mean.DrawCopy()
    fig3c_away_mean.Draw('same')
    leg.Draw('same')
    #c3 = ROOT.TCanvas('c3')
    #h3.Draw()
    # Combined canvas: <z> curves with the dphi-deta lego as an inset.
    c4 = ROOT.TCanvas('combo plot')
    mainpad = ROOT.TPad('mainpad', '', 0.0, 0.0, 1.0, 1.0)
    mainpad.SetLeftMargin(0.1)
    mainpad.SetRightMargin(0.03)
    mainpad.SetTopMargin(0.05)
    mainpad.SetBottomMargin(0.1)
    #mainpad.SetTicky()
    insetpad = ROOT.TPad('insetpad', '', 0.52, 0.13, 0.97, 0.6)
    insetpad.SetLogz()
    #insetpad.SetLeftMargin(0.)
    #insetpad.SetRightMargin(0.)
    #insetpad.SetTopMargin(0.)
    #insetpad.SetBottomMargin(0.)
    for pad in (mainpad, insetpad):
        pad.Draw()
        pad.SetFillColor(10)
        pad.SetBorderMode(0)
        pad.SetFillStyle(4000) ## make it transparent
    mainpad.cd()
    fig3c_mean.SetTitle('')
    fig3c_mean.GetYaxis().SetRangeUser(0., 0.7)
    fig3c_mean.Draw()
    fig3c_away_mean.Draw('same')
    insetpad.cd()
    h1.GetXaxis().SetTitle('#Delta #phi')
    h1.GetXaxis().SetTitleSize(0.1)
    h1.GetXaxis().SetLabelSize(0.06)
    h1.GetYaxis().SetTitle('#Delta #eta')
    h1.GetYaxis().SetTitleSize(0.1)
    h1.GetYaxis().SetLabelSize(0.06)
    ## this is temporary till we get the cuts right
    #h1.GetZaxis().SetRangeUser(10, 600000)
    #h1.GetYaxis().SetRangeUser(-1.5, 0.4)
    h1.GetYaxis().SetRangeUser(-1.7, 0.7)
    h1.DrawCopy('lego2')
    #h1.DrawCopy('col z')
    mainpad.cd()
    leg2 = ROOT.TLegend(0.15, 0.78, 0.45, 0.92)
    leg2.AddEntry(fig3c_mean,'trigger jet','p')
    leg2.AddEntry(fig3c_away_mean,'away-side jet','p')
    leg2.Draw()
    raw_input('wait here:')
def jet_correlations_run6():
    """3-D deta-dphi plot -- possible paper plot at one time. \
    Also plots the uncorrected pion momentum fraction for near-side
    and away-side
    """
    # Tweak global ROOT style for the lego plot; partially restored below.
    style = ROOT.TStyle(ROOT.gStyle)
    style.SetOptStat(0)
    style.SetLabelOffset(-0.01,'xy')
    style.SetLabelSize(0.035,'xy')
    style.SetTitleOffset(1.2,'y')
    style.cd()
    runlist = None
    h1 = None
    h2 = None
    h3 = None
    ## silly hack
    # Keep the first TFile open so the cloned histograms stay valid.
    keepMeOpen = []
    # Sum dphi_deta, z and z_away histograms over all runs (note: 'alltrigs'
    # here, versus 'jetpatch' in the Run 5 version).
    allFiles = glob(run6_hist_dir + '/chargedPions_*.hist.root')
    for fname in allFiles:
        run = analysis.getRun(fname)
        if runlist is None or run in runlist:
            print fname, run
            tfile = ROOT.TFile(fname)
            mgr = analysis.HistogramManager(tfile,['dphi_deta', 'z', 'z_away'])
            if h1 is None:
                h1 = mgr['anyspin']['alltrigs'].tracks_sum['dphi_deta'].Clone()
                h2 = mgr['anyspin']['alltrigs'].tracks_sum['z'].Clone()
                h3 = mgr['anyspin']['alltrigs'].tracks_sum['z_away'].Clone()
                keepMeOpen.append(tfile)
            else:
                h1.Add(mgr['anyspin']['alltrigs'].tracks_sum['dphi_deta'])
                h2.Add(mgr['anyspin']['alltrigs'].tracks_sum['z'])
                h3.Add(mgr['anyspin']['alltrigs'].tracks_sum['z_away'])
    # Lego view of the pion-jet correlation in (dphi, deta).
    c1 = ROOT.TCanvas('c1')
    c1.SetLogz()
    h1.SetXTitle('#phi pion - #phi jet')
    h1.SetYTitle('#eta pion - #eta jet')
    h1.DrawCopy('lego2')
    #reset some styles
    style.SetLabelOffset(0.005,'xy')
    style.SetLabelSize(0.04,'xy')
    style.SetTitleOffset(1,'y')
    # <z> vs pT from Gaussian slice fits (FitSlicesY => "<name>_1" is the mean).
    c2 = ROOT.TCanvas('c2')
    h2.FitSlicesY()
    fig3c_mean = ROOT.gDirectory.Get('%s_1' % (h2.GetName(),))
    fig3c_mean.SetTitle('Uncorrected pion momentum fraction')
    fig3c_mean.SetXTitle('#pi p_{T}')
    fig3c_mean.SetYTitle('< p_{T,#pi} / p_{T,jet} >')
    fig3c_mean.SetAxisRange(0,1,'y')
    fig3c_mean.SetMarkerStyle(21)
    h3.FitSlicesY()
    fig3c_away_mean = ROOT.gDirectory.Get('%s_1' % (h3.GetName(),))
    fig3c_away_mean.SetMarkerStyle(25)
    fig3c_away_mean.SetMarkerColor(ROOT.kRed)
    fig3c_away_mean.SetLineColor(ROOT.kRed)
    leg = ROOT.TLegend(0.75,0.8,0.89,0.89)
    leg.AddEntry(fig3c_mean,'dR < 0.4','p')
    leg.AddEntry(fig3c_away_mean,'dR > 1.5','p')
    fig3c_mean.Draw()
    fig3c_away_mean.Draw('same')
    leg.Draw('same')
    c3 = ROOT.TCanvas('c3')
    h3.Draw()
    raw_input('wait here:')
def asymmetry_statistics_comparison():
"""plots statistical prescision of A_{LL} for Run 5 (prelim + final)
and Run 5 + Run 6 combined away-side measurement
"""
prelim = analysis.AsymmetryGenerator('prelim')
final = analysis.AsymmetryGenerator('final')
combo = analysis.AsymmetryGenerator('combo', key='pt_away')
scalars_run5 = analysis.ScalarCounts(run5_scalar_path)
scalars_run6 = analysis.ScalarCounts(run6_scalar_path)
polarizations = analysis.Polarizations.Final
polarizations_prelim = analysis.Polarizations.Online
## generate the asymmetries
allFiles = glob(run5_hist_dir + '/chargedPions_*.hist.root')
for fname in allFiles:
run = analysis.getRun(fname)
print fname, run
tfile = ROOT.TFile(fname)
mgr = analysis.HistogramManager(tfile,['pt', 'pt_away'])
try:
bin7 = scalars_run5[str(run) + '-5-7']
bin8 = scalars_run5[str(run) + '-5-8']
bin9 = scalars_run5[str(run) + '-5-9']
except KeyError:
print run, 'is not in the scalars database'
continue
uu = bin7.uu + bin8.uu + bin9.uu
ud = bin7.ud + bin8.ud + bin9.ud
du = bin7.du + bin8.du + bin9.du
dd = bin7.dd + bin8.dd + bin9.dd
if run in analysis.golden_runlist_c:
try:
pol = polarizations_prelim[bin7.fill]
prelim.FillFromHistogramManager(mgr, 'jetpatch', 1, uu, ud, du, dd, pol.py, pol.pb)
except KeyError:
print bin7.fill, 'has no preliminary polarization values'
try:
pol = polarizations[bin7.fill]
final.FillFromHistogramManager(mgr, 'jetpatch', 1, uu, ud, du, dd, pol.py, pol.pb)
combo.FillFromHistogramManager(mgr, 'alltrigs', 1, uu, ud, du, dd, pol.py, pol.pb)
except KeyError:
print bin7.fill, 'has no final polarization values'
tfile.Close()
allFiles = glob(run6_hist_dir + '/chargedPions_*.hist.root')
for fname in allFiles:
run = analysis.getRun(fname)
print fname, run
tfile = ROOT.TFile(fname)
mgr = analysis.HistogramManager(tfile,['pt', 'pt_away'])
try:
bin6 = scalars_run6[str(run) + '-5-6']
bin7 = scalars_run6[str(run) + '-5-7']
bin8 = scalars_run6[str(run) + '-5-8']
bin9 = scalars_run6[str(run) + '-5-9']
except KeyError:
print run, 'is not in the scalars database'
continue
uu = bin6.uu + bin7.uu + bin8.uu + bin9.uu
ud = bin6.ud + bin7.ud + bin8.ud + bin9.ud
du = bin6.du + bin7.du + bin8.du + bin9.du
dd = bin6.dd + bin7.dd + bin8.dd + bin9.dd
try:
pol = polarizations[bin6.fill]
combo.FillFromHistogramManager(mgr, 'alltrigs', 1, uu, ud, du, dd, pol.py, pol.pb)
except KeyError:
print bin6.fill, 'has no final polarization values'
tfile.Close()
h1 = prelim.GetAsymmetry('ll')
h2 = final.GetAsymmetry('ll')
h3 = combo.GetAsymmetry('ll')
g1 = ROOT.TGraphErrors(h1)
g2 = ROOT.TGraphErrors(h2)
g3 = ROOT.TGraphErrors(h3)
for point in range(g1.GetN()):
x = h1.GetBinCenter(point+1)
g1.SetPoint(point, x-0.2, 0)
g2.SetPoint(point, x, 0)
g3.SetPoint(point, x+0.2, 0)
for gr in (g1,g2,g3):
gr.SetMarkerStyle(21)
gr.SetPointError(point, 0.0, gr.GetErrorY(point))
g2.SetMarkerColor(ROOT.kRed)
g2.SetLineColor(ROOT.kRed)
g3.SetMarkerColor(ROOT.kGreen)
g3.SetLineColor(ROOT.kGreen)
leg = ROOT.TLegend(0.13, 0.67, 0.40, 0.89)
leg.SetFillStyle(0)
leg.SetBorderSize(0)
leg.AddEntry(g1, '2005 prelim', 'p')
leg.AddEntry(g2, '2005 final', 'p')
leg.AddEntry(g3, '2005/6 away-side', 'p')
bg = ROOT.TH1D(h1)
bg.Reset()
bg.SetTitle('Statistical Precision of Various A_{LL} Measurements')
bg.SetXTitle('p_{T}')
bg.GetYaxis().SetRangeUser(-0.06, 0.06)
c = ROOT.TCanvas()
bg.DrawCopy()
g1.Draw('p')
g2.Draw('p')
g3.Draw('p')
leg.Draw()
raw_input('press enter:')
def trigger_bias_using_away_side(runlist=None, trgname='alltrigs'):
    """plot asym vs. pt for near + away, fit with pol0 and compare

    A_{LL} is built separately for near-side ('pt_near') and away-side
    ('pt_away') pions of each charge sign using Run 5 data.  Each
    spectrum is fit with a constant, and |near - away| plus the combined
    fit error is printed as a trigger-bias systematic estimate.
    """
    generator = {
        'near_plus' : analysis.AsymmetryGenerator('near_plus', key='pt_near'),
        'near_minus' : analysis.AsymmetryGenerator('near_minus', key='pt_near'),
        'away_plus' : analysis.AsymmetryGenerator('away_plus', key='pt_away'),
        'away_minus' : analysis.AsymmetryGenerator('away_minus', key='pt_away')
    }
    scalar_path = os.environ['STAR'] + '/StRoot/StSpinPool/StTamuRelLum/inputs/run5.txt'
    scalars = analysis.ScalarCounts(scalar_path)
    polarizations = analysis.Polarizations.Final
    ## generate the asymmetries
    allFiles = glob(run5_hist_dir + '/chargedPions_*.hist.root')
    for fname in allFiles:
        run = analysis.getRun(fname)
        if runlist is None or run in runlist:
            print fname, run
            tfile = ROOT.TFile(fname)
            mgr = analysis.HistogramManager(tfile,['pt_near', 'pt_away'])
            try:
                ## relative-luminosity scalars, Run 5 timebins 7-9
                bin7 = scalars[str(run) + '-5-7']
                bin8 = scalars[str(run) + '-5-8']
                bin9 = scalars[str(run) + '-5-9']
            except KeyError:
                print run, 'is not in the scalars database'
                continue
            ## spin-sorted bunch-crossing counts summed over the timebins
            uu = bin7.uu + bin8.uu + bin9.uu
            ud = bin7.ud + bin8.ud + bin9.ud
            du = bin7.du + bin8.du + bin9.du
            dd = bin7.dd + bin8.dd + bin9.dd
            try:
                pol = polarizations[bin7.fill]
            except KeyError:
                print bin7.fill, 'has no final polarization values'
                continue
            generator['near_plus'].FillFromHistogramManager(mgr, trgname, 1, uu, ud, du, dd, pol.py, pol.pb)
            generator['near_minus'].FillFromHistogramManager(mgr, trgname, -1, uu, ud, du, dd, pol.py, pol.pb)
            generator['away_plus'].FillFromHistogramManager(mgr, trgname, 1, uu, ud, du, dd, pol.py, pol.pb)
            generator['away_minus'].FillFromHistogramManager(mgr, trgname, -1, uu, ud, du, dd, pol.py, pol.pb)
            tfile.Close()
    #ROOT.gStyle.SetOptStat('n')
    #ROOT.gStyle.SetOptFit(111)
    ## constant fits to each asymmetry spectrum
    fit = {
        'near_plus' : ROOT.TF1('near_plus', 'pol0'),
        'near_minus' : ROOT.TF1('near_minus', 'pol0'),
        'away_plus' : ROOT.TF1('away_plus', 'pol0'),
        'away_minus' : ROOT.TF1('away_minus', 'pol0'),
    }
    fit['away_plus'].SetLineColor(ROOT.kRed)
    fit['away_minus'].SetLineColor(ROOT.kRed)
    c1 = ROOT.TCanvas('plus','Comparison of near and away side for #pi^{+}')
    h1_near = generator['near_plus'].GetAsymmetry('ll')
    h1_near.GetYaxis().SetRangeUser(-0.11, 0.11)
    h1_away = generator['away_plus'].GetAsymmetry('ll')
    h1_away.SetLineColor(ROOT.kRed)
    h1_near.Draw()
    h1_near.Fit(fit['near_plus'],'','same')
    h1_away.Draw('same')
    h1_away.Fit(fit['away_plus'],'','same')
    leg1 = ROOT.TLegend(0.13,0.7,0.43,0.89)
    leg1.AddEntry(fit['near_plus'],'%f +/- %f' %
        (fit['near_plus'].GetParameter(0), fit['near_plus'].GetParError(0)),'l')
    leg1.AddEntry(fit['away_plus'],'%f +/- %f' %
        (fit['away_plus'].GetParameter(0), fit['away_plus'].GetParError(0)),'l')
    leg1.Draw()
    c2 = ROOT.TCanvas('minus','Comparison of near and away side for #pi^{-}')
    h2_near = generator['near_minus'].GetAsymmetry('ll')
    h2_near.GetYaxis().SetRangeUser(-0.11, 0.11)
    h2_away = generator['away_minus'].GetAsymmetry('ll')
    h2_away.SetLineColor(ROOT.kRed)
    h2_near.Draw()
    h2_near.Fit(fit['near_minus'],'','same')
    h2_away.Draw('same')
    h2_away.Fit(fit['away_minus'],'','same')
    leg2 = ROOT.TLegend(0.13,0.7,0.43,0.89)
    leg2.AddEntry(fit['near_minus'],'%f +/- %f' %
        (fit['near_minus'].GetParameter(0), fit['near_minus'].GetParError(0)),'l')
    leg2.AddEntry(fit['away_minus'],'%f +/- %f' %
        (fit['away_minus'].GetParameter(0), fit['away_minus'].GetParError(0)),'l')
    leg2.Draw()
    ## systematic = |near - away| plus the quadrature sum of fit errors
    print 'Size of systematic assigned if we take the difference btw the fits with errors:'
    val = math.fabs( fit['near_plus'].GetParameter(0) - fit['away_plus'].GetParameter(0) )
    err = math.sqrt(fit['near_plus'].GetParError(0) ** 2 + fit['away_plus'].GetParError(0) ** 2)
    print 'plus : %f' % (val+err,)
    val = math.fabs( fit['near_minus'].GetParameter(0) - fit['away_minus'].GetParameter(0) )
    err = math.sqrt(fit['near_minus'].GetParError(0) ** 2 + fit['away_minus'].GetParError(0) ** 2)
    print 'minus : %f' % (val+err,)
    raw_input('press enter:')
def qa_pid():
    """Per-run QA of the nSigmaPion distribution (Run 5).

    Draws every run's nSigmaPion histogram, 15 pads per PostScript page
    ('blerg.ps'), and collects each run's mean into nSigmaRun.  Runs
    with a positive mean are highlighted in red.
    """
    allFiles = glob(run5_hist_dir + '/chargedPions_*.hist.root')
    fill_runlists = {}  # fill -> list of runs belonging to that fill
    reverse_dict = {}  # run -> fill
    for fname in allFiles:
        run = analysis.getRun(fname)
        reverse_dict[run] = 0.0
    answer = analysis.getAllFills(reverse_dict.keys())
    for run,fill in answer:
        reverse_dict[run] = int(fill)
        try:
            fill_runlists[fill].append(run)
        except KeyError:
            fill_runlists[fill] = [run]
    nSigmaRun = ROOT.TH1D('nSigmaRun', 'Mean nSigmaPion per run', len(allFiles), 0.5, len(allFiles)+0.5)
    ## NOTE(review): nSigmaFill is created but never filled or drawn
    nSigmaFill = ROOT.TH1D('nSigmaFill', 'blerg2', len(fill_runlists), 0.5, len(fill_runlists)+0.5)
    ps = ROOT.TPostScript('blerg.ps')
    c = ROOT.TCanvas('c','',100,100,600,800)
    pad = 1
    ROOT.gStyle.SetOptStat('m')
    for row,fname in enumerate(allFiles):
        if row % 15 == 0:
            ## start a fresh 3x5 page every 15 runs
            c.Update()
            ps.NewPage()
            c.Clear()
            c.Divide(3,5)
            pad = 1
        c.cd(pad)
        print fname
        tfile = ROOT.TFile(fname)
        mgr = analysis.HistogramManager(tfile,['nSigmaPion'])
        h = mgr.anyspin['alltrigs'].tracks_sum['nSigmaPion']
        run = analysis.getRun(fname)
        h.SetTitle('%3d - %d - %d' % (row+1, run, reverse_dict[run]))
        h.SetStats(True)
        if h.GetMean() > 0:
            h.SetLineColor(ROOT.kRed)
            ## NOTE(review): prints the literal text, not the values --
            ## possibly intended 'print "bad =", reverse_dict[run], run'
            print 'bad = fill, run'
        h.DrawCopy()
        nSigmaRun.SetBinContent(row+1, h.GetMean())
        nSigmaRun.SetBinError(row+1, h.GetMeanError())
        pad += 1
    ps.Close()
    c = ROOT.TCanvas()
    nSigmaRun.Draw()
    raw_input('press enter:')
def jetpatch_phi_correlation(tree, patchNumber):
    """Histogram the azimuthal separation between every reconstructed jet
    and the center of each of the 12 jet patches that fired above the
    137221 trigger threshold.  (patchNumber is currently unused.)
    """
    patchCenters = analysis.histos.JetCuts.patchPhi2006
    threshold = analysis.histos.JetCuts.triggerThresholds[137221]
    hist = ROOT.TH1D('h','',720,-360,360)
    for _entry in tree:
        for patch in range(12):
            if tree.event.jetPatchAdc(patch) > threshold:
                for jet in tree.event.jets():
                    hist.Fill(math.degrees(jet.Phi()) - patchCenters[patch])
    hist.Draw()
    raw_input('press enter:')
def pid_calibration(h, useElectrons=True):
    """takes an nSigmaPion histo and does the triple-Gaussian fit

    Fits h on (-6, 6) with the sum of a pion Gaussian (pars 0-2), a p/K
    Gaussian (pars 3-5) and, when useElectrons, an electron Gaussian
    (pars 6-8).  Prints efficiency / background numbers comparing the
    old fixed (-1, 2) PID window with the new asymmetric window
    (mean - 1 sigma, mean + 2 sigma) of the pion Gaussian.

    Returns (fit, pifit, pkfit[, elefit]): the combined fit plus
    single-Gaussian copies of each fitted component.
    """
    h.SetStats(True)
    #h.SetTitle('n#sigma(#pi) calibration for F7305')
    h.SetXTitle('n#sigma(#pi)')
    ROOT.gStyle.SetOptFit(111)
    if useElectrons:
        fit = ROOT.TF1('fit','gaus(0)+gaus(3)+gaus(6)', -6.0, 6.0)
    else:
        fit = ROOT.TF1('fit','gaus(0)+gaus(3)', -6.0, 6.0)
    ## seed the pion Gaussian near 0 ...
    fit.SetParameter(0, h.GetMaximum() * 0.9)
    fit.SetParameter(1, 0.0)
    fit.SetParameter(2, 1.0)
    ## ... and the p/K Gaussian near -2
    fit.SetParameter(3, h.GetMaximum() * 0.5)
    fit.SetParameter(4, -2.0)
    fit.SetParameter(5, 1.0)
    if useElectrons:
        ## small electron component seeded near +3
        fit.SetParameter(6, h.GetMaximum() * 0.05)
        #fit.SetParLimits(6, 0.0, h.GetMaximum() * 0.1)
        fit.SetParameter(7, 3.)
        fit.SetParLimits(7, 1.5, 5.0) ## this one drops b/g by 10% !
        fit.SetParameter(8, 1.0)
    fit.SetParName(0, '#pi magnitude')
    fit.SetParName(1, '#pi mean')
    fit.SetParName(2, '#pi width')
    fit.SetParName(3, 'p/K magnitude')
    fit.SetParName(4, 'p/K mean')
    fit.SetParName(5, 'p/K width')
    if useElectrons:
        fit.SetParName(6, 'ele magnitude')
        fit.SetParName(7, 'ele mean')
        fit.SetParName(8, 'ele width')
    h.Fit(fit, 'rq')
    h.DrawCopy()
    ## copy each component into a standalone Gaussian for drawing/integration
    pifit = ROOT.TF1('pifit', 'gaus', -6.0, 6.0)
    pifit.FixParameter(0, fit.GetParameter(0))
    pifit.FixParameter(1, fit.GetParameter(1))
    pifit.FixParameter(2, fit.GetParameter(2))
    pifit.SetLineColor(ROOT.kRed)
    pkfit = ROOT.TF1('pkfit', 'gaus', -6.0, 6.0)
    pkfit.FixParameter(0, fit.GetParameter(3))
    pkfit.FixParameter(1, fit.GetParameter(4))
    pkfit.FixParameter(2, fit.GetParameter(5))
    pkfit.SetLineColor(ROOT.kGreen)
    if useElectrons:
        elefit = ROOT.TF1('elefit', 'gaus', -6.0, 6.0)
        elefit.FixParameter(0, fit.GetParameter(6))
        elefit.FixParameter(1, fit.GetParameter(7))
        elefit.FixParameter(2, fit.GetParameter(8))
        elefit.SetLineColor(ROOT.kBlue)
    pifit.DrawCopy('same')
    pkfit.DrawCopy('same')
    if useElectrons:
        elefit.DrawCopy('same')
    ## new asymmetric PID window: 1 sigma below to 2 sigma above the pion mean
    lowBound = pifit.GetParameter(1) - 1.0 * pifit.GetParameter(2)
    highBound = pifit.GetParameter(1) + 2.0 * pifit.GetParameter(2)
    totalPions = pifit.Integral(-6.0, 6.0)
    oldPions = pifit.Integral(-1.0, 2.0)
    newPions = pifit.Integral(lowBound, highBound)
    if useElectrons:
        oldBg = pkfit.Integral(-1.0, 2.0) + elefit.Integral(-1.0, 2.0)
        newBg = pkfit.Integral(lowBound, highBound) + elefit.Integral(lowBound, highBound)
    else:
        oldBg = pkfit.Integral(-1.0, 2.0)
        newBg = pkfit.Integral(lowBound, highBound)
    print 'tot=%7.2f old eff=%.2f new eff=%.2f old bg=%.2f new bg=%.2f' % \
        (totalPions, oldPions/totalPions, newPions/totalPions, oldBg/fit.Integral(-1.0, 2.0), newBg/fit.Integral(lowBound, highBound))
    #print 'old', pifit.Integral(-1.0, 2.0), 'new', pifit.Integral(lowBound, highBound)
    #print 'old', pkfit.Integral(-1.0, 2.0), 'new', pkfit.Integral(lowBound, highBound)
    #print 'old', elefit.Integral(-1.0, 2.0), 'new', elefit.Integral(lowBound, highBound)
    #print h.GetTitle()[-4:], pifit.GetParameter(1)
    #raw_input('press enter to continue:')
    if useElectrons:
        return fit, pifit, pkfit, elefit
    else:
        return fit, pifit, pkfit
def pid_calibration_allfills(mydir='/Users/kocolosk/data/run5/hist-by-fill'):
"""generates a PDF of triple-Gaussian fits for all fills, plus a histogram of pion means"""
allFiles = os.listdir(mydir)
hfill = ROOT.TH1D('hfill','mean of pion Gaussian by RHIC Fill', len(allFiles), 0.5, len(allFiles)+0.5)
ps = ROOT.TPostScript('pid.ps')
c = ROOT.TCanvas('c','',100,100,600,800)
pad = 1
## some cumulative stats
nEntries = 0
nTotalPions = 0
nOldPions = 0
nNewPions = 0
nOldBg = 0
nNewBg = 0
nOldCounts = 0
nNewCounts = 0
myrecords = [] ## fname, pi mean, pi sigma
counter = 0
for fname in allFiles:
if not fname.endswith('.root'): continue
if counter % 15 == 0:
c.Update()
ps.NewPage()
c.Clear()
c.Divide(3,5)
pad = 1
counter += 1
c.cd(pad)
print fname
tfile = ROOT.TFile(os.path.join(mydir, fname))
mgr = analysis.HistogramManager(tfile, ['nSigmaPion'])
h = mgr.anyspin['alltrigs'].tracks_sum['nSigmaPion']
h.SetTitle('n#sigma(#pi) calibration for F%s' % (fname[-14:-10],))
fit, pifit, pkfit, elefit = pid_calibration(h)
mean = fit.GetParameter(1)
error = fit.GetParError(1)
sigma = fit.GetParameter(2)
hfill.SetBinContent(counter+1, mean)
hfill.SetBinError(counter+1, error)
myrecords.append((fname, mean, sigma))
pad += 1
## stats
lowBound = pifit.GetParameter(1) - 1.0 * pifit.GetParameter(2)
highBound = pifit.GetParameter(1) + 2.0 * pifit.GetParameter(2)
binWidth = h.GetBinWidth(1)
nEntries += h.GetEntries()
nTotalPions += pifit.Integral(-6.0, 6.0) / binWidth
nOldPions += pifit.Integral(-1.0, 2.0) / binWidth
nNewPions += pifit.Integral(lowBound, highBound) / binWidth
nOldBg += (pkfit.Integral(-1.0, 2.0) + elefit.Integral(-1.0, 2.0)) / binWidth
nNewBg += (pkfit.Integral(lowBound, highBound) + elefit.Integral(lowBound, highBound)) / binWidth
nOldCounts += fit.Integral(-1.0, 2.0) / binWidth
nNewCounts += fit.Integral(lowBound, highBound) / binWidth
ps.Close()
c = ROOT.TCanvas()
hfill.GetYaxis().SetRangeUser(-0.5, 0.8)
hfill.SetXTitle('fill index')
hfill.Draw('e')
for r in myrecords:
print '%d : (% 1.6f, %1.6f),' % (int(r[0][13:17]), r[1], r[2])
print 'Old Total Efficiency = %.4f' % (nOldPions/nTotalPions,)
print 'New Total Efficiency = %.4f' % (nNewPions/nTotalPions,)
print 'Old Background Fraction = %.4f' % (nOldBg/nOldCounts,)
print 'New Background Fraction = %.4f' % (nNewBg/nNewCounts,)
print 'Total Statistics Old = %.0f New %.0f' % (nOldCounts, nNewCounts)
raw_input('press enter:')
def print_statistics(runlist, keytype='event'):
    """Run 5 only: prints detailed (event/track) statistics for all runs in runlist

    keytype == 'event' counts bx7 entries (events) per trigger; any other
    value counts track-level pt entries instead.  One row per run, plus a
    summary row of column totals at the end.
    """
    a = [0, 0, 0, 0, 0, 0, 0, 0]  # per-column running totals
    print ' ---------------------------------------------------------------------------------- '
    print '| 96011 96201 96211 96221 96233 | HT JP ALL |'
    for row,run in enumerate(runlist):
        if row % 10 == 0:
            ## horizontal rule every 10 rows for readability
            print ' ---------------------------------------------------------------------------------- '
        f = ROOT.TFile('~/data/run5/hist/chargedPions_%d.hist.root' % run)
        if keytype=='event':
            h = analysis.HistogramManager(f,'bx7').anyspin
            b = [
                h['96011']['bx7'].GetEntries(),
                h['96201']['bx7'].GetEntries(),
                h['96211']['bx7'].GetEntries(),
                h['96221']['bx7'].GetEntries(),
                h['96233']['bx7'].GetEntries(),
                h['hightower']['bx7'].GetEntries(),
                h['jetpatch']['bx7'].GetEntries(),
                h['alltrigs']['bx7'].GetEntries()
            ]
        else:
            h = analysis.HistogramManager(f,'pt').anyspin
            b = [
                h['96011'].tracks_sum['pt'].GetEntries(),
                h['96201'].tracks_sum['pt'].GetEntries(),
                h['96211'].tracks_sum['pt'].GetEntries(),
                h['96221'].tracks_sum['pt'].GetEntries(),
                h['96233'].tracks_sum['pt'].GetEntries(),
                h['hightower'].tracks_sum['pt'].GetEntries(),
                h['jetpatch'].tracks_sum['pt'].GetEntries(),
                h['alltrigs'].tracks_sum['pt'].GetEntries()
            ]
        print '| %d %d : %7d %7d %7d %7d %7d | %7d %7d %7d |' % (analysis.getFill(run), run, \
            b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7] )
        for i in range(len(b)):
            a[i] += b[i]
    print ' ---------------------------------------------------------------------------------- '
    print '| sum : %7d %7d %7d %7d %7d | %7d %7d %7d |' % \
        (a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7])
def runlist_luminosity(runlist):
"""prints integrated luminosity seen by minbias trigger in runs of this list"""
lumiSum = 0
for run in runlist:
if run > 7000000:
f = ROOT.TFile('~/data/run6/tree/chargedPions_%d.tree.root' % (run,))
minBiasId = 117001
else:
path = '/Users/kocolosk/data/run5/tree/chargedPions_%d.tree.root' % run
if os.path.isfile(path):
f = ROOT.TFile(path)
else:
path = '/Users/kocolosk/data/run5/tree-minbias/chargedPions_%d.tree.root' % run
f = ROOT.TFile(path)
minBiasId = 96011
try:
tree = f.tree
lumi = analysis.tree.integratedLuminosity(tree, minBiasId)
print '%d: %3.6f nb^-1' % (run, lumi)
lumiSum += lumi
except AttributeError:
pass
print 'Integrated Recorded Luminosity for Runlist: %3.6f pb^-1' % (lumiSum/1000,)
def spinInfoForFrank():
    """not actually a plot

    Dumps run / event / spinbit / bx7 / spin-QA for every entry in the
    Run 5 charged-pion trees into an ntuple saved as spin_info.root.
    """
    chain = ROOT.TChain('tree')
    chain.Add('~/data/run5/tree/chargedPions_*')
    ## only adding these while the other ones spin
    chain.Add('~/data/run5/tree/backup-2008-01-08-trigger-prescales/chargedPions_*')
    ## enable only the branches we actually read, for speed
    chain.SetBranchStatus('*',0)
    chain.SetBranchStatus('mRunId',1)
    chain.SetBranchStatus('mEventId',1)
    chain.SetBranchStatus('mSpinBit',1)
    chain.SetBranchStatus('mBx7',1)
    chain.SetBranchStatus('mSpinQA',1)
    f = ROOT.TFile('spin_info.root','recreate')
    nt = ROOT.TNtuple('nt','spin info','run:event:spinbit:bx7:qa')
    for entry in chain:
        ev = entry.event
        nt.Fill(ev.runId(), ev.eventId(), ev.spinBit(), ev.bx7(), ev.isSpinValid())
    nt.Write()
    f.Close()
def ssa_by_fill(asym_key='ly', runlist=analysis.final_runlist_run5, variable='pt', year=2006, bins=None):
    """generates canvas for asym_key (ly,lb,ls,us). Each data point is the SSA for a fill

    asym_key selects the single-spin asymmetry: 'ly' yellow beam, 'lb'
    blue beam, 'ls' like-sign, 'us' unlike-sign.  One AsymmetryGenerator
    per fill and charge sign (single-bin by default); the per-fill
    results are plotted against fill index and fit with a constant.
    NOTE(review): the 'year' parameter is not used in the body.
    """
    tuples = analysis.getAllFills(runlist)
    fills = []
    for run, fill in tuples:
        ## temporary hacks
        ## http://www.star.bnl.gov/HyperNews-star/protected/get/starspin/3324.html
        if 6144002 <= run <= 6144029: fill = 7128
        if 6144041 <= run <= 6144042: fill = 7129
        if 6145067 <= run <= 6145068: fill = 7136
        if 6146001 <= run <= 6146026: fill = 7138
        if run in runlist:
            fills.append(int(fill))
    fills = analysis.uniqify(fills)
    asym_plus = {}
    asym_minus = {}
    for f in fills:
        ## default binning is a single bin spanning the full range
        asym_plus[f] = analysis.AsymmetryGenerator(name='F%s_plus' % f, bins=bins or [1,0.0,1.0], key=variable)
        asym_minus[f] = analysis.AsymmetryGenerator(name='F%s_minus' % f, bins=bins or [1,0.0,1.0], key=variable)
    scalar_path = os.environ['STAR'] + '/StRoot/StSpinPool/StTamuRelLum/inputs/run5.txt'
    scalars = analysis.ScalarCounts(scalar_path)
    polarizations = analysis.Polarizations.Final
    allFiles = glob(run5_hist_dir + '/chargedPions_*.hist.root')
    for fname in allFiles:
        run = analysis.getRun(fname)
        if runlist is None or run in runlist:
            print fname, run
            tfile = ROOT.TFile(fname)
            mgr = analysis.HistogramManager(tfile,('pt',))
            try:
                ## relative-luminosity scalars, Run 5 timebins 7-9
                bin7 = scalars[str(run) + '-5-7']
                bin8 = scalars[str(run) + '-5-8']
                bin9 = scalars[str(run) + '-5-9']
            except KeyError:
                print run, 'is not in the scalars database'
                continue
            uu = bin7.uu + bin8.uu + bin9.uu
            ud = bin7.ud + bin8.ud + bin9.ud
            du = bin7.du + bin8.du + bin9.du
            dd = bin7.dd + bin8.dd + bin9.dd
            try:
                pol = polarizations[bin7.fill]
            except KeyError:
                print bin7.fill, 'has no final polarization values'
                continue
            asym_plus[bin7.fill].FillFromHistogramManager(mgr, 'jetpatch', 1, uu, ud, du, dd, pol.py, pol.pb)
            asym_minus[bin7.fill].FillFromHistogramManager(mgr, 'jetpatch', -1, uu, ud, du, dd, pol.py, pol.pb)
            tfile.Close()
    title = {'ly':'Yellow Beam', 'lb':'Blue Beam', 'ls':'Like-Sign', 'us':'Unlike-Sign'}
    final_hist_plus = ROOT.TH1D('final_hist_plus','#pi^{+} %s SSA' % title[asym_key], len(fills), 0.5, len(fills)+0.5)
    final_hist_minus = ROOT.TH1D('final_hist_minus','#pi^{-} %s SSA' % title[asym_key], len(fills), 0.5, len(fills)+0.5)
    marker_color = {'ly':ROOT.kYellow, 'lb':ROOT.kBlue, 'ls':ROOT.kRed, 'us':ROOT.kBlack}
    for h in (final_hist_plus, final_hist_minus):
        h.SetMarkerColor( marker_color[asym_key] )
        h.GetYaxis().SetRangeUser(-0.2, 0.2)
        h.SetXTitle('fill index')
    final_hist_plus.SetMarkerStyle(21)
    final_hist_minus.SetMarkerStyle(20)
    ## one summary point per fill, taken from the single-bin asymmetry
    for i,f in enumerate(fills):
        hplus = asym_plus[f].GetAsymmetry(asym_key)
        final_hist_plus.SetBinContent( i+1, hplus.GetBinContent(1) )
        final_hist_plus.SetBinError( i+1, hplus.GetBinError(1) )
        print '%d % .4f % .4f % .4f' % (f, hplus.GetBinContent(1), hplus.GetBinError(1), hplus.GetBinContent(1)/hplus.GetBinError(1))
        hplus.Delete()
        hminus = asym_minus[f].GetAsymmetry(asym_key)
        final_hist_minus.SetBinContent( i+1, hminus.GetBinContent(1) )
        final_hist_minus.SetBinError( i+1, hminus.GetBinError(1) )
        hminus.Delete()
    ROOT.gStyle.SetOptStat(0)
    ROOT.gStyle.SetErrorX(0.0)
    ROOT.gStyle.SetOptFit(111)
    cp = ROOT.TCanvas('%s_ssa_fill_plus' % asym_key)
    final_hist_plus.Draw('e1')
    final_hist_plus.Fit('pol0')
    cm = ROOT.TCanvas('%s_ssa_fill_minus' % asym_key)
    final_hist_minus.Draw('e1')
    final_hist_minus.Fit('pol0')
    raw_input('wait here')
    cp.Print('.png')
    cm.Print('.png')
def ssa(asym_key='ly', runlist=analysis.final_runlist_run5, variable='pt'):
    """plots a ssa against the given variable

    asym_key: 'ly' yellow beam, 'lb' blue beam, 'ls' like-sign,
    'us' unlike-sign.  Each charge sign is fit with a constant and the
    canvases are saved as PNGs.
    """
    ## note: need to redefine binning if variable != pt
    asym_plus = analysis.AsymmetryGenerator('asym_plus', key=variable)
    asym_minus = analysis.AsymmetryGenerator('asym_minus', key=variable)
    scalar_path = os.environ['STAR'] + '/StRoot/StSpinPool/StTamuRelLum/inputs/run5.txt'
    scalars = analysis.ScalarCounts(scalar_path)
    polarizations = analysis.Polarizations.Final
    allFiles = glob(run5_hist_dir + '/chargedPions_*.hist.root')
    for fname in allFiles:
        run = analysis.getRun(fname)
        if runlist is None or run in runlist:
            print fname, run
            tfile = ROOT.TFile(fname)
            mgr = analysis.HistogramManager(tfile,('pt',))
            try:
                ## relative-luminosity scalars, Run 5 timebins 7-9
                bin7 = scalars[str(run) + '-5-7']
                bin8 = scalars[str(run) + '-5-8']
                bin9 = scalars[str(run) + '-5-9']
            except KeyError:
                print run, 'is not in the scalars database'
                continue
            uu = bin7.uu + bin8.uu + bin9.uu
            ud = bin7.ud + bin8.ud + bin9.ud
            du = bin7.du + bin8.du + bin9.du
            dd = bin7.dd + bin8.dd + bin9.dd
            try:
                pol = polarizations[bin7.fill]
            except KeyError:
                print bin7.fill, 'has no final polarization values'
                continue
            asym_plus.FillFromHistogramManager(mgr, 'jetpatch', 1, uu, ud, du, dd, pol.py, pol.pb)
            asym_minus.FillFromHistogramManager(mgr, 'jetpatch', -1, uu, ud, du, dd, pol.py, pol.pb)
            tfile.Close()
    title = {'ly':'Yellow Beam', 'lb':'Blue Beam', 'ls':'Like-Sign', 'us':'Unlike-Sign'}
    marker_color = {'ly':ROOT.kYellow, 'lb':ROOT.kBlue, 'ls':ROOT.kRed, 'us':ROOT.kBlack}
    ROOT.gStyle.SetOptStat(0)
    ROOT.gStyle.SetErrorX(0.0)
    ROOT.gStyle.SetOptFit(111)
    hp = asym_plus.GetAsymmetry(asym_key)
    hp.SetTitle(title[asym_key] + ' SSA for #pi^{+}')
    hp.SetMarkerStyle(21)
    hm = asym_minus.GetAsymmetry(asym_key)
    hm.SetTitle(title[asym_key] + ' SSA for #pi^{-}')
    hm.SetMarkerStyle(20)
    for h in (hp,hm):
        h.SetMarkerColor(marker_color[asym_key])
        h.SetXTitle(variable)
        ## single-beam asymmetries get a tighter y range
        if asym_key in ('ly', 'lb'):
            h.GetYaxis().SetRangeUser(-0.05, 0.05)
        else:
            h.GetYaxis().SetRangeUser(-0.1, 0.1)
    cp = ROOT.TCanvas('cp')
    hp.Fit('pol0')
    hp.Draw('e1')
    cm = ROOT.TCanvas('cm')
    hm.Fit('pol0')
    hm.Draw('e1')
    raw_input('wait here')
    cp.Print('%s_ssa_%s_plus.png' % (asym_key, variable))
    cm.Print('%s_ssa_%s_minus.png' % (asym_key, variable))
def pid_background_asymmetry(runlist=analysis.final_runlist_run5):
    """plots A_{LL} for charged tracks outside PID window and fits with a pol0

    Uses the 'pt_bg' histogram key.  Both Run 5 and Run 6 files are
    globbed; scalar keys are tried with the '-5-' prefix first and the
    '-6-' prefix as fallback, and the spin-state sums use timebins 6-9
    for Run 6 (run > 7000000) or 7-9 for Run 5.
    NOTE(review): the plot titles say 'Run 6 data' even though Run 5
    files are also included -- confirm which sample is intended.
    """
    asym_plus = analysis.AsymmetryGenerator('asym_plus', key='pt_bg')
    asym_minus = analysis.AsymmetryGenerator('asym_minus', key='pt_bg')
    #scalar_path = os.environ['STAR'] + '/StRoot/StSpinPool/StTamuRelLum/inputs/run5.txt'
    scalar_path = os.environ['STAR'] + '/StRoot/StSpinPool/StTamuRelLum/inputs/run6.txt'
    scalars = analysis.ScalarCounts(scalar_path)
    polarizations = analysis.Polarizations.Final
    allFiles = glob(run5_hist_dir + '/chargedPions_*.hist.root')
    allFiles += glob(run6_hist_dir + '/chargedPions_*.hist.root')
    for fname in allFiles:
        run = analysis.getRun(fname)
        if runlist is None or run in runlist:
            print fname, run
            tfile = ROOT.TFile(fname)
            mgr = analysis.HistogramManager(tfile,('pt_bg',))
            try:
                bin6 = scalars[str(run) + '-5-6']
                bin7 = scalars[str(run) + '-5-7']
                bin8 = scalars[str(run) + '-5-8']
                bin9 = scalars[str(run) + '-5-9']
            except KeyError:
                try:
                    bin6 = scalars[str(run) + '-6-6']
                    bin7 = scalars[str(run) + '-6-7']
                    bin8 = scalars[str(run) + '-6-8']
                    bin9 = scalars[str(run) + '-6-9']
                except KeyError:
                    print run, 'is not in the scalars database'
                    continue
            if run > 7000000:
                ## Run 6: include timebin 6
                uu = bin6.uu + bin7.uu + bin8.uu + bin9.uu
                ud = bin6.ud + bin7.ud + bin8.ud + bin9.ud
                du = bin6.du + bin7.du + bin8.du + bin9.du
                dd = bin6.dd + bin7.dd + bin8.dd + bin9.dd
            else:
                uu = bin7.uu + bin8.uu + bin9.uu
                ud = bin7.ud + bin8.ud + bin9.ud
                du = bin7.du + bin8.du + bin9.du
                dd = bin7.dd + bin8.dd + bin9.dd
            try:
                pol = polarizations[bin7.fill]
            except KeyError:
                print bin7.fill, 'has no final polarization values'
                continue
            asym_plus.FillFromHistogramManager(mgr, 'jetpatch', 1, uu, ud, du, dd, pol.py, pol.pb)
            asym_minus.FillFromHistogramManager(mgr, 'jetpatch', -1, uu, ud, du, dd, pol.py, pol.pb)
            tfile.Close()
    #title = {'ly':'Yellow Beam', 'lb':'Blue Beam', 'ls':'Like-Sign', 'us':'Unlike-Sign'}
    #marker_color = {'ly':ROOT.kYellow, 'lb':ROOT.kBlue, 'ls':ROOT.kRed, 'us':ROOT.kBlack}
    ROOT.gStyle.SetOptStat(0)
    ROOT.gStyle.SetErrorX(0.0)
    ROOT.gStyle.SetOptFit(111)
    hp = asym_plus.GetAsymmetry('ll')
    hp.SetTitle('PID Background A_{LL} for #pi^{+} (Run 6 data)')
    hp.SetMarkerStyle(21)
    hm = asym_minus.GetAsymmetry('ll')
    hm.SetTitle('PID Background A_{LL} for #pi^{-} (Run 6 data)')
    hm.SetMarkerStyle(20)
    for h in (hp,hm):
        #h.SetMarkerColor(marker_color[asym_key])
        h.SetXTitle('p_{T}')
        # if asym_key in ('ly', 'lb'):
        h.GetYaxis().SetRangeUser(-0.1, 0.1)
        #else:
        #    h.GetYaxis().SetRangeUser(-0.1, 0.1)
    cp = ROOT.TCanvas('cp')
    hp.Fit('pol0')
    hp.Draw('e1')
    cm = ROOT.TCanvas('cm')
    hm.Fit('pol0')
    hm.Draw('e1')
    ## dump the bin values so they can be pasted elsewhere
    for h in (hp,hm):
        print h.GetName()
        for i in range(h.GetNbinsX()):
            print 'y=% .2e, stat=%.2e' % (h.GetBinContent(i+1), h.GetBinError(i+1))
    raw_input('wait here')
    cp.Print('pid_background_asymmetry_plus.png')
    cm.Print('pid_background_asymmetry_minus.png')
def print_ssa(runlist=analysis.final_runlist_run5, charge=1):
    """prints (val +/- err) => (nSig) for y,b,ls,us for each run in list

    For every run a single-bin (2 < pt < 10) asymmetry generator is
    filled, and the yellow-beam, blue-beam, like-sign and unlike-sign
    SSAs are printed with their significance (value / error).
    """
    scalar_path = os.environ['STAR'] + '/StRoot/StSpinPool/StTamuRelLum/inputs/run5.txt'
    scalars = analysis.ScalarCounts(scalar_path)
    polarizations = analysis.Polarizations.Final
    print '------------------------------------------------------------------------------------\
---------------------------------------------------------------'
    print ' Fill | Run | Yellow Beam | Blue Beam |\
 Like Sign | Unlike Sign '
    print '------------------------------------------------------------------------------------\
---------------------------------------------------------------'
    for run in runlist:
        ## single bin covering 2 < pt < 10
        gen = analysis.AsymmetryGenerator(name=str(run), bins=[1,2.0,10.0])
        f = ROOT.TFile(run5_hist_dir + '/chargedPions_%d.hist.root' % run)
        mgr = analysis.HistogramManager(f,('pt',))
        try:
            ## relative-luminosity scalars, Run 5 timebins 7-9
            bin7 = scalars[str(run) + '-5-7']
            bin8 = scalars[str(run) + '-5-8']
            bin9 = scalars[str(run) + '-5-9']
        except KeyError:
            print run, 'is not in the scalars database'
            continue
        uu = bin7.uu + bin8.uu + bin9.uu
        ud = bin7.ud + bin8.ud + bin9.ud
        du = bin7.du + bin8.du + bin9.du
        dd = bin7.dd + bin8.dd + bin9.dd
        try:
            pol = polarizations[bin7.fill]
        except KeyError:
            print bin7.fill, 'has no final polarization values'
            continue
        gen.FillFromHistogramManager(mgr, 'jetpatch', charge, uu, ud, du, dd, pol.py, pol.pb)
        f.Close()
        y = gen.GetAsymmetry('ly')
        line = 'F%d | R%d | (% .5f +- %.5f) => % .2f |' % (bin7.fill, run, y.GetBinContent(1), \
            y.GetBinError(1), (y.GetBinContent(1)/y.GetBinError(1)) )
        b = gen.GetAsymmetry('lb')
        line += ' (% .5f +- %.5f) => % .2f |' % (b.GetBinContent(1), \
            b.GetBinError(1), (b.GetBinContent(1)/b.GetBinError(1)))
        ls = gen.GetAsymmetry('ls')
        line += ' (% .5f +- %.5f) => % .2f |' % (ls.GetBinContent(1), \
            ls.GetBinError(1), (ls.GetBinContent(1)/ls.GetBinError(1)))
        us = gen.GetAsymmetry('us')
        line += ' (% .5f +- %.5f) => % .2f' % (us.GetBinContent(1), \
            us.GetBinError(1), (us.GetBinContent(1)/us.GetBinError(1)))
        print line
        ## free the per-run histograms before the next iteration
        [ h.Delete() for h in (y,b,ls,us) ]
def asigma(runlist=analysis.transverse_run6):
    """plots asigma -- duh

    Computes the 'll'-formula asymmetry from the transverse-polarization
    ('-transverse') histogram directories for Run 5 + Run 6, fits each
    charge sign with a constant, and saves the canvases as PNGs.
    """
    ROOT.gStyle.SetOptStat(0)
    ROOT.gStyle.SetErrorX(0.0)
    ROOT.gStyle.SetOptFit(111)
    asym_plus = analysis.AsymmetryGenerator('asym_plus', key='pt')
    asym_minus = analysis.AsymmetryGenerator('asym_minus', key='pt')
    #scalar_path = os.environ['STAR'] + '/StRoot/StSpinPool/StTamuRelLum/inputs/run5.txt'
    scalar_path = os.environ['STAR'] + '/StRoot/StSpinPool/StTamuRelLum/inputs/run6.txt'
    scalars = analysis.ScalarCounts(scalar_path)
    polarizations = analysis.Polarizations.Final
    allFiles = glob(run5_hist_dir + '-transverse/chargedPions_*.hist.root')
    allFiles += glob(run6_hist_dir + '-transverse/chargedPions_*.hist.root')
    for fname in allFiles:
        run = analysis.getRun(fname)
        if runlist is None or run in runlist:
            print fname, run
            tfile = ROOT.TFile(fname)
            mgr = analysis.HistogramManager(tfile,('pt',))
            try:
                ## '-5-' scalar keys first, '-6-' as fallback
                bin6 = scalars[str(run) + '-5-6']
                bin7 = scalars[str(run) + '-5-7']
                bin8 = scalars[str(run) + '-5-8']
                bin9 = scalars[str(run) + '-5-9']
            except KeyError:
                try:
                    bin6 = scalars[str(run) + '-6-6']
                    bin7 = scalars[str(run) + '-6-7']
                    bin8 = scalars[str(run) + '-6-8']
                    bin9 = scalars[str(run) + '-6-9']
                except KeyError:
                    print run, 'is not in the scalars database'
                    continue
            if run > 7000000:
                ## Run 6: include timebin 6
                uu = bin6.uu + bin7.uu + bin8.uu + bin9.uu
                ud = bin6.ud + bin7.ud + bin8.ud + bin9.ud
                du = bin6.du + bin7.du + bin8.du + bin9.du
                dd = bin6.dd + bin7.dd + bin8.dd + bin9.dd
            else:
                uu = bin7.uu + bin8.uu + bin9.uu
                ud = bin7.ud + bin8.ud + bin9.ud
                du = bin7.du + bin8.du + bin9.du
                dd = bin7.dd + bin8.dd + bin9.dd
            try:
                pol = polarizations[bin7.fill]
            except KeyError:
                print bin7.fill, 'has no final polarization values'
                continue
            asym_plus.FillFromHistogramManager(mgr, 'jetpatch', 1, uu, ud, du, dd, pol.py, pol.pb)
            asym_minus.FillFromHistogramManager(mgr, 'jetpatch', -1, uu, ud, du, dd, pol.py, pol.pb)
            tfile.Close()
    hp = asym_plus.GetAsymmetry('ll')
    hp.SetTitle('A_{#Sigma} for #pi^{+} using Run 6 data')
    hp.SetMarkerStyle(21)
    hm = asym_minus.GetAsymmetry('ll')
    hm.SetTitle('A_{#Sigma} for #pi^{-} using Run 6 data')
    hm.SetMarkerStyle(20)
    for h in (hp,hm):
        h.SetXTitle('p_{T}')
        h.GetYaxis().SetRangeUser(-0.105, 0.10)
    cp = ROOT.TCanvas('cp')
    hp.Fit('pol0')
    hp.Draw('e1')
    cm = ROOT.TCanvas('cm')
    hm.Fit('pol0')
    hm.Draw('e1')
    ## dump the bin values so they can be pasted elsewhere
    for h in (hp,hm):
        print h.GetName()
        for i in range(h.GetNbinsX()):
            print 'y=% .2e, stat=%.2e' % (h.GetBinContent(i+1), h.GetBinError(i+1))
    raw_input('wait here')
    cp.Print('asigma_plus.png')
    cm.Print('asigma_minus.png')
def pid_pt_dependence(runlist=analysis.final_runlist_run5, tfile=None):
    """plots recalibrated nSigmaPion for charge-summed pions in each pT bin

    If tfile is None the five nSigmaPion histograms are filled from the
    per-run chargedPions trees for the runs in runlist; otherwise the
    pre-filled histograms named 'nsig_0' ... 'nsig_4' are read back from
    tfile.  Each histogram is then refit with pid_calibration, drawn, and
    written to /tmp/pid_pt_dependence.root.  Blocks on raw_input.
    """
    if tfile is None:
        ## nsig[0] is the inclusive 2<pT<10 sample; nsig[1..4] are the
        ## individual pT bins, highest bin first (see ptbins below)
        nsig = [ ROOT.TH1D('nsig_%d' % i,'',100,-6.0,6.0) for i in range(5)]
        nsig[0].SetTitle('Recalibrated n#sigma(#pi), 2<p_{T}<10')
        nsig[1].SetTitle('Recalibrated n#sigma(#pi), 8<p_{T}<10')
        nsig[2].SetTitle('Recalibrated n#sigma(#pi), 6<p_{T}<8')
        nsig[3].SetTitle('Recalibrated n#sigma(#pi), 4<p_{T}<6')
        nsig[4].SetTitle('Recalibrated n#sigma(#pi), 2<p_{T}<4')
        ## reversed so the loop below tests the highest threshold first
        ptbins = [2.0, 4.0, 6.0, 8.0]
        ptbins.reverse()
        ecuts = analysis.histos.EventCuts()
        for run in runlist:
            fill = analysis.getFill(run)
            tcuts = analysis.histos.TrackCuts(fill)
            ## (offset, scale) used to recalibrate nSigmaPion for this fill
            pidFit = analysis.histos.pidCalibration[fill]
            ## Run 6 runs are numbered above 7000000
            if run > 7000000:
                fname = run6_tree_dir + '/chargedPions_%d.tree.root' % run
            else:
                fname = run5_tree_dir + '/chargedPions_%d.tree.root' % run
                ## some Run 5 runs only exist in the minbias tree directory
                if not os.path.isfile(fname):
                    fname = run5_tree_dir + '-minbias/chargedPions_%d.tree.root' % run
            print fname
            f = ROOT.TFile(fname)
            for t in f.tree:
                ecuts.set(t.event)
                if not ecuts.all: continue
                for track in t.event.tracks():
                    tcuts.set(track)
                    if tcuts.eta and tcuts.dca and tcuts.fit:
                        ## recalibrate nSigmaPion
                        nSigmaPion = (track.nSigmaPion() - pidFit[0]) / pidFit[1]
                        nsig[0].Fill(nSigmaPion)
                        ## first threshold exceeded selects the pT bin
                        for i,ptcut in enumerate(ptbins):
                            if track.pt() > ptcut:
                                nsig[i+1].Fill(nSigmaPion)
                                break
    else:
        nsig = [ tfile.Get('nsig_%d' % i) for i in range(5) ]
    c = []
    outfile = ROOT.TFile('/tmp/pid_pt_dependence.root','recreate')
    for h in nsig:
        fits = pid_calibration(h)
        #fits = pid_calibration(h, useElectrons=(h.GetName() != 'nsig_1'))
        ## attach all but the first returned fit so they draw with the histo
        [ h.GetListOfFunctions().Add(f) for f in fits[1:] ]
        c.append(ROOT.TCanvas())
        h.Draw()
        h.Write()
        c[-1].Print('.png')
    raw_input('wait here:')
    outfile.Close()
def systematic_uncertainty_run5(charge='plus', key=None):
"""returns final bin-by-bin systematic uncertainties. key can be one of (None)"""
plus_all_meas = [
analysis.DataPoint( y=-5.85e-03, stat=4.57e-03, sys=0.00, x=3.0, xlow=2.0, binwidth=2.0 ),
analysis.DataPoint( y= 2.74e-02, stat=1.15e-02, sys=0.00, x=5.0, xlow=4.0, binwidth=2.0 ),
analysis.DataPoint( y= 3.50e-03, stat=2.24e-02, sys=0.00, x=7.0, xlow=6.0, binwidth=2.0 ),
analysis.DataPoint( y=-1.96e-02, stat=4.05e-02, sys=0.00, x=9.0, xlow=8.0, binwidth=2.0 )
]
minus_all_meas = [
analysis.DataPoint( y=-2.04e-03, stat=4.71e-03, sys=0.00, x=3.0, xlow=2.0, binwidth=2.0 ),
analysis.DataPoint( y=-1.09e-03, stat=1.21e-02, sys=0.00, x=5.0, xlow=4.0, binwidth=2.0 ),
analysis.DataPoint( y=-3.65e-02, stat=2.37e-02, sys=0.00, x=7.0, xlow=6.0, binwidth=2.0 ),
analysis.DataPoint( y= 3.42e-03, stat=4.32e-02, sys=0.00, x=9.0, xlow=8.0, binwidth=2.0 )
]
## math.fabs(nSigmaPion) > 2
plus_all_pid_bg = [
analysis.DataPoint( y= 1.60e-02, stat=7.21e-03, sys=0.00, x=3.0, xlow=2.0, binwidth=2.0 ),
analysis.DataPoint( y=-5.02e-03, stat=1.67e-02, sys=0.00, x=5.0, xlow=4.0, binwidth=2.0 ),
analysis.DataPoint( y=-4.15e-02, stat=3.39e-02, sys=0.00, x=7.0, xlow=6.0, binwidth=2.0 ),
analysis.DataPoint( y=-1.58e-02, stat=6.39e-02, sys=0.00, x=9.0, xlow=8.0, binwidth=2.0 )
]
minus_all_pid_bg = [
analysis.DataPoint( y= 6.63e-03, stat=7.08e-03, sys=0.00, x=3.0, xlow=2.0, binwidth=2.0 ),
analysis.DataPoint( y=-6.25e-03, stat=1.71e-02, sys=0.00, x=5.0, xlow=4.0, binwidth=2.0 ),
analysis.DataPoint( y= 3.39e-02, stat=3.80e-02, sys=0.00, x=7.0, xlow=6.0, binwidth=2.0 ),
analysis.DataPoint( y= 1.69e-02, stat=7.74e-02, sys=0.00, x=9.0, xlow=8.0, binwidth=2.0 )
]
## math.fabs(nSigmaPion) > 2, 2006 data
plus_all_pid_bg_2006 = [
analysis.DataPoint( y= 1.47e-02, stat=5.85e-03, sys=0.00, x=3.0, xlow=2.0, binwidth=2.0 ),
analysis.DataPoint( y=-4.32e-03, stat=1.09e-02, sys=0.00, x=5.0, xlow=4.0, binwidth=2.0 ),
analysis.DataPoint( y= 9.59e-03, stat=1.90e-02, sys=0.00, x=7.0, xlow=6.0, binwidth=2.0 ),
analysis.DataPoint( y=-1.01e-03, stat=3.11e-02, sys=0.00, x=9.0, xlow=8.0, binwidth=2.0 )
]
minus_all_pid_bg_2006 = [
analysis.DataPoint( y= 4.98e-03, stat=6.15e-03, sys=0.00, x=3.0, xlow=2.0, binwidth=2.0 ),
analysis.DataPoint( y= 1.95e-02, stat=1.21e-02, sys=0.00, x=5.0, xlow=4.0, binwidth=2.0 ),
analysis.DataPoint( y=-1.91e-02, stat=2.22e-02, sys=0.00, x=7.0, xlow=6.0, binwidth=2.0 ),
analysis.DataPoint( y=-6.86e-03, stat=4.01e-02, sys=0.00, x=9.0, xlow=8.0, binwidth=2.0 )
]
## asigma from 2006 transverse runs, BJP1 triggers only
plus_asigma = [
analysis.DataPoint( y=-2.26e-03, stat=6.17e-03, sys=0.00, x=3.0, xlow=2.0, binwidth=2.0 ),
analysis.DataPoint( y= 9.57e-03, stat=1.28e-02, sys=0.00, x=5.0, xlow=4.0, binwidth=2.0 ),
analysis.DataPoint( y=-3.81e-02, stat=2.17e-02, sys=0.00, x=7.0, xlow=6.0, binwidth=2.0 ),
analysis.DataPoint( y=-3.15e-02, stat=3.42e-02, sys=0.00, x=9.0, xlow=8.0, binwidth=2.0 )
]
minus_asigma = [
analysis.DataPoint( y= 3.29e-05, stat=6.39e-03, sys=0.00, x=3.0, xlow=2.0, binwidth=2.0 ),
analysis.DataPoint( y=-4.37e-03, stat=1.37e-02, sys=0.00, x=5.0, xlow=4.0, binwidth=2.0 ),
analysis.DataPoint( y=-3.53e-02, stat=2.37e-02, sys=0.00, x=7.0, xlow=6.0, binwidth=2.0 ),
analysis.DataPoint( y=-2.44e-02, stat=3.85e-02, sys=0.00, x=9.0, xlow=8.0, binwidth=2.0 )
]
pid_bg_frac = [0.10, 0.09, 0.10, 0.16]
non_long_frac = [0.018, 0.018, 0.018, 0.018]
## quadrature sum of ZDC/BBC comparison and beam-gas background study
relative_lumi_syst = math.sqrt(4.9e-04 ** 2 + 3.0e-04 ** 2)
## trigger bias
plus_trigger_bias = [0.0, 0.0, 0.0, 0.0]
minus_trigger_bias = [0.0, 0.0, 0.0, 0.0]
if charge == 'plus':
all_meas = plus_all_meas
trigger_bias = plus_trigger_bias
pid_bg = plus_all_pid_bg
## next line combines Run 5 and Run 6 b/g asymmetries
[pbg.add(plus_all_pid_bg_2006[i]) for i,pbg in enumerate(pid_bg)]
asigma = plus_asigma
elif charge == 'minus':
all_meas = minus_all_meas
trigger_bias = minus_trigger_bias
pid_bg = minus_all_pid_bg
## next line combines Run 5 and Run 6 b/g asymmetries
[pbg.add(minus_all_pid_bg_2006[i]) for i,pbg in enumerate(pid_bg)]
asigma = minus_asigma
else:
raise KeyError(charge)
syst = []
for i,datum in enumerate(all_meas):
dpid = math.fabs(pid_bg[i].y - datum.y)
err = math.sqrt(pid_bg[i].stat**2 + datum.stat**2)
## use error on background if measurements are consistent
if (pid_bg[i].stat > dpid): dpid = pid_bg[i].stat
## correct equation is just A_sigma, not A_LL - A_sigma
#dasigma = math.fabs(asigma[i].y - datum.y)
dasigma = math.fabs(asigma[i].y)
err = math.sqrt(asigma[i].stat**2 + datum.stat**2)
## use error on background if measurements are consistent
if (asigma[i].stat > dasigma): dasigma = asigma[i].stat
tot = math.sqrt( trigger_bias[i]**2 + \
(dpid*pid_bg_frac[i])**2 + \
(dasigma*non_long_frac[i])**2 + \
(relative_lumi_syst)**2 \
)
syst.append(tot)
print 'Systematic Uncertainty for charge=%s, pT=%.1f' % (charge, datum.x)
print 'trigger bias = %.2e' % (trigger_bias[i])
print 'pid background = %.3f * %.2e = %.2e' % (pid_bg_frac[i], dpid, pid_bg_frac[i]*dpid)
print 'non-long beam = %.3f * %.2e = %.2e' % (non_long_frac[i], dasigma, non_long_frac[i]*dasigma)
print 'relative lumi = %.2e' % relative_lumi_syst
print 'Total: %e' % tot
print '----------------------------------------------------'
return syst
def dis2008_run6_projections():
"""using real 2006 data, plot projected statistical significance of Run 6 inclusive"""
ROOT.gStyle.SetOptDate(0)
asym_plus = analysis.AsymmetryGenerator('asym_plus')
asym_minus = analysis.AsymmetryGenerator('asym_minus')
runlist = analysis.long2_run6
scalar_path = os.environ['STAR'] + '/StRoot/StSpinPool/StTamuRelLum/inputs/run6.txt'
scalars = analysis.ScalarCounts(scalar_path)
polarizations = analysis.Polarizations.Final
from analysis.asym import theoryCurves
plusGraphs = [
theoryCurves(analysis.asym.werner_plus_dss_cteqm5_std, analysis.xsec.werner_plus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_plus_dss_cteqm5_zero, analysis.xsec.werner_plus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_plus_dss_cteqm5_max, analysis.xsec.werner_plus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_plus_dss_cteqm5_min, analysis.xsec.werner_plus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_plus_dss_cteqm5_gsc, analysis.xsec.werner_plus_dss_cteqm5_pt).getGraph()
]
minusGraphs = [
theoryCurves(analysis.asym.werner_minus_dss_cteqm5_std, analysis.xsec.werner_minus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_minus_dss_cteqm5_zero, analysis.xsec.werner_minus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_minus_dss_cteqm5_max, analysis.xsec.werner_minus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_minus_dss_cteqm5_min, analysis.xsec.werner_minus_dss_cteqm5_pt).getGraph(),
theoryCurves(analysis.asym.werner_minus_dss_cteqm5_gsc, analysis.xsec.werner_minus_dss_cteqm5_pt).getGraph()
]
## generate the asymmetries
allFiles = glob(run6_hist_dir + '/chargedPions_*.hist.root')
for fname in allFiles:
run = analysis.getRun(fname)
if runlist is None or run in runlist:
print fname, run
tfile = ROOT.TFile(fname)
mgr = analysis.HistogramManager(tfile,['pt'])
try:
bin6 = scalars[str(run) + '-5-6']
bin7 = scalars[str(run) + '-5-7']
bin8 = scalars[str(run) + '-5-8']
bin9 = scalars[str(run) + '-5-9']
except KeyError:
print run, 'is not in the scalars database'
continue
uu = bin6.uu + bin7.uu + bin8.uu + bin9.uu
ud = bin6.ud + bin7.ud + bin8.ud + bin9.ud
du = bin6.du + bin7.du + bin8.du + bin9.du
dd = bin6.dd + bin7.dd + bin8.dd + bin9.dd
try:
pol = polarizations[bin7.fill]
except KeyError:
print bin7.fill, 'has no final polarization values'
asym_plus.FillFromHistogramManager(mgr, 'jetpatch', 1, uu, ud, du, dd, pol.py, pol.pb)
asym_minus.FillFromHistogramManager(mgr, 'jetpatch', -1, uu, ud, du, dd, pol.py, pol.pb)
tfile.Close()
## fun with graphics
h1 = asym_plus.GetAsymmetry('ll')
[ h1.SetBinContent(i+1, 0.0) for i in range(4) ]
g1 = ROOT.TGraphErrors(h1)
h2 = asym_minus.GetAsymmetry('ll')
[ h2.SetBinContent(i+1, 0.0) for i in range(4) ]
g2 = ROOT.TGraphErrors(h2)
for grList in (plusGraphs, minusGraphs):
#grList[1].SetLineStyle(3)
grList[1].SetLineColor(ROOT.kBlue)
#grList[2].SetLineStyle(4)
grList[2].SetLineColor(ROOT.kRed)
#grList[3].SetLineStyle(2)
grList[3].SetLineColor(ROOT.kGreen)
#grList[4].SetLineStyle(5)
grList[4].SetLineColor(ROOT.kMagenta)
for gr in grList:
gr.SetLineWidth(3)
## ignore bin width errors
for gr in (g1,g2):
for point in range(gr.GetN()):
gr.SetPointError(point, 0.0, gr.GetErrorY(point))
line = ROOT.TLine(2.0, 0.0, 10.0, 0.0)
line.SetLineStyle(2)
latex = ROOT.TLatex()
leg = ROOT.TLegend(0.13, 0.65, 0.35, 0.88)
leg.SetFillStyle(0)
leg.SetBorderSize(0)
leg.AddEntry(plusGraphs[0],' GRSV-STD', 'l')
leg.AddEntry(plusGraphs[1],' #Delta G = 0', 'l')
leg.AddEntry(plusGraphs[2],' #Delta G = G', 'l')
leg.AddEntry(plusGraphs[3],' #Delta G = -G', 'l')
leg.AddEntry(plusGraphs[4],' GS Set C', 'l')
bg = ROOT.TH1D(h1)
bg.Reset()
bg.SetYTitle(' A_{LL}')
bg.GetYaxis().SetRangeUser(-0.11, 0.11)
## pi-plus
c1 = ROOT.TCanvas('c1','A_{LL} for #pi^{+}', 1060, 800)
bg.SetXTitle('#pi^{+} P_{T} (GeV/c)')
bg.DrawCopy()
g1.SetMarkerSize(0.9);
g1.SetMarkerStyle(21)
g1.Draw('p')
[ g.Draw('l') for g in plusGraphs ]
#systGraph['plus'].SetLineColor(1)
#systGraph['plus'].SetFillColor(15)
#systGraph['plus'].Draw('fl')
line.Draw('same')
leg.Draw('p')
latex.DrawLatex(2.3,0.12," #vec{p} + #vec{p} #rightarrow #pi^{+} + X at #sqrt{s}=200 GeV \
-1< #eta^{#pi}< 1 ")
latex.DrawLatex(2.6,-0.07,"2006 STAR Projections");
## pi-minus
c2 = ROOT.TCanvas('c2','A_{LL} for #pi^{-}', 1060, 800)
bg.SetXTitle('#pi^{-} P_{T} (GeV/c)')
bg.DrawCopy()
g2.SetMarkerSize(0.9);
g2.SetMarkerStyle(20)
g2.Draw('p')
[ g.Draw('l') for g in minusGraphs ]
#systGraph['minus'].SetLineColor(1)
#systGraph['minus'].SetFillColor(15)
#systGraph['minus'].Draw('fl')
line.Draw('same')
leg.Draw('p')
latex.DrawLatex(2.3,0.12," #vec{p} + #vec{p} #rightarrow #pi^{-} + X at #sqrt{s}=200 GeV \
-1< #eta^{#pi}< 1 ")
latex.DrawLatex(2.6,-0.07,"2006 STAR Projections")
raw_input('wait here:')
c1.Print('.gif')
c2.Print('.gif')
def z_slope_run5(trig='jetpatch', runlist=None, charge=0):
    """fits exponential z slopes for trigger- and away-side jets, Run 5.

    Sums the 2-D z_jet / z_away_jet histograms over the runs in runlist
    (all runs when runlist is None), projects each jet-pT slice onto z,
    fits an 'expo' in a pT-dependent window, and writes the per-bin fits
    to zslopes_<trig>.ps.  Blocks on raw_input before closing the file.
    """
    z_jet = None
    z_away_jet = None
    ## the first file must stay open so the cloned histograms remain valid
    keepMeOpen = []
    allFiles = glob(run5_hist_dir + '/chargedPions_*.hist.root')
    for fname in allFiles[:]:
        run = analysis.getRun(fname)
        if runlist is None or run in runlist:
            print fname, run
            tfile = ROOT.TFile(fname)
            mgr = analysis.HistogramManager(tfile,['z_jet', 'z_away_jet'],(trig,))
            if z_jet is None:
                z_jet = mgr['anyspin'][trig].trackHistograms(charge)['z_jet'].Clone()
                z_away_jet = mgr['anyspin'][trig].trackHistograms(charge)['z_away_jet'].Clone()
                keepMeOpen.append(tfile)
            else:
                z_jet.Add(mgr['anyspin'][trig].trackHistograms(charge)['z_jet'])
                z_away_jet.Add(mgr['anyspin'][trig].trackHistograms(charge)['z_away_jet'])
    ## one z projection per jet-pT bin
    z_jet_pt = [z_jet.ProjectionY('z_jet_pt_%d' % (i+1,), i+1,i+1) \
        for i in range(z_jet.GetNbinsX())]
    z_away_jet_pt = [z_away_jet.ProjectionY('z_away_jet_pt_%d' % (i+1,), i+1,i+1) \
        for i in range(z_away_jet.GetNbinsX())]
    ps = ROOT.TPostScript('zslopes_%s.ps' % trig)
    c = ROOT.TCanvas('c','',100,100,600,800)
    c.Divide(3,4)
    ## BTOW energy thresholds for the HT triggers; zero otherwise
    if trig =='96233':
        threshold = 6.4
    elif trig == '96221':
        threshold = 4.5
    else:
        threshold = 0
    f = ROOT.gROOT.GetFunction('expo')
    f.SetLineWidth(2)
    f.SetLineStyle(2)
    for i,g in enumerate(z_jet_pt):
        title = '%.1f < p_{T,jet} < %.1f ' % (z_jet.GetBinLowEdge(i+1), z_jet.GetBinLowEdge(i+2))
        pad = c.cd(i+1)
        pad.SetTitle(title)
        ROOT.gPad.SetLogy()
        ## anything lower is restricted by jet pT cut
        fit_min = 2.0 / z_jet.GetBinLowEdge(i+1)
        ## kinda arbitrary, just ensuring enough energy in BTOW to fire trigger
        fit_max = (z_jet.GetBinLowEdge(i+1) - threshold)/z_jet.GetBinLowEdge(i+1)
        if fit_max - fit_min < 0: fit_max += 0.5
        z_jet_pt[i].SetTitle(title)
        z_jet_pt[i].GetXaxis().SetTitle('z')
        z_jet_pt[i].Fit('expo','r', '', fit_min, fit_max)
        ## annotate the trigger-side fit in black ...
        writer = ROOT.TLatex()
        writer.SetNDC()
        writer.DrawLatex(0.42, 0.95, 'slope %.2f  #chi^{2}: %.1f/%d' % \
            (f.GetParameter(1), f.GetChisquare(), f.GetNDF()) )
        ## ... and the away-side fit in red on the same pad
        writer2 = ROOT.TLatex()
        writer2.SetNDC()
        writer2.SetTextColor(ROOT.kRed)
        z_away_jet_pt[i].SetMarkerColor(ROOT.kRed)
        z_away_jet_pt[i].SetLineColor(ROOT.kRed)
        z_away_jet_pt[i].Fit('expo', 'r', 'same', fit_min, fit_max)
        writer2.DrawLatex(0.42, 0.85, 'slope %.2f  #chi^{2}: %.1f/%d' % \
            (f.GetParameter(1), f.GetChisquare(), f.GetNDF()) )
    raw_input('wait here:')
    ps.Close()
def away_side_asymmetries_run6(runlist):
    """draws Run 6 away-side A_LL vs. away_lead_pt for pi+ and pi-.

    Accumulates spin-sorted yields keyed by 'away_lead_pt' over the runs
    in runlist (all runs when runlist is None), using the BJP1/BJP2
    relative-luminosity bins and final polarizations.  Blocks on raw_input.
    """
    asym_plus = analysis.AsymmetryGenerator('asym_plus', key='away_lead_pt')
    asym_minus = analysis.AsymmetryGenerator('asym_minus', key='away_lead_pt')
    scalar_path = os.environ['STAR'] + '/StRoot/StSpinPool/StTamuRelLum/inputs/run6.txt'
    scalars = analysis.ScalarCounts(scalar_path)
    polarizations = analysis.Polarizations.Final
    ## generate the asymmetries
    allFiles = glob(run6_hist_dir + '/chargedPions_*.hist.root')[:]
    for fname in allFiles:
        run = analysis.getRun(fname)
        if runlist is None or run in runlist:
            print fname, run
            tfile = ROOT.TFile(fname)
            mgr = analysis.HistogramManager(tfile,['away_lead_pt'])
            ## fall back from board 5 to board 6 scalar bins when needed
            try:
                bin6 = scalars.get(str(run) + '-5-6') or scalars[str(run) + '-6-6']
                bin7 = scalars.get(str(run) + '-5-7') or scalars[str(run) + '-6-7']
                bin8 = scalars.get(str(run) + '-5-8') or scalars[str(run) + '-6-8']
                bin9 = scalars.get(str(run) + '-5-9') or scalars[str(run) + '-6-9']
            except KeyError:
                print run, 'is not in the scalars database'
                continue
            uu = bin6.uu + bin7.uu + bin8.uu + bin9.uu
            ud = bin6.ud + bin7.ud + bin8.ud + bin9.ud
            du = bin6.du + bin7.du + bin8.du + bin9.du
            dd = bin6.dd + bin7.dd + bin8.dd + bin9.dd
            try:
                pol = polarizations[bin7.fill]
            except KeyError:
                print bin7.fill, 'has no final polarization values'
                continue
            asym_plus.FillFromHistogramManager(mgr, 'jetpatch', 1, uu, ud, du,
                dd, pol.py, pol.pb)
            asym_minus.FillFromHistogramManager(mgr, 'jetpatch', -1, uu, ud, du,
                dd, pol.py, pol.pb)
            tfile.Close()
    line = ROOT.TLine(2.0, 0.0, 10.0, 0.0)
    line.SetLineStyle(2)
    c1 = ROOT.TCanvas()
    h1 = asym_plus.GetAsymmetry('ll')
    h1.GetYaxis().SetRangeUser(-0.1, 0.1)
    h1.SetTitle('Run 6 away-side A_{LL} BJP1 #pi^{+}')
    h1.SetXTitle('p_{T}')
    h1.SetMarkerStyle(21)
    h1.Draw('e1')
    line.Draw('same')
    c2 = ROOT.TCanvas()
    h2 = asym_minus.GetAsymmetry('ll')
    h2.GetYaxis().SetRangeUser(-0.1, 0.1)
    h2.SetTitle('Run 6 away-side A_{LL} BJP1 #pi^{-}')
    h2.SetXTitle('p_{T}')
    h2.SetMarkerStyle(20)
    h2.Draw('e1')
    line.Draw('same')
    raw_input('wait here:')
def ssa_by_run(asym_key='ly', runlist=analysis.final_runlist_run5, variable='pt', year=2006, bins=[1,2.0,10.0]):
    """generates canvas for asym_key (ly,lb,ls,us). Each data point is the SSA for a fill

    NOTE(review): despite the docstring above, the generators below are
    keyed per *run* (one AsymmetryGenerator per entry of runlist), and the
    final histograms are plotted against 'run index'.

    Also fills nSigma pull histograms and fits them with a Gaussian, then
    fits the per-run SSA with a constant and prints the fit probability.
    Blocks on raw_input before printing the canvases to .png.
    """
    asym_plus = {}
    asym_minus = {}
    # if variable == 'jet_pt':
    #     bins = [1,5.0,50.0]
    # else:
    #     bins = [1,2.0,10.0]
    ## one single-bin asymmetry generator per run
    for f in runlist:
        asym_plus[f] = analysis.AsymmetryGenerator(name='R%s_plus' % f, bins=bins, key=variable, useR123=True)
        asym_minus[f] = analysis.AsymmetryGenerator(name='R%s_minus' % f, bins=bins, key=variable, useR123=True)
    if year == 2005:
        scalar_path = os.environ['STAR'] + '/StRoot/StSpinPool/StTamuRelLum/inputs/run5.txt'
        allFiles = glob(run5_hist_dir + '/chargedPions_*.hist.root')
    else:
        scalar_path = os.environ['STAR'] + '/StRoot/StSpinPool/StTamuRelLum/inputs/run6.txt'
        allFiles = glob(run6_hist_dir + '/chargedPions_*.hist.root')
    scalars = analysis.ScalarCounts(scalar_path)
    polarizations = analysis.Polarizations.Final
    for fname in allFiles:
        run = analysis.getRun(fname)
        if runlist is None or run in runlist:
            print fname, run
            tfile = ROOT.TFile(fname)
            mgr = analysis.HistogramManager(tfile,(variable,))
            ## fall back from board 5 to board 6 scalar bins when needed
            try:
                bin6 = scalars.get(str(run) + '-5-6') or scalars[str(run) + '-6-6']
                bin7 = scalars.get(str(run) + '-5-7') or scalars[str(run) + '-6-7']
                bin8 = scalars.get(str(run) + '-5-8') or scalars[str(run) + '-6-8']
                bin9 = scalars.get(str(run) + '-5-9') or scalars[str(run) + '-6-9']
            except KeyError:
                print run, 'is not in the scalars database'
                continue
            uu = bin7.uu + bin8.uu + bin9.uu
            ud = bin7.ud + bin8.ud + bin9.ud
            du = bin7.du + bin8.du + bin9.du
            dd = bin7.dd + bin8.dd + bin9.dd
            ## bin 6 only enters the 2006 relative luminosities
            if year == 2006:
                uu += bin6.uu
                ud += bin6.ud
                du += bin6.du
                dd += bin6.dd
            try:
                pol = polarizations[bin7.fill]
            except KeyError:
                print bin7.fill, 'has no final polarization values'
                continue
            asym_plus[run].FillFromHistogramManager(mgr, 'jetpatch', 1, uu, ud, du, dd, pol.py, pol.pb)
            asym_minus[run].FillFromHistogramManager(mgr, 'jetpatch', -1, uu, ud, du, dd, pol.py, pol.pb)
            tfile.Close()
    title = {'ll': 'Double Spin', 'ly':'Yellow Beam', 'lb':'Blue Beam', 'ls':'Like-Sign', 'us':'Unlike-Sign'}
    final_hist_plus = ROOT.TH1D('final_hist_plus','#pi^{+} %s SSA' % title[asym_key], len(runlist), 0.5, len(runlist)+0.5)
    final_hist_minus = ROOT.TH1D('final_hist_minus','#pi^{-} %s SSA' % title[asym_key], len(runlist), 0.5, len(runlist)+0.5)
    sigma_plus = ROOT.TH1D('sigma_plus', '#pi^{+} %s SSA Deviation Per Run' % \
        title[asym_key], 50, -7.0, 7.0)
    sigma_minus = ROOT.TH1D('sigma_minus', '#pi^{-} %s SSA Deviation Per Run' % \
        title[asym_key], 50, -7.0, 7.0)
    if variable == 'jet_pt':
        [h.SetTitle('Jet %s SSA' % title[asym_key]) for h in (final_hist_minus,final_hist_plus)]
    marker_color = {'ll': ROOT.kBlack, 'ly':ROOT.kYellow, 'lb':ROOT.kBlue, 'ls':ROOT.kRed, 'us':ROOT.kBlack}
    for h in (final_hist_plus, final_hist_minus):
        h.SetMarkerColor( marker_color[asym_key] )
        h.GetYaxis().SetRangeUser(-0.2, 0.2)
        h.SetXTitle('run index')
    final_hist_plus.SetMarkerStyle(21)
    final_hist_minus.SetMarkerStyle(20)
    for h in (sigma_plus, sigma_minus):
        h.SetXTitle('n#sigma')
    ## transfer the single-bin SSA of each run into the summary histograms;
    ## runs with zero statistical error are reported and skipped
    for i,f in enumerate(runlist):
        hplus = asym_plus[f].GetAsymmetry(asym_key)
        try:
            sigma_plus.Fill(hplus.GetBinContent(1)/hplus.GetBinError(1))
            final_hist_plus.SetBinContent( i+1, hplus.GetBinContent(1) )
            final_hist_plus.SetBinError( i+1, hplus.GetBinError(1) )
            print '%d  % .4f  % .4f  % .4f' % (f, hplus.GetBinContent(1), hplus.GetBinError(1), hplus.GetBinContent(1)/hplus.GetBinError(1))
        except ZeroDivisionError:
            print 'ACK', f, hplus.GetBinContent(1), hplus.GetBinError(1)
        hplus.Delete()
        hminus = asym_minus[f].GetAsymmetry(asym_key)
        try:
            sigma_minus.Fill(hminus.GetBinContent(1)/hminus.GetBinError(1))
            final_hist_minus.SetBinContent( i+1, hminus.GetBinContent(1) )
            final_hist_minus.SetBinError( i+1, hminus.GetBinError(1) )
        except ZeroDivisionError:
            print 'ACK', f, hminus.GetBinContent(1), hminus.GetBinError(1)
        hminus.Delete()
    ROOT.gStyle.SetOptStat('oume')
    ROOT.gStyle.SetOptFit(111)
    c1 = ROOT.TCanvas()
    sigma_minus.Fit('gaus')
    c2 = ROOT.TCanvas()
    sigma_plus.Fit('gaus')
    ROOT.gStyle.SetOptStat(0)
    ROOT.gStyle.SetErrorX(0.0)
    ROOT.gStyle.SetOptFit(111)
    cp = ROOT.TCanvas('%s_ssa_run_plus' % asym_key)
    final_hist_plus.Draw('e1')
    final_hist_plus.Fit('pol0')
    print ROOT.gROOT.GetFunction('pol0').GetProb()
    cm = ROOT.TCanvas('%s_ssa_run_minus' % asym_key)
    final_hist_minus.Draw('e1')
    final_hist_minus.Fit('pol0')
    print ROOT.gROOT.GetFunction('pol0').GetProb()
    raw_input('wait here')
    cp.Print('.png')
    cm.Print('.png')
def datamc(simuFile, dataDir, runlist, trigger):
    """overlays data and luminosity-normalized simulation for QA.

    For each event-level and track-level key, draws the data histogram
    (summed interactively over runlist from dataDir) with the scaled
    simulation from simuFile on top, plus a (data-simu)/data ratio pad.
    Optionally prints every canvas to .png when the user answers 'y'.
    """
    fsimu = ROOT.TFile(simuFile)
    simu = analysis.HistogramManager(fsimu)
    event_keys = ['vz', 'vzBBC', 'jet_pt', 'lead_neutral', 'inclusive_jet_mult',
        'dijet_mult']
    track_keys = ['pt', 'eta', 'phi', 'nHitsFit', 'dEdx', 'dcaG', 'nSigmaPion',
        'pt_near', 'pt_away', 'pt_bg',
        'away_mult', 'near_mult', 'away_lead_pt', 'near_lead_pt',
        'lead_matched', 'lead_cutfail', 'z_away2', 'z_away3', 'z_away4',
        'away2_eta', 'away2_nHitsFit', 'away2_dcaG', 'vz', 'distortedPt']
    log_scale = ('lead_neutral', 'inclusive_jet_mult', 'dijet_mult', 'pt', 'dcaG',
        'pt_near', 'pt_away', 'pt_bg', 'away_mult', 'near_mult', 'away_lead_pt',
        'near_lead_pt', 'lead_matched', 'lead_cutfail', 'z_away2', 'z_away3', 'z_away4',
        'away2_dcaG', 'jet_pt', 'distortedPt')
    ## normalize based on integrated luminosity in mb
    ## lumi for long2_run6, 2008-08-28: 5.43 pb^-1
    norm = 5.43E+09
    ## lumi for final_runlist_run5, 2008-09-04: 2.11 pb^-1
    # norm = 2.11E+09
    print 'normalization factor for simulations: %.2E' % norm
    ## scale normalization down to PID efficiency (but neglecting contam)
    # track_norm = norm * 0.82
    ## if I include p/k/e contamination this factor is approximately
    track_norm = norm * 0.93
    rebin = ('vz', 'vzBBC', 'jet_pt')
    keepme = []
    for key in event_keys:
        ## one canvas per key: main comparison pad + ratio pad
        c = analysis.graphics.canvas1e(key)
        cpad = c.cd(1)
        epad = c.cd(2)
        if key in log_scale: cpad.SetLogy()
        d = analysis.hadd_interactive(dataDir, runlist, trigger, 'anyspin', None, key)
        if d.GetTitle() == '': d.SetTitle(key)
        s = simu.anyspin[trigger][key]
        s.Scale(norm)
        s.SetLineColor(ROOT.kRed)
        s.SetMarkerColor(ROOT.kRed)
        if key in rebin:
            d.Rebin()
            s.Rebin()
        cpad.cd()
        d.Draw('e')
        s.Draw('hist same')
        ## time for the ratios
        line = ROOT.TLine(d.GetBinLowEdge(1), 0.0, \
            d.GetBinLowEdge(d.GetNbinsX()+1), 0.0)
        line.SetLineStyle(4)
        epad.cd()
        ratio = d.Clone()
        ratio.Add(s, -1.0)
        ratio.Divide(d)
        ratio.SetTitle('')
        ratio.GetYaxis().SetTitle('(data-simu)/data')
        ratio.GetYaxis().SetRangeUser(-1.0, 1.0)
        ratio.Draw()
        line.Draw('same')
        c.Update()
        keepme.extend([c,d,s])
    for key in track_keys:
        ## per-charge canvas (pi-, pi+ and their ratio pads) ...
        c = analysis.graphics.canvas3('track ' + key)
        mpad = c.cd(1)
        ppad = c.cd(2)
        empad = c.cd(3)
        eppad = c.cd(4)
        ## ... plus a charge-summed canvas with its own ratio pad
        c2 = analysis.graphics.canvas1e('track sum ' + key)
        pad = c2.cd(1)
        epad = c2.cd(2)
        pads = (mpad, ppad, pad)
        if key in log_scale:
            [p.SetLogy() for p in pads]
        ## old-style and/or conditional: the data histogram backing the
        ## simulated 'distortedPt' comparison is the plain 'pt' histogram
        dkey = key=='distortedPt' and 'pt' or key
        da = analysis.hadd_interactive(dataDir, runlist, trigger, 'anyspin', 'sum', dkey)
        dm = analysis.hadd_interactive(dataDir, runlist, trigger, 'anyspin', 'minus', dkey)
        dp = analysis.hadd_interactive(dataDir, runlist, trigger, 'anyspin', 'plus', dkey)
        d2 = (da, dm, dp)
        da.SetTitle(key + ' for #pi^{-} + #pi^{+}')
        dm.SetTitle(key + ' for #pi^{-}')
        dp.SetTitle(key + ' for #pi^{+}')
        sa = simu.anyspin[trigger].tracks_sum[key]
        sm = simu.anyspin[trigger].tracks_minus[key]
        sp = simu.anyspin[trigger].tracks_plus[key]
        s2 = (sa, sm, sp)
        for s in s2:
            s.SetLineColor(ROOT.kRed)
            s.SetMarkerColor(ROOT.kRed)
            s.Scale(track_norm)
        if key in rebin:
            [h.Rebin() for h in d2+s2]
        pad.cd()
        da.Draw('e')
        sa.Draw('hist same')
        mpad.cd()
        dm.Draw('e')
        sm.Draw('hist same')
        ppad.cd()
        dp.Draw('e')
        sp.Draw('hist same')
        ## time for the ratios
        line = ROOT.TLine(dm.GetBinLowEdge(1), 0.0, \
            dm.GetBinLowEdge(dm.GetNbinsX()+1), 0.0)
        line.SetLineStyle(4)
        epad.cd()
        ratio = da.Clone()
        ratio.Add(sa, -1.0)
        ratio.Divide(da)
        ratio.Draw()
        line.Draw('same')
        empad.cd()
        mratio = dm.Clone()
        mratio.Add(sm, -1.0)
        mratio.Divide(dm)
        mratio.Draw()
        line.Draw('same')
        eppad.cd()
        pratio = dp.Clone()
        pratio.Add(sp, -1.0)
        pratio.Divide(dp)
        pratio.Draw()
        line.Draw('same')
        ratios = (ratio, mratio, pratio)
        for h in ratios:
            h.SetTitle('')
            h.GetYaxis().SetTitle('(data-simu)/data')
            h.GetYaxis().SetRangeUser(-1.0, 1.0)
        c.Update()
        c2.Update()
        keepme.extend([c, da, dm, dp, sa, sm, sp, ratio, mratio, pratio])
    save = raw_input('save these histograms? (y/N): ')
    if save == 'y':
        for item in keepme:
            if item.ClassName() == 'TCanvas':
                item.Print(item.GetTitle().replace(' ', '_') + '.png')
    [o.Delete() for o in keepme]
def mcasym(fname, trigger='jetpatch', keys=None):
    """overlays the MC asymmetry histograms for several scenarios vs. z.

    Reads the HistogramManager in fname and draws, on one two-pad canvas,
    the pi- (pad 1) and pi+ (pad 2) histograms for each scenario key,
    color-coded per scenario.  Blocks on raw_input before returning.
    """
    f = ROOT.TFile(fname)
    keys = keys or ['STD','MAX','MIN','ZERO','GS_NLOC']
    print keys
    mgr = analysis.HistogramManager(f, keys = keys+['denom'])
    keepme = []
    line = ROOT.TLine(0,0,1,0)
    line.SetLineStyle(2)
    ## one color per scenario
    color = {
        'STD': ROOT.kBlack,
        'MAX': ROOT.kRed,
        'MIN': ROOT.kGreen,
        'ZERO': ROOT.kBlue,
        'GS_NLOC': ROOT.kMagenta
    }
    c = analysis.graphics.canvas2()
    for i,key in enumerate(keys):
        ## first key establishes the frame; later keys overlay with 'same'
        opt = i>0 and 'e2 same' or 'e2'
        c.cd(1)
        line.Draw()
        minus = mgr.anyspin[trigger].tracks_minus[key].Clone()
        # minus.GetXaxis().SetRangeUser(0.1, 0.8)
        minus.GetYaxis().SetRangeUser(-0.05,0.05)
        minus.SetLineColor(color[key])
        minus.SetFillColor(color[key])
        minus.SetTitle('#pi^{-}')
        minus.GetXaxis().SetTitle('z')
        minus.Draw(opt)
        c.cd(2)
        line.Draw()
        plus = mgr.anyspin[trigger].tracks_plus[key].Clone()
        # plus.GetXaxis().SetRangeUser(0.1, 0.8)
        plus.GetYaxis().SetRangeUser(-0.05,0.05)
        plus.SetLineColor(color[key])
        plus.SetFillColor(color[key])
        plus.SetTitle('#pi^{+}')
        plus.GetXaxis().SetTitle('z')
        plus.Draw(opt)
        keepme.extend([c,minus,plus])
    raw_input('wait:')
| 2.125 | 2 |
tensorflow_federated/python/tests/backend_test.py | ddayzzz/federated | 0 | 12761382 | # Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_federated as tff
def get_all_execution_contexts():
  """Returns (name, context) pairs for each native execution context."""
  native = tff.backends.native
  return [
      ('native_local', native.create_local_execution_context()),
      ('native_sizing', native.create_sizing_execution_context()),
      ('native_debug', native.create_thread_debugging_execution_context()),
  ]
def with_contexts(*args):
  """A decorator for creating tests parameterized by context.

  Note: To use this decorator your test is required to inherit from
  `parameterized.TestCase`.

  It can be applied directly:

  ```
  @with_contexts
  def foo(self):
    ...
  ```

  or called with named-context arguments:

  ```
  @with_contexts(
      ('label', executor),
      ...
  )
  def foo(self):
    ...
  ```

  When applied directly, or called with no arguments, the contexts default
  to those returned by `get_all_execution_contexts`.  Any arguments given
  must be acceptable to `parameterized.named_parameters`.

  Args:
    *args: Either the test function to decorate, or named contexts for the
      decorated method (tuples or dicts as for `named_parameters`).

  Returns:
    A test generator to be handled by `parameterized.TestGeneratorMetaclass`.
  """

  def make_decorator(named_contexts):

    def decorator(fn):
      parameters = named_contexts or get_all_execution_contexts()

      @parameterized.named_parameters(*parameters)
      def wrapped_fn(self, context):
        # Install the context for the duration of the test body.
        stack = tff.framework.get_context_stack()
        with stack.install(context):
          fn(self)

      return wrapped_fn

    return decorator

  # Bare usage: the single argument is the test function itself.
  if len(args) == 1 and callable(args[0]):
    return make_decorator(())(args[0])
  return make_decorator(args)
class ExecutionContextsTest(parameterized.TestCase):
  """Smoke tests running simple federated computations in every context."""

  @with_contexts
  def test_federated_value(self):
    # Placing a value at the server should execute in every context.
    @tff.federated_computation
    def foo(x):
      return tff.federated_value(x, tff.SERVER)

    result = foo(10)
    self.assertIsNotNone(result)

  @with_contexts
  def test_federated_zip(self):
    @tff.federated_computation([tff.FederatedType(tf.int32, tff.CLIENTS)] * 2)
    def foo(x):
      return tff.federated_zip(x)

    result = foo([[1, 2], [3, 4]])
    self.assertIsNotNone(result)

  @with_contexts
  def test_federated_zip_with_twenty_elements(self):
    # This test will fail if execution scales factorially with number of
    # elements zipped.
    num_element = 20
    num_clients = 2

    @tff.federated_computation([tff.FederatedType(tf.int32, tff.CLIENTS)] *
                               num_element)
    def foo(x):
      return tff.federated_zip(x)

    value = [list(range(num_clients))] * num_element
    result = foo(value)
    self.assertIsNotNone(result)

  @with_contexts
  def test_identity(self):
    @tff.federated_computation
    def foo(x):
      return x

    result = foo(10)
    self.assertIsNotNone(result)
if __name__ == '__main__':
  # Clear the default context so each test installs its own via with_contexts.
  tff.test.set_no_default_context()
  absltest.main()
| 2.078125 | 2 |
hackerearth/Algorithms/Will she accept him/test.py | ATrain951/01.python-com_Qproject | 4 | 12761383 | <gh_stars>1-10
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
    """Feeds canned stdin to the solution module and checks its stdout."""

    @patch('builtins.input', side_effect=[
        '4',
        'kalyan nobodyintheworld',
        'rahul allgirlsallhunontheplanet',
        'physicsguy nobodynobodylikesitbetter',
        'lal llaggyel',
    ])
    def test_case_0(self, input_mock=None):
        captured = io.StringIO()
        # Importing the solution module executes it; capture what it prints.
        with redirect_stdout(captured):
            import solution
        expected = (
            'We are only friends\n'
            'Love you too\n'
            'We are only friends\n'
            'Love you too\n'
        )
        self.assertEqual(captured.getvalue(), expected)
if __name__ == '__main__':
    # Discover and run the test case above when executed directly.
    unittest.main()
| 2.765625 | 3 |
scripts/kegg_parser.py | Midnighter/component-contribution | 1 | 12761384 | #!/usr/bin/python
import logging
import re
from itertools import imap
import gzip
from types import StringType
class KeggParsingException(Exception):
    """Raised when a KEGG flat-file field cannot be parsed."""
def NormalizeNames(name_str):
    """Normalize a KEGG-style list of names."""
    # Tabs act as spaces; semicolons separate the individual names.
    return [piece.strip()
            for piece in name_str.replace('\t', ' ').split(';')]
def NormalizeReactions(reactions_str, verbose=False):
    """Normalize a KEGG-style list of reaction IDs.

    NOTE(flamholz): Some enzymes have lists of reactions as such:
        "RXXXXX > RXXXXY RYYYZZ"
    where RXXXXX is a general reaction and the others have specific
    substrates. We may want special parsing for this, but for now
    we don't have it.

    Args:
        reactions_str: the string containing a list of reactions.
        verbose: whether to log lots of warnings.

    Returns:
        A list of KEGG reaction IDs parsed as integers.
    """
    if not reactions_str:
        return []
    ids = []
    # Raw string: '\d' in a plain literal is an invalid escape in Python 3.
    pattern = re.compile(r'.*(R\d{5}).*')
    for token in reactions_str.split():
        m = pattern.match(token)
        if m:
            # Drop the leading 'R' and keep the numeric part of the ID.
            ids.append(int(m.group(1)[1:]))
        elif verbose:
            logging.warning('Failed to parse reaction ID %s', token)
            logging.info('Full reaction string: %s', reactions_str)
    return ids
def NormalizeOrganisms(organisms_str):
    """Split a KEGG-style tab-delimited organism list into names."""
    names = organisms_str.split('\t')
    return names
def ParseOrthologyMapping(orthology_str):
    """Parses the orthology string to a mapping.

    Args:
        orthology_str: the orthology string in the KEGG file.

    Returns:
        A mapping from integer orthology IDs to names.
    """
    pattern = re.compile(r'^(K\d{5}) (.*)$')
    mapping = {}
    # Plain iteration rather than itertools.imap: identical behavior and no
    # dependence on the Python-2-only imap import at the top of this file.
    for entry in orthology_str.split('\t'):
        match = pattern.match(entry)
        if not match:
            continue
        orthology_id, name = match.groups()
        # Strip the leading 'K' and store the numeric ID.
        mapping[int(orthology_id[1:])] = name
    return mapping
def ParseOrganismToGeneMapping(genes_str):
    """Parses the genes string to a mapping.

    TODO(flamholz): Keep open reading frame data as well.

    Args:
        genes_str: the tab-separated GENES string in the KEGG file.

    Returns:
        A dict mapping 3-letter organism codes to lists of gene IDs;
        open-reading-frame annotations in parentheses are stripped.
    """
    splitted = genes_str.split('\t')
    pattern = re.compile(r'^([A-Z]{3}): (.*)$')

    d = {}
    # Generator expression instead of the Python-2-only itertools.imap;
    # identical behavior under Python 2 and works under Python 3.
    for match in (pattern.match(s) for s in splitted):
        if not match:
            continue
        organism_id, genes = match.groups()
        gene_ids_w_names = re.split(r'\s', genes)
        # 'b0001(thrL)' -> 'b0001'
        gene_ids = [s.split('(')[0] for s in gene_ids_w_names]
        if gene_ids:
            d[organism_id] = gene_ids
    return d
class EntryDictWrapper(dict):
    """Dict of raw KEGG field strings with typed accessors.

    The accessors use the boolean False as a "field missing" sentinel,
    which is safe because all stored values are (non-boolean) strings;
    the sentinel is now compared with `is` so a stored falsy value can
    never be mistaken for "missing".
    """

    def GetStringField(self, field_name, default_value=None):
        """Return the raw string value of a field.

        Raises if the field is missing and no default was given.
        """
        if field_name not in self:
            if default_value is not None:
                return default_value
            raise Exception("Missing obligatory string field: " + field_name)
        return self[field_name]

    def GetStringListField(self, field_name, default_value=None):
        """Return a field split on whitespace, or default_value if missing."""
        val = self.GetStringField(field_name, default_value=False)
        if val is False:
            if default_value is None:
                raise Exception(
                    "Missing obligatory string-list field: " + field_name)
            return default_value
        return val.split()

    def GetBoolField(self, field_name, default_value=True):
        """Return a boolean field ('TRUE'/'FALSE', case-insensitive).

        Bug fix: any other value now raises ValueError instead of silently
        falling through and returning None.
        """
        val = self.GetStringField(field_name, default_value=False)
        if val is False:
            if default_value is None:
                raise Exception(
                    "Missing obligatory boolean field: " + field_name)
            return default_value
        if val.upper() == 'TRUE':
            return True
        if val.upper() == 'FALSE':
            return False
        raise ValueError(
            "Invalid boolean value for field %s: %r" % (field_name, val))

    def GetFloatField(self, field_name, default_value=None):
        """Return a field parsed as a float, or default_value if missing."""
        val = self.GetStringField(field_name, default_value=False)
        if val is False:
            if default_value is None:
                raise Exception(
                    "Missing obligatory float field: " + field_name)
            return default_value
        return float(val)

    def GetVFloatField(self, field_name, default_value=()):
        """Return a field parsed as a list of floats (whitespace-separated)."""
        val = self.GetStringField(field_name, default_value=False)
        if val is False:
            if default_value is None:
                raise Exception(
                    "Missing obligatory vector-float field: " + field_name)
            return default_value
        return [float(x) for x in val.split()]
class ParsedKeggFile(dict):
    """A class encapsulating a parsed KEGG file.

    Maps entry names to their field dictionaries (EntryDictWrapper) and
    remembers the order in which entries appeared in the file.
    """

    def __init__(self):
        """Initialize the ParsedKeggFile object."""
        self.ordered_entries = []

    def _AddEntry(self, entry, fields):
        """Protected helper for adding an entry from the file.

        Args:
            entry: the entry key.
            fields: the fields for the entry.
        """
        if entry in self:
            logging.warning('Overwriting existing entry for %s', entry)
        else:
            self.ordered_entries.append(entry)
        self[entry] = EntryDictWrapper(fields)

    def entries(self):
        """Return the list of entry keys, in file order."""
        return self.ordered_entries

    @staticmethod
    def FromKeggFile(file):
        """Parses a file from KEGG.

        Args:
            file: a file handle, or a path string ('.gz' paths are opened
                with gzip).

        Returns:
            A ParsedKeggFile mapping entry names to fields.
        """
        if type(file) == StringType:
            if file[-3:] == '.gz':
                kegg_file = gzip.open(file)
            else:
                kegg_file = open(file, 'r')
        else:
            kegg_file = file
        return ParsedKeggFile._FromKeggFileHandle(kegg_file)

    @staticmethod
    def _FromKeggFileHandle(kegg_file):
        """Parses a file from KEGG. Uses a file handle directly.

        For testing. The handle is closed before returning.

        Args:
            kegg_file: an open file handle over KEGG flat-file text.

        Returns:
            A ParsedKeggFile mapping entry names to fields.
        """
        parsed_file = ParsedKeggFile()

        line_counter = 0
        line = kegg_file.readline()
        field_map = {}
        field = None

        while line:
            if line[0:3] == '///':
                # End-of-entry marker: flush the accumulated fields.
                if field_map:
                    entry = re.split(r'\s\s+', field_map['ENTRY'])[0].strip()
                    parsed_file._AddEntry(entry, field_map)
                field = None
                field_map = {}
            elif line[0] in [' ', '\t']:
                # Leading whitespace means a continuation of the previous field.
                if field is None:
                    raise KeggParsingException(
                        'First line starts with a whitespace (space/tab)')
                value = line.strip()
                field_map[field] = field_map[field] + "\t" + value
            else:
                try:
                    field, value = line.split(None, 1)
                except ValueError:
                    raise KeggParsingException(
                        'ERROR: line %d cannot be split: %s' % (line_counter, line))
                field_map[field] = value
            line = kegg_file.readline()
            line_counter += 1

        # Flush a trailing entry that was not terminated by '///'.
        if 'ENTRY' in field_map:
            entry = re.split(r'\s\s+', field_map['ENTRY'])[0].strip()
            parsed_file._AddEntry(entry, field_map)
        kegg_file.close()
        return parsed_file

    @staticmethod
    def FromKeggAPI(s):
        """Parses a file from KEGG. The result string from the KEGG API.

        For testing.

        Args:
            s: the string that is the result of serv.bget(...) using the KEGG API

        Returns:
            A ParsedKeggFile mapping entry names to fields.
        """
        parsed_file = ParsedKeggFile()
        curr_field = ""
        field_map = {}

        for line in s.split('\n'):
            # KEGG flat files use a fixed-width 12-character field-name column.
            field = line[0:12].strip()
            value = line[12:].strip()

            if field[:3] == "///":
                entry = re.split(r'\s\s+', field_map['ENTRY'])[0]
                parsed_file._AddEntry(entry, field_map)
                field_map = {}
            else:
                if field != "":
                    curr_field = field
                if curr_field in field_map:
                    field_map[curr_field] = field_map[curr_field] + "\t" + value
                else:
                    field_map[curr_field] = value

        if 'ENTRY' in field_map:
            entry = re.split(r'\s\s+', field_map['ENTRY'])[0]
            parsed_file._AddEntry(entry, field_map)
        return parsed_file

    @staticmethod
    def ParseKeggReactionLine(line):
        """Parse one line from a module's REACTION field.

        Args:
            line: e.g. 'R00001  C00001 + C00002 => C00003 (x2)'.

        Returns:
            (reaction, rid, flux): reaction is normalized to 'left <=> right'
            (sides swapped for right-to-left arrows), rid is the reaction
            identifier, flux defaults to 1 unless an '(xN)' suffix is present.
        """
        rexp = r'([a-zA-Z0-9,_]+)\s+([C\s\+\d\.]+)\s+(<?[-=]>?)\s+([C\s\+\d\.]+)(.*)'
        try:
            rid, left_clause, dir_clause, right_clause, remainder = \
                re.findall(rexp, line)[0]
        except Exception as e:
            raise Exception(str(e) + ': ' + line)

        if dir_clause in ['=>', '->', '<=>', '<->', '=', '-']:
            reaction = left_clause + " <=> " + right_clause
        elif dir_clause in ['<=', '<-']:
            reaction = right_clause + " <=> " + left_clause
        else:
            raise ValueError("unclear reaction direction symbol: " + dir_clause)

        flux = 1
        if remainder != "":
            # Optional '(xN)' suffix sets the flux multiplier.
            for f in re.findall(r'\(x([0-9\.\-\s]+)\)', remainder):
                flux = float(f)
        return reaction, rid, flux

    @staticmethod
    def ParseReactionModule(field_map):
        """Parse a module's REACTION field into parallel (rids, fluxes, reactions) lists."""
        rids = []
        fluxes = []
        reactions = []
        for line in field_map["REACTION"].split('\t'):
            if line.strip() == '':
                continue
            reaction, rid, flux = ParsedKeggFile.ParseKeggReactionLine(line)
            reactions.append(reaction)
            rids.append(rid)
            fluxes.append(flux)
        return rids, fluxes, reactions

    @staticmethod
    def ParseBoundModule(field_map):
        """Parse a module's BOUND field into {KEGG compound ID: (low, up)}.

        If a line carries only one number it is used for both bounds.
        """
        bounds = {}  # a dictionary from KEGG IDs to a tuple of (low, up) bounds
        # Bug fix: the numeric character class now accepts a decimal point,
        # so values like '0.001' are no longer truncated at the '.'.
        rexp = r'(C[0-9]+)\s+([0-9e\.\-\+]+)\s*(.*)'
        for line in field_map["BOUND"].split('\t'):
            try:
                cid, low, up = re.findall(rexp, line)[0]
            except Exception as e:
                raise Exception(str(e) + ': ' + line)
            up = up or low
            low = float(low.strip())
            up = float(up.strip())
            bounds[cid] = (low, up)
        return bounds
src/testing/c1.py | nsnave/qft | 1 | 12761385 | <gh_stars>1-10
# Random Comment
x = 3
# Same arithmetic as before, folded into one expression: (3 + 2) * 2 == 10.
y = 2 * (x + 2)
print(y)
| 2.140625 | 2 |
openstates/openstates-master/openstates/ak/legislators.py | Jgorsick/Advocacy_Angular | 0 | 12761386 | <reponame>Jgorsick/Advocacy_Angular<filename>openstates/openstates-master/openstates/ak/legislators.py
import re
import lxml.html
from billy.scrape.legislators import LegislatorScraper, Legislator
from openstates.utils import LXMLMixin
class AKLegislatorScraper(LegislatorScraper, LXMLMixin):
    """Scrapes Alaska state legislators from the official chamber websites."""

    jurisdiction = 'ak'
    # Only the current term's member pages are scraped.
    latest_only = True

    def _scrape_legislator(self, chamber, term, url):
        """Scrape a single legislator's detail page and save the record.

        :param chamber: 'upper' (senate) or 'lower' (house)
        :param term: term identifier, passed through to the Legislator record
        :param url: URL of the member's detail page
        """
        page = self.lxmlize(url)

        # The page header renders as "<title> <name>"; keep only the name.
        (_title, name) = page.xpath(
            '//div[@class="holder-legislator"]/h1/text()')
        (photo_url, ) = page.xpath('//div[@class="bioleft"]/img/@src')

        # Bio sidebar lines look like "Label: value"; build a label->value dict.
        bio = page.xpath('//div[@class="bioright"]/a/..//text()')
        bio = {x.split(':')[0].strip(): x.split(':')[1].strip()
               for x in bio if x.strip()}

        email = bio['Email']  # NOTE(review): parsed but never attached to the record
        district = bio['District']
        party = self._party_map[bio['Party']]

        leg = Legislator(
            term = term,
            chamber = chamber,
            district = district,
            full_name = name,
            party = party,
            photo_url = photo_url
        )
        leg.add_source(url)

        # Capitol ("Session Contact") office block has a fixed layout:
        # [header, address line 1, address line 2, phone, fax].
        capitol_office = [
            x.strip()
            for x in page.xpath('//div[@class="bioleft"]//text()')
            if x.strip()
        ]
        assert capitol_office[0] == 'Session Contact'
        assert capitol_office[3].startswith('Phone:')
        assert capitol_office[4].startswith('Fax:')

        # Phone/fax entries may be just the bare "Phone:"/"Fax:" label,
        # in which case None is stored.
        leg.add_office(
            type = 'capitol',
            name = 'Capitol Office',
            address = capitol_office[1] + '\n' + capitol_office[2],
            phone = capitol_office[3][len('Phone: '): ] if
                len(capitol_office[3]) > len('Phone:') else None,
            fax = capitol_office[4][len('Fax: '): ] if
                len(capitol_office[4]) > len('Fax:') else None,
        )

        # District ("Interim Contact") office uses the same fixed layout.
        district_office = [
            x.strip()
            for x in page.xpath('//div[@class="bioright"][2]//text()')
            if x.strip()
        ]

        # Some members don't have district offices listed, so skip them
        # (a real office always includes an "AK" address line).
        if any('AK' in x for x in district_office):
            assert district_office[0] == 'Interim Contact'
            assert district_office[3].startswith('Phone:')
            assert district_office[4].startswith('Fax:')

            leg.add_office(
                type = 'district',
                name = 'District Office',
                address = district_office[1] + '\n' + district_office[2],
                phone = district_office[3][len('Phone: '): ] if
                    len(district_office[3]) > len('Phone:') else None,
                fax = district_office[4][len('Fax: '): ] if
                    len(district_office[4]) > len('Fax:') else None,
            )

        self.save_legislator(leg)

    def scrape(self, chamber, term):
        """Scrape every member of the given chamber for the given term."""
        # Site party labels -> billy's canonical party names.
        self._party_map = {
            'Democrat': 'Democratic',
            'Republican': 'Republican',
            'Non Affiliated': 'Independent',
        }

        if chamber == 'upper':
            url = 'http://senate.legis.state.ak.us/'
        else:
            url = 'http://house.legis.state.ak.us/'
        page = self.lxmlize(url)

        # Each list item links to one member's detail page.
        for link in page.xpath('//ul[@class="item lists"]/li/a/@href'):
            self._scrape_legislator(chamber, term, link)
| 2.515625 | 3 |
vendor/packages/translate-toolkit/translate/storage/statistics.py | DESHRAJ/fjord | 0 | 12761387 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Module to provide statistics and related functionality.
"""
from translate import lang
from translate.lang import factory
# calling classifyunits() in the constructor is probably not ideal.
# idea: have a property for .classification that calls it if necessary
# If we add units or change translations, statistics are out of date
# Compare with modules/Status.py in pootling that uses a bitmask to
# filter units
# Add support for reading and writing Pootle style .stats files
# Consider providing quickstats
class Statistics(object):
    """Manages statistics for storage objects.

    Units are classified into named buckets ("fuzzy", "blank", "translated",
    ...) stored in self.classification as lists of unit indexes.

    NOTE: this module is Python 2 code (it references `unicode` and
    `dict.iteritems` at runtime).
    """

    def __init__(self, sourcelanguage='en', targetlanguage='en', checkerstyle=None):
        self.sourcelanguage = sourcelanguage
        self.targetlanguage = targetlanguage
        self.language = lang.factory.getlanguage(self.sourcelanguage)
        # self.init_checker(checkerstyle)

        self.classification = {}

    def init_checker(self, checkerstyle=None):
        """Lazily construct the pofilter checker used for check-* classes."""
        from translate.filters import checks
        from translate.filters import pofilter
        checkerclasses = [checkerstyle or checks.StandardChecker, pofilter.StandardPOChecker]
        self.checker = pofilter.POTeeChecker(checkerclasses=checkerclasses)

    def fuzzy_units(self):
        """Return a list of fuzzy units."""
        if not self.classification:
            self.classifyunits()
        units = self.getunits()
        return [units[item] for item in self.classification["fuzzy"]]

    def fuzzy_unitcount(self):
        """Returns the number of fuzzy units."""
        return len(self.fuzzy_units())

    def translated_units(self):
        """Return a list of translated units."""
        if not self.classification:
            self.classifyunits()
        units = self.getunits()
        return [units[item] for item in self.classification["translated"]]

    def translated_unitcount(self):
        """Returns the number of translated units."""
        return len(self.translated_units())

    def untranslated_units(self):
        """Return a list of untranslated units."""
        if not self.classification:
            self.classifyunits()
        units = self.getunits()
        return [units[item] for item in self.classification["blank"]]

    def untranslated_unitcount(self):
        """Returns the number of untranslated units."""
        return len(self.untranslated_units())

    def getunits(self):
        """Returns a list of all units in this object (overridden by subclasses)."""
        return []

    def get_source_text(self, units):
        """Joins the unit source strings in a single string of text."""
        source_text = ""
        for unit in units:
            source_text += unit.source + "\n"
            # Multistring sources expose extra plural forms via .strings.
            plurals = getattr(unit.source, "strings", [])
            if plurals:
                source_text += "\n".join(plurals[1:])
        return source_text

    def wordcount(self, text):
        """Returns the number of words in the given text."""
        return len(self.language.words(text))

    def source_wordcount(self):
        """Returns the number of words in the source text."""
        source_text = self.get_source_text(self.getunits())
        return self.wordcount(source_text)

    def translated_wordcount(self):
        """Returns the number of translated words in this object."""
        text = self.get_source_text(self.translated_units())
        return self.wordcount(text)

    def untranslated_wordcount(self):
        """Returns the number of untranslated words in this object."""
        text = self.get_source_text(self.untranslated_units())
        return self.wordcount(text)

    def classifyunit(self, unit):
        """Returns a list of the classes that the unit belongs to.

        :param unit: the unit to classify
        """
        classes = ["total"]
        if unit.isfuzzy():
            classes.append("fuzzy")
        if unit.gettargetlen() == 0:
            classes.append("blank")
        if unit.istranslated():
            classes.append("translated")
        #TODO: we don't handle checking plurals at all yet, as this is tricky...
        source = unit.source
        target = unit.target
        # Python 2 only: decode byte-string sources when the target is unicode.
        if isinstance(source, str) and isinstance(target, unicode):
            source = source.decode(getattr(unit, "encoding", "utf-8"))
        #TODO: decoding should not be done here
        # checkresult = self.checker.run_filters(unit, source, target)
        checkresult = {}
        for checkname, checkmessage in checkresult.iteritems():
            classes.append("check-" + checkname)
        return classes

    def classifyunits(self):
        """Makes a dictionary of which units fall into which classifications.

        This method iterates over all units.
        """
        self.classification = {}
        self.classification["fuzzy"] = []
        self.classification["blank"] = []
        self.classification["translated"] = []
        self.classification["has-suggestion"] = []
        self.classification["total"] = []
        # for checkname in self.checker.getfilters().keys():
        #     self.classification["check-" + checkname] = []
        for item, unit in enumerate(self.unit_iter()):
            classes = self.classifyunit(unit)
            # if self.basefile.getsuggestions(item):
            #     classes.append("has-suggestion")
            for classname in classes:
                if classname in self.classification:
                    self.classification[classname].append(item)
                else:
                    # Bug fix: store a one-element list, not a bare int -- a
                    # later .append() on this key would otherwise raise
                    # AttributeError.
                    self.classification[classname] = [item]
        self.countwords()

    def countwords(self):
        """Counts the source and target words in each of the units."""
        self.sourcewordcounts = []
        self.targetwordcounts = []
        for unit in self.unit_iter():
            self.sourcewordcounts.append([self.wordcount(text) for text in getattr(unit.source, "strings", [""])])
            self.targetwordcounts.append([self.wordcount(text) for text in getattr(unit.target, "strings", [""])])

    def reclassifyunit(self, item):
        """Updates the classification of a unit in self.classification.

        :param item: an integer that is an index in .getunits().
        """
        unit = self.getunits()[item]
        self.sourcewordcounts[item] = [self.wordcount(text) for text in unit.source.strings]
        self.targetwordcounts[item] = [self.wordcount(text) for text in unit.target.strings]
        classes = self.classifyunit(unit)
        # if self.basefile.getsuggestions(item):
        #     classes.append("has-suggestion")
        for classname, matchingitems in self.classification.items():
            if (classname in classes) != (item in matchingitems):
                if classname in classes:
                    self.classification[classname].append(item)
                else:
                    self.classification[classname].remove(item)
            self.classification[classname].sort()
        # self.savestats()
| 2.296875 | 2 |
utils.py | ChrisDrozdowski/xclim_indices | 1 | 12761388 | <gh_stars>1-10
import tkinter as tk
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import askopenfilenames
from tkinter.filedialog import asksaveasfilename
from tkinter.filedialog import askdirectory
import xarray as xr
import datetime
def get_open_path(title=''):
    '''
    Show a file dialog and return the chosen NetCDF file path.
    '''
    dialog_root = tk.Tk()
    dialog_root.withdraw()
    return askopenfilename(filetypes=[('NetCDF', '.nc .nc4')], title=title)
def get_open_paths(title=''):
    '''
    Show a file dialog and return a list of chosen NetCDF file paths.
    '''
    dialog_root = tk.Tk()
    dialog_root.withdraw()
    selected = askopenfilenames(filetypes=[('NetCDF', '.nc .nc4')], title=title)
    return list(selected)
def get_save_path(title=''):
    '''
    Show a save dialog for an NC file; a '.nc' suffix is appended if missing.
    '''
    dialog_root = tk.Tk()
    dialog_root.withdraw()
    chosen = asksaveasfilename(filetypes=[('NetCDF', '*.nc')], title=title)
    if chosen and not chosen.endswith('.nc'):
        chosen = chosen + '.nc'
    return chosen
def get_folder_path(title=''):
    '''
    Show a folder dialog; the chosen path is returned with a trailing '/'.
    '''
    dialog_root = tk.Tk()
    dialog_root.withdraw()
    folder = askdirectory(title=title)
    return folder + '/' if folder else folder
def get_to_netcdf_encodings(ds, comp_level=None):
    '''
    Returns default encodings for an xarray Dataset for
    use with Dataset::to_netcdf() method

    Parameters:
        ds          xarray Dataset
        comp_level  Level of compression: 0-9. If 0 no compression.
                    Compression starts with 1.
                    If None, then leave it up to xarray to decide.

    Returns a dict keyed by variable name, suitable for
    Dataset.to_netcdf(encoding=...).
    '''
    # A bit convoluted but allows for adding new encodings in future.
    # Merges two dicts of dicts based on keys in dict1 (dict1 is mutated
    # in place and also returned).
    def merge(dict1, dict2):
        for key in dict1:
            if key in dict2:
                dict1[key].update(dict2[key])
        return dict1

    # Map from Python datatype to default NetCDF _FillValue
    # See netCDF4.default_fillvals
    fillvalue_map = {
        'int8': -127, # i1
        'uint8': 255, # u1
        'int16': -32767, # i2
        'uint16': 65535, # u2
        'int32': -2147483647, # i4
        'uint32': 4294967295, # u4
        'int64': -9223372036854775806, # i8
        'uint64': 18446744073709551614, # u8
        'float32': 9.969209968386869e+36, # f4
        'float64': 9.969209968386869e+36, # f8
        'str': '\x00' # S1
    }
    # Real compression levels used for NetCDF
    real_comp_levels = [1,2,3,4,5,6,7,8,9]

    # Set up a base dict with key for all variables in ds set to empty dicts
    enc_base = {var_name: {} for var_name in ds.variables}
    enc_fv = {} # Encodings for _FillValue
    enc_cl = {} # Encodings for compression level

    # Iterate each data variable; only variables with > 2 dimensions get a
    # _FillValue / compression entry, the rest are explicitly left alone
    # (coordinate-like 1D/2D variables get _FillValue=None).
    for var_name in ds.data_vars:
        # _FillValue encodings
        # Meant to fix when xarray makes them Python nan which
        # we don't want
        if len(ds[var_name].dims) > 2:
            # Test if _FillValue already exists. If not, add to encoding
            # using default value for data type
            if not '_FillValue' in ds[var_name].attrs:
                def_fv = fillvalue_map[ds[var_name].dtype.name]
                enc_fv[var_name] = dict(_FillValue=def_fv)
            else:
                pass
        else:
            enc_fv[var_name] = dict(_FillValue=None)

        # Compression encodings: zlib on for levels 1-9, explicitly off
        # for 0, untouched for any other value (incl. None).
        if len(ds[var_name].dims) > 2:
            if comp_level in real_comp_levels:
                enc_cl[var_name] = dict(zlib=True, complevel=comp_level)
            elif comp_level == 0:
                enc_cl[var_name] = dict(zlib=False)
            else:
                pass
        else:
            pass

    # Merge the dictionaries and return the merged one
    merged = merge(enc_base, enc_fv)
    merged = merge(merged, enc_cl)
    return merged
def add_to_history(ds, txt='', prepend=True):
    '''
    Adds a timestamped line to the `history` attribute of an xarray Dataset.

    Parameters:
        ds       Dataset (anything with a dict-like ``.attrs``)
        txt      text of the new history entry
        prepend  if True the new entry goes on top, otherwise at the bottom
    '''
    dt = datetime.datetime.now().astimezone().replace(microsecond=0).isoformat()
    entry = dt + ' ' + txt

    hist = ''
    if 'history' in ds.attrs:
        hist = ds.attrs['history']
        hist = hist.strip('\n')

    if not hist:
        # Bug fix: an empty existing history previously produced a stray
        # leading/trailing newline; the first entry now stands alone.
        ds.attrs['history'] = entry
    elif prepend is True:
        ds.attrs['history'] = entry + '\n' + hist
    else:
        ds.attrs['history'] = hist + '\n' + entry
def convert_calendar(ds, cal='proleptic_gregorian'):
    '''
    Set the `calendar` encoding of the `time` variable, if the Dataset has
    one. Default calendar: 'proleptic_gregorian'.
    '''
    if 'time' not in ds.variables:
        return
    ds.time.encoding['calendar'] = cal
| 2.921875 | 3 |
Github/Hangman/hangman.py | ZachhHsu/Stancode_SC101_project | 0 | 12761389 | """
File: hangman.py
Name: <NAME>
-----------------------------
This program plays hangman game.
Users sees a dashed word, trying to
correctly figure the un-dashed word out
by inputting one character each round.
If the user input is correct, show the
updated word on console. Players have N_TURNS
chances to try and win this game.
"""
import random
# This constant controls the number of guess the player has.
N_TURNS = 7
def main():
    """Run one interactive hangman game on stdin/stdout.

    The player gets N_TURNS wrong guesses; each wrong guess draws one more
    body part via hang().
    """
    # Display the dashed word and turns remained
    turns_remained = N_TURNS
    word = random_word()
    old_word = ''
    print('The word looks like: ', end='')
    for i in range(len(word)):
        old_word += '-'
    print(old_word)
    print('You have ' + str(turns_remained) + ' guesses left.')
    # Create a scaffold
    scaffold()
    for k in range(8):  # left and right poles
        print('|     |     |')
    # Start guessing the word
    while True:
        guess = input('Your guess: ')
        if guess.isalpha() and len(guess) == 1:  # case insensitive
            guess = guess.upper()
            new_word = ''  # set up an empty string for the new guessing result this round
            # Correct guess
            if guess in word:
                # Rebuild the displayed word character by character.
                for j in range(len(word)):
                    # Restore already correctly guessed characters
                    if old_word[j].isalpha():
                        new_word += old_word[j]
                    # Add correct and not yet guessed characters
                    elif guess == word[j]:
                        new_word += guess
                    # Dashed undiscovered slots
                    else:
                        new_word += '-'
                print('You are correct!')
                # Win the game if all dashes are un-dashed
                if '-' not in new_word:
                    print('You win!!')
                    print('The word was: ' + new_word)
                    break
                print('The word looks like: ' + new_word)
                # Update the new guessing result to old_word
                old_word = new_word
            # Incorrect guess
            else:
                turns_remained -= 1  # lose one turn
                print('There is no ' + guess + '\'s in the word.')
                # Hang body parts
                hang(turns_remained)
                # Loose the game if no turns are left
                if turns_remained == 0:
                    print('You are completely hung : (')
                    print('The word was: ' + word)
                    break
            print('You have ' + str(turns_remained) + ' guesses left.')
        # Illegal format (not a single alphabetic character)
        else:
            print('illegal format.')
def hang(n):
    """
    Draw the hangman for n remaining guesses.

    n counts down from 6 (head only) to 1; any other value (the final
    wrong guess, n == 0) draws the fully hung figure.
    """
    # Head
    if n == 6:
        scaffold()
        print('|     ( * * )   |')
        print('|     (  V  )   |')
        for i in range(6):
            print('|     |     |')
    # Body
    elif n == 5:
        scaffold()
        print('|     ( * * )   |')
        print('|     (  V  )   |')
        print('|        #      |')
        print('|        #      |')
        print('|        #      |')
        print('|               |')
        print('|               |')
        print('|               |')
    # Left arm
    elif n == 4:
        scaffold()
        print('|     ( * * )   |')
        print('|     (  o  )   |')
        print('|        #      |')
        print('|    ~ ~ #      |')
        print('|        #      |')
        print('|               |')
        print('|               |')
        print('|               |')
    # Right arm
    elif n == 3:
        scaffold()
        print('|     ( * * )   |')
        print('|     (  o  )   |')
        print('|        #      |')
        print('|    ~ ~ # ~ ~  |')
        print('|        #      |')
        print('|               |')
        print('|               |')
        print('|               |')
    # Left leg
    elif n == 2:
        scaffold()
        print('|     ( Q Q )   |')
        print('|     (  W  )   |')
        print('|        #      |')
        print('|    ~ ~ # ~ ~  |')
        print('|        #      |')
        print('|       /       |')
        print('|       \\      |')
        print('|               |')
    # Right leg
    elif n == 1:
        scaffold()
        print('|     ( Q Q )   |')
        print('|     (  W  )   |')
        print('|        #      |')
        print('|    ~ ~ # ~ ~  |')
        print('|        #      |')
        print('|      /  \\    |')
        print('|     \\   /    |')
        print('|               |')
    # Completely hung (n == 0)
    else:
        scaffold()
        print('|     ( X X )   |')
        print('|     (  ^  )   |')
        print('|        #      |')
        print('|    ~ ~ # ~ ~  |')
        print('|        #      |')
        print('|      /  \\    |')
        print('|     \\   /    |')
        print('|               |')
def scaffold():
"""
Create a scaffold
"""
print('<Your Status>')
print('-----------------')
print('| | |')
print('| | |')
def random_word():
    """
    Return a random word (uppercase) from the fixed vocabulary.

    The original 9-branch if/elif chain duplicated a lookup table; a single
    random.choice over the tuple draws from the same uniform distribution.
    """
    words = (
        "NOTORIOUS",
        "GLAMOROUS",
        "CAUTIOUS",
        "DEMOCRACY",
        "BOYCOTT",
        "ENTHUSIASTIC",
        "HOSPITALITY",
        "BUNDLE",
        "REFUND",
    )
    return random.choice(words)
##### DO NOT EDIT THE CODE BELOW THIS LINE #####
# Script entry point.
if __name__ == '__main__':
    main()
| 4.34375 | 4 |
exercicios/tuplas/desafio72.py | costagguilherme/python-desafios | 0 | 12761390 | a = ('zero', 'um', 'dois', 'três', 'quatro', 'cinco', 'seis', 'sete', 'oito', 'nove', 'dez', 'onze', 'doze', 'treze', 'cartoze', 'quinze')
n = int(input('DIGITE UM NÚMERO DE 0 A 15: '))
if n > 15 or n < 0:
while True:
n = int(input('DIGITE UM NÚMERO DE 0 A 15: '))
if 0 <= n <= 15:
break
print(f'Você digitou o numéro {a[n]}') | 3.875 | 4 |
kymatio/phaseexp1d/phaseexp/global_const.py | sixin-zh/kymatio_wph | 0 | 12761391 | import os
from os.path import dirname, abspath, join
import torch
# Absolute path of the directory containing this file.
CODEPATH = dirname(abspath(__file__))
# Sibling 'data' and 'results' directories, one level above the code dir.
DATAPATH = abspath(join(join(CODEPATH, os.pardir), 'data'))
RESPATH = abspath(join(join(CODEPATH, os.pardir), 'results'))
# Default tensor type used throughout the project (double precision).
Tensor = torch.DoubleTensor
| 1.976563 | 2 |
02_Arrays/anti_diagonals.py | Sheetal0601/InterviewBit | 61 | 12761392 | <reponame>Sheetal0601/InterviewBit<gh_stars>10-100
# Anti Diagonals
# https://www.interviewbit.com/problems/anti-diagonals/
#
# Give a N*N square matrix, return an array of its anti-diagonals. Look at the example for more details.
#
# Example:
#
# Input:
#
# 1 2 3
# 4 5 6
# 7 8 9
#
# Return the following :
#
# [
# [1],
# [2, 4],
# [3, 5, 7],
# [6, 8],
# [9]
# ]
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class Solution:
    # @param A : list of list of integers
    # @return a list of list of integers
    def diagonal(self, A):
        """Group the elements of square matrix A by anti-diagonal index i + j."""
        buckets = [[] for _ in range(2 * len(A) - 1)]
        for i, row in enumerate(A):
            for j, value in enumerate(row):
                buckets[i + j].append(value)
        return buckets
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Demo: prints the anti-diagonal decomposition of the 3x3 example matrix.
if __name__ == "__main__":
    s = Solution()
    print(s.diagonal([
        [1, 2, 3],
        [4, 5, 6],
        [7, 8, 9],
    ]))
| 3.78125 | 4 |
ControlDeBasesDeDatos/MySQL.py | jaimeandrescatano/ekorre | 2 | 12761393 | <reponame>jaimeandrescatano/ekorre<gh_stars>1-10
#!/usr/bin/env python
# Control of a MySQL database with Python
# 16072012
# This program accesses a database previously created in MySQL
# through PHPMyAdmin and supports the main operations of a CRUD system:
#
# - Create: inserts a record into a table
# - Retrieve: searches for and displays a record from a table
# - Update: updates a record in a table
# - Delete: deletes a record from a table
#
# NOTE: Python 2 script (print statements, raw_input, `except X, e`).
#
# SECURITY NOTE(review): every SQL statement below is built by string
# concatenation of raw user input, which is vulnerable to SQL injection.
# Use parameterized queries (cur.execute(sql, params)) instead.
import MySQLdb as mdb
import sys
import os

# Clear the terminal and connect to the local MySQL server.
os.system("clear")
con = None
print "Conectando con servidor MySQL \n"
try:
    con = mdb.connect('localhost', 'root', '1milliondollar', 'PythonTest1');
    cur = con.cursor()
    cur.execute("SELECT VERSION()")
    data = cur.fetchone()
    print "Database version : %s " % data
    print "System ready! \n"
except mdb.Error, e:
    print "Error %d: %s" % (e.args[0],e.args[1])
    sys.exit(1)

# Main menu loop: repeats until the user chooses option 5.
while True:
    print "0- Show current tables \n"
    print "1- Add data \n"
    print "2- Search \n"
    print "3- Update data \n"
    print "4- Delete register \n"
    print "5- Exit program \n"
    dato = raw_input()
    # Option 0: list the tables of the current database.
    if dato == "0":
        os.system("clear")
        print "The database has this tables: \n"
        texto="SHOW tables"
        cur.execute(texto)
        con.commit()
        numrows = int(cur.rowcount)
        for i in range(numrows):
            row = cur.fetchone()
            print row[0]
        raw_input("\nPress any key to return")
        os.system("clear")
    # Option 1 (Create): insert a Name/Phone row into the chosen table.
    if dato == "1":
        os.system("clear")
        tabla = raw_input("\nAdd data \nSelect table: ")
        nombre = raw_input("Nombre: ")
        telefono = raw_input("telefono: ")
        texto="INSERT INTO "+tabla+"(Name,Phone) VALUES('"+nombre+"','"+telefono+"')"
        cur.execute(texto)
        con.commit()
        print "\n"+nombre+" info has been added to "+tabla+"! \n"
        raw_input("\nPress any key to return")
        os.system("clear")
    # Option 2 (Retrieve): LIKE search on a user-chosen column.
    if dato == "2":
        os.system("clear")
        tabla = raw_input("\nSearch \nSelect table to search data: ")
        campo = raw_input("Choose search field: ")
        parametro = raw_input("Search data: ")
        texto="SELECT * FROM "+tabla+" WHERE "+campo+" LIKE '%"+parametro+"%'"
        cur.execute(texto)
        con.commit()
        numrows = int(cur.rowcount)
        for i in range(numrows):
            row = cur.fetchone()
            print row[0], row[1]
        raw_input("\nPress any key to return")
        os.system("clear")
    # Option 3 (Update): set one field of the row with the given ID.
    if dato == "3":
        os.system("clear")
        tabla = raw_input("\nUpdate \nIn which table is the data?: ")
        parametro = raw_input("Id of the register: ")
        campo = raw_input("Which field to update: ")
        datonuevo = raw_input("New data: ")
        texto="UPDATE "+tabla+" SET "+campo+" = '"+datonuevo+"' WHERE ID = "+parametro
        cur.execute(texto)
        con.commit()
        print "Register "+parametro+" has been updated!\n"
        raw_input("\nPress any key to return")
        os.system("clear")
    # Option 4 (Delete): remove the row with the given ID.
    if dato == "4":
        os.system("clear")
        tabla = raw_input("\nDelete \nIn which table is the data?: ")
        parametro = raw_input("Id of the register that you want to delete? ")
        texto="DELETE FROM "+tabla+" WHERE Id = "+parametro
        cur.execute(texto)
        con.commit()
        print "Register "+parametro+" has been deleted!\n"
        raw_input("\nPress any key to return")
        os.system("clear")
    # Option 5: leave the menu loop.
    if dato == "5":
        break

print "Bye bye!"
# Close the connection if it was opened successfully.
if con:
    con.close()
| 3.234375 | 3 |
package.py | cxy1997/Thunder | 2 | 12761394 | from Tkinter import PhotoImage
from random import randint
# Velocity lookup tables: direction index 0..3 maps to the four diagonal
# movements (up-left, up-right, down-left, down-right) at 3 px per tick.
dX=[-3,3,-3,3]
dY=[-3,-3,3,3]
def nd(n, x):
    """Return the new direction index after hitting wall ``n``.

    As used by pcg.upd: n=1 top, n=2 left, n=3 bottom, n=4 right.
    Walls 1 and 3 rotate the index by 2 (mod 4), which flips the vertical
    component of the dX/dY tables above; walls 2 and 4 shift it by +1/-1.
    Any other ``n`` falls through and returns None (original behavior).
    """
    if n in (1, 3):
        return (x + 2) % 4
    if n == 2:
        return x + 1
    if n == 4:
        return x - 1
class pcg:
    """A single bouncing power-up package sprite on the game canvas."""

    def __init__(self, x, y, direction, master):
        # master is the Packages container; master.master owns the canvas
        # and master.img is the shared package image.
        self.x = x
        self.y = y
        self.direction = direction
        self.master = master
        self.pic = self.master.master.canvas.create_image(self.x, self.y, image = self.master.img)

    def upd(self):
        """Advance one step, reflecting off the playfield borders."""
        # Horizontal bounce: keep x within [20, 460].
        if 20 > self.x + dX[self.direction]:
            self.direction = nd(2, self.direction)
        elif self.x + dX[self.direction] > 460:
            self.direction = nd(4, self.direction)
        self.x += dX[self.direction]
        # Vertical bounce: keep y within [20, 590].
        if 20 > self.y + dY[self.direction]:
            self.direction = nd(1, self.direction)
        elif self.y + dY[self.direction] > 590:
            self.direction = nd(3, self.direction)
        self.y += dY[self.direction]
        # Move the canvas sprite by the same delta as the logical position.
        self.master.master.canvas.move(self.pic, dX[self.direction], dY[self.direction])
class Packages:
    """Container managing all package sprites currently on the canvas."""

    def __init__(self, master):
        # master is the game object owning the canvas, the player ("sonic"),
        # the score, the boss and the enemies -- assumed from usage below.
        self.master = master
        self.img = PhotoImage(file = 'images\\package.gif')
        self.data = []
        self.length = 0

    def clear(self):
        """Remove every package sprite from the canvas and reset state."""
        for i in self.data:
            self.master.canvas.delete(i.pic)
        del self.data
        self.data = []
        self.length = 0

    def add(self, x, y):
        """Spawn a new package at (x, y) moving in a random downward direction."""
        self.data.append(pcg(x, y, randint(2, 3), self))
        self.length += 1

    def upd(self):
        """Move all packages and apply pickups that touch the player.

        Pickup priority: heal up to 120 hp, then weapon upgrade (max 3),
        then +240 shield. Collected packages award 100 points each.
        """
        x = []  # indexes of collected packages, newest first
        for j in range(self.length):
            i = self.data[j]
            i.upd()
            # Squared-distance pickup test against the player position.
            if (i.x - self.master.sonic.x) ** 2 + (i.y - self.master.sonic.y) ** 2 < 2000:
                if self.master.sonic.hp < 120:
                    self.master.sonic.hp += 25
                    if self.master.sonic.hp > 120:
                        self.master.sonic.hp = 120
                elif self.master.sonic.weaponmode < 3:
                    self.master.sonic.weaponmode += 1
                else:
                    self.master.sonic.shield += 240
                # Prepend so deletion below walks indexes in descending order.
                x = [j]+x
                self.master.score += 100
        self.length -= len(x)
        for i in x:
            self.master.canvas.delete(self.data[i].pic)
            del self.data[i]
        # While no boss is active, keep packages above other sprites.
        if not self.master.boss.status:
            for i in self.data:
                self.master.canvas.lift(i.pic)
        self.master.enemies.up()
batch_reduction/combat.py | phylatechnologies/ibd_classification_benchmark | 0 | 12761395 | import numpy as np
import pandas as pd
import statsmodels.api as sm
from sklearn.preprocessing import OneHotEncoder
import statistics
import math
import sys
import itertools
import time
np.seterr(over='raise', under="ignore")
def batch_pp(df, covariates, batch_column, ignore):
    """This function takes in a df, the name of the covariate columns, and the batch column
    and it outputs a feature count matrix, feature zero inflation matrix,
    batch dummy matrix (one hot vectors as rows), covariate matrix (concatenated one hot vectors )
    (covariates coefficient matrix [X_ij], batch dummy matrix [X_batch],
    the zero inflation matrix [I_ijk], and count matrix [Y])
    NOTE: this df can be a combination of datasets, or an individual dataset"""
    # df: [dataframe] input with rows as samples and columns as feature counts.
    #     should only have OTU names, covariates, and batch_column in keyspace
    # covariates: [List] of the covariates to retain and estimate betas for
    # batch_column: [string] column that defines the batches in this dataframe
    # ignore: [List] of column names to ignore
    # Returns: dict with keys "X_cov", "X_batch", "I", "Y", "ignore".
    ################################### Check proper input ###################################
    if (batch_column not in df.keys()):
        raise ValueError("Column name " + str(batch_column) + " not found")
    if (not set(covariates) <= set(df.keys())):
        raise ValueError("Covariate columns not found in dataframe")
    ################################### Turn batch column to one hot vector ###################################
    # note: for all features, batch matrix and covariate matrix will be the same.
    X_batch = pd.get_dummies(df[batch_column], drop_first=False)
    ################################### Turn covariate columns covariate matrix ###################################
    # number of columns is the number of betas to estimate
    X_cov = pd.get_dummies(df[covariates], drop_first=True)
    intercept = [1 for _ in range(X_cov.shape[0])]
    # adding intercept term
    X_cov.insert(0, "intercept", intercept)
    ################################### Build the feature zero inflation matrix ###################################
    # turn numbers to 1 and keep zeroes the way they are
    # NOTE(review): replacing the *string* '0.0' before the bool cast only
    # matters if counts were read as text; numeric zeros already map to False.
    otu_keys = df.keys().drop(ignore)
    I = df[otu_keys].replace('0.0', False).astype(bool).replace(False, 0).replace(True, 1)
    df_dict = {"X_cov": X_cov,
               "X_batch": X_batch,
               "I": I,
               "Y": df[otu_keys],
               "ignore": df[ignore]}
    return df_dict
def reduce_batch_effects(Y, I, X_cov, X_batch, verbose=False):
    """Fit feature-wise OLS models and standardize the data for batch correction.

    Consumes the output of batch_pp: for every feature with enough non-zero
    samples it regresses the non-zero counts on the combined design matrix
    (covariates + batch dummies), derives a per-feature scale estimate
    sigma_p, builds the standardized matrix Z, and delegates the empirical
    Bayes shrinkage step to eb_estimator, whose result is returned.

    Parameters
    ----------
    Y : DataFrame of feature counts (rows = samples, columns = features).
    I : DataFrame of zero-inflation indicators (1 where count >= 1, 0 o.w.).
    X_cov : covariate design matrix (intercept + covariate dummies).
    X_batch : batch dummy matrix (one-hot batch membership per sample).
    verbose : print per-feature progress when True.

    Returns
    -------
    dict with keys "gamma_star", "delta_star" and "BR" (see eb_estimator).
    """
    # merge the covariate dummies and the batch dummies into the full design matrix
    X_mat = pd.concat([X_cov, X_batch], axis=1).astype(float)
    # type conversions and index storing
    Y = Y.astype(float)
    num_beta_cov = X_cov.shape[1]
    num_beta_batch = X_batch.shape[1]  # NOTE(review): unused below
    num_features = len(Y.keys())  # NOTE(review): unused below
    num_samples = Y.shape[0]
    # Z collects the standardized residuals; same labels/shape as Y
    Z = pd.DataFrame(index=Y.index, columns=Y.columns)
    # for each of the features, we will calculate the batch reduction coefficients, then reduce the batch effects
    count = 0
    otu_names = list(Y.keys())
    # keep only features with more than 2 non-zero samples (stdev needs >= 2 points)
    otu_names = [x for x in otu_names if Y[x][Y[x] > 0].count() > 2]
    sigma_p_store = {}
    beta_params_store = pd.DataFrame(columns=Y.columns, index=X_mat.columns)
    beta_cov_store = pd.DataFrame(columns=Y.columns, index=X_cov.columns)
    beta_batch_store = {}
    start = time.time()
    for p in otu_names:
        # select only the feature as a row
        y_ijp = Y[p]
        y_store = Y[p]  # storing the original column (unchanged)
        I_ijp = I[p].astype(float)  # NOTE(review): unused below
        if (count % 100 == 0 and verbose):
            print("Estimating β_cov, β_batch, and σ_p for feature {}".format(count))
        # --------- Estimate beta_p and beta_batch through OLS regression --------------
        # ignore the keys with zero counts and only fit with non zero samples
        fit_index = list(y_ijp.to_numpy().astype(float).nonzero()[0])
        zero_index = list(set(range(num_samples)) - set(fit_index))
        zero_keys = y_store.keys()[zero_index]  # NOTE(review): unused below
        # use only non zero counts for index to fit our OLS
        y_ijp = y_ijp.iloc[fit_index]
        # y_ijp = y_ijp[fit_index] # PREVIOUS VERSION
        X_design_mat = X_mat.iloc[fit_index, :]
        X_cov_mat = X_cov.iloc[fit_index, :]
        X_batch_mat = X_batch.iloc[fit_index, :]  # NOTE(review): unused below
        # fit ols
        model = sm.OLS(y_ijp, X_design_mat)
        res = model.fit()
        ############# Calculate sigma_p using the standard deviation of previous regression ###########
        # NOTE(review): the residual subtracts only the covariate part of the
        # fit, so batch effects remain in the residual — confirm this is intended.
        residuals = y_ijp - X_cov_mat.dot(res.params[:num_beta_cov])
        sigma_hat_p = statistics.stdev(residuals)
        # store in feature keyed dictionary of standard deviations
        sigma_p_store[p] = sigma_hat_p
        # separate the beta cov from the beta batch
        beta_params = res.params
        beta_cov = res.params[:num_beta_cov]
        beta_batch = res.params[num_beta_cov:]
        # store list of beta parameters indexed by feature
        beta_params_store[p] = beta_params
        beta_cov_store[p] = beta_cov
        beta_batch_store[p] = beta_batch
        ####################################### Calculate Z_ijp #######################################
        # standardize: remove the covariate fit and divide by the feature scale
        z_ijp = (y_ijp - X_cov_mat.dot(res.params[:num_beta_cov])) / sigma_hat_p
        Z[p] = z_ijp
        count += 1
        if count % 25 == 0:
            end = time.time()
            print('{}/{} completed in: {}s'.format(count, len(otu_names), round(end - start, 2)))
    # ------------ LOOP END -----------------------------------------------------------------
    end = time.time()
    print('Total OLS time: {}s'.format(round(end - start, 2)))
    # samples skipped above (zero counts / filtered features) remain NaN; zero them out
    Z = Z.fillna(0)
    beta_params_store = beta_params_store.astype(float)
    # return X_mat.dot(beta_params_store)
    estimates = eb_estimator(X_batch, Z, sigma_p=sigma_p_store, X_add=X_cov.dot(beta_cov_store), verbose=verbose)
    return estimates
def eb_estimator(X_batch, Z, sigma_p, X_add, max_itt=6000, verbose=False):
    """Empirical Bayes estimation of per-batch location/scale parameters.

    Iteratively solves for gamma_star (location) and delta_star (scale) for
    every (batch, feature) pair, then rescales the standardized matrix Z and
    adds back the covariate fit.

    Parameters
    ----------
    X_batch : batch dummy matrix (n x alpha).
    Z : matrix of standardized data (n x p).
    sigma_p : dict of per-feature scale estimates (from reduce_batch_effects).
    X_add : matrix of covariate fits to add back after parameter estimation.
    max_itt : maximum fixed-point iterations before raising ValueError.
    verbose : print per-feature convergence info when True.

    Returns
    -------
    dict with "gamma_star" and "delta_star" (parameter matrices) and "BR",
    the batch-reduced data matrix.
    """
    # Standardized output matrix init
    Z_out = pd.DataFrame(index=Z.index, columns=Z.columns)
    # number of genes/otus
    G = Z.shape[1]
    # number of samples in each batch
    N = X_batch.sum(axis=0)
    # sample mean for each OTU in each batch (p X alpha) matrix
    gamma_hat = Z.T.dot(X_batch) / N
    # hyper-parameter estimates for batch effect location - gamma
    gamma_bar = gamma_hat.mean(axis=0).astype(float)
    tau_bar = ((gamma_hat.sub(gamma_bar) ** 2).sum(axis=0)) / (G - 1)
    # hyper-parameter estimates for batch effect scale - delta (p X alpha) matrix
    delta_hat = (((Z - X_batch.dot(gamma_hat.T)) ** 2).T.dot(X_batch)) / (N - 1)
    v_bar = delta_hat.sum(axis=0) / G
    s_bar = ((delta_hat.sub(v_bar) ** 2).sum(axis=0)) / (G - 1)
    # method-of-moments style estimates for the prior on delta
    lambda_bar = (v_bar + (2 * s_bar)) / (s_bar)
    theta_bar = (v_bar ** 3 + v_bar * s_bar) / (s_bar)
    # iteratively solve for gamma_star_ip and delta_star_ip
    # initialize the keyed matrices
    gamma_star_mat = pd.DataFrame(index=gamma_hat.index, columns=gamma_hat.columns)
    delta_star_mat = pd.DataFrame(index=gamma_hat.index, columns=gamma_hat.columns)
    batches = gamma_hat.keys()
    genes = list(gamma_hat.T.keys())
    # skip features that are identically zero after standardization
    genes = [x for x in genes if Z[x].max() != 0]
    start = time.time()
    count = 0
    for i in batches:
        # hyper-parameters restricted to this batch
        theta_i = theta_bar[i]
        lambda_i = lambda_bar[i]
        n = N[i]
        tau_i = tau_bar[i]
        gamma_bar_i = gamma_bar[i]
        for p in genes:
            # restrict feature p to the samples of batch i
            # NOTE(review): filtering != 0 also drops genuine zero z-scores
            # inside the batch — confirm that is intended.
            gene_counts_in_batch = X_batch[i] * Z[p]
            gene_counts_in_batch = gene_counts_in_batch[gene_counts_in_batch != 0]
            changed_samples = gene_counts_in_batch.keys()
            gamma_hat_ip = gamma_hat[i][p]
            # initial iteration values
            delta_star_ip_init = delta_hat[i][p]
            gamma_star_ip_init = f_gamma_star_ip(tau_i, gamma_bar_i, gamma_hat_ip, delta_star_ip_init, n)
            # calculate the next step in the iteration
            delta_star_ip_next = f_delta_star_ip(theta_i, lambda_i, gene_counts_in_batch, gamma_star_ip_init, n)
            gamma_star_ip_next = f_gamma_star_ip(tau_i, gamma_bar_i, gamma_hat_ip, delta_star_ip_next, n)
            conv_delta = abs(delta_star_ip_next - delta_star_ip_init)
            conv_gamma = abs(gamma_star_ip_next - gamma_star_ip_init)
            itt = 1
            while ((conv_delta + conv_gamma) > 1e-8):
                # store previous iteration of the values
                delta_star_ip_init = delta_star_ip_next
                gamma_star_ip_init = gamma_star_ip_next
                # take our next "guess" for the values
                delta_star_ip_next = f_delta_star_ip(theta_i, lambda_i, gene_counts_in_batch, gamma_star_ip_init, n)
                # NOTE(review): this uses delta_star_ip_init while the pre-loop
                # step above used the freshly updated delta — possibly a
                # deliberate Jacobi-style update, but verify the inconsistency.
                gamma_star_ip_next = f_gamma_star_ip(tau_i, gamma_bar_i, gamma_hat_ip, delta_star_ip_init, n)
                # calculate how close we are to convergence
                conv_delta = abs(delta_star_ip_next - delta_star_ip_init)
                conv_gamma = abs(gamma_star_ip_next - gamma_star_ip_init)
                itt += 1
                if (itt == max_itt):
                    raise ValueError("Maximum iteration reached for convergence. Try setting a higher limit")
            if (verbose):
                print("OTU {} on dataset {} Convergence took {} steps".format(p[-15:], i, itt))
            # store found values in the relevant matrices
            gamma_star_mat[i][p] = gamma_star_ip_next
            delta_star_mat[i][p] = delta_star_ip_next
            # un-standardize: rescale, shift, and add back the covariate fit
            a = (sigma_p[p] / delta_star_ip_next)
            b = (Z[p][changed_samples] - gamma_star_ip_next)
            c = X_add[p]
            Z_out[p][changed_samples] = (a * b + c)[changed_samples]
        count += 1
        end = time.time()
        print('{}/{} completed in: {}s'.format(count, len(batches), round(end - start, 2)))
    # ------------ LOOP END -----------------------------------------------------------------
    end = time.time()
    print('Total Batch Reduction Parameter Estimation time: {}s'.format(round(end - start, 2)))
    # untouched (skipped / all-zero) cells remain NaN; zero them out
    Z_out = Z_out.fillna(0)
    return {"gamma_star": gamma_star_mat,
            "delta_star": delta_star_mat,
            "BR": Z_out}
def f_delta_star_ip(theta_bar, lambda_bar, Z_in_batch, gamma_star, n):
    """Posterior mean update for the scale parameter delta*_ip.

    Args:
        theta_bar: scale hyper-parameter estimate for batch i.
        lambda_bar: shape hyper-parameter estimate for batch i.
        Z_in_batch: standardized counts of OTU p restricted to batch i
            (any object supporting elementwise ``-``/``**`` and ``.sum()``).
        gamma_star: current posterior mean of the location parameter.
        n: number of samples in batch i.

    Returns:
        Updated posterior mean of the scale parameter for OTU p in batch i.
    """
    sum_sq_dev = ((Z_in_batch - gamma_star) ** 2).sum()
    numerator = theta_bar + 0.5 * sum_sq_dev
    denominator = (n / 2) + lambda_bar - 1
    return numerator / denominator
def f_gamma_star_ip(tau_bar, gamma_bar, gamma_hat, delta_star, n):
    """Posterior mean update for the location parameter gamma*_ip.

    Args:
        tau_bar: tau hyper-parameter estimate in batch i.
        gamma_bar: location hyper-parameter estimate for batch i.
        gamma_hat: sample mean of OTU p in batch i.
        delta_star: posterior mean for the scale parameter of OTU p in batch i.
        n: number of samples in batch i.

    Returns:
        Posterior mean for the location parameter of OTU p in batch i
        (precision-weighted blend of the sample mean and the prior mean).
    """
    weighted_data = n * tau_bar * gamma_hat
    weighted_prior = delta_star * gamma_bar
    total_weight = n * tau_bar + delta_star
    return (weighted_data + weighted_prior) / total_weight
def combat(in_df, covariates, batches, ignore, verbose=False):
    """Run ComBat batch correction once per batch column.

    Args:
        in_df: input DataFrame (samples x [features + covariates + batch cols]).
        covariates: list of covariate column names to adjust for.
        batches: collection of batch column names, indexed 0..len-1
            (the code calls ``batches.keys()`` — presumably a dict keyed by
            int; verify against callers).
        ignore: list of column names excluded from correction.
        verbose: forwarded to reduce_batch_effects for progress output.

    Returns:
        The corrected DataFrame, or the tuple ("error", partial_result)
        when reassembling the corrected frame fails.
    """
    df = in_df.copy()
    for i in range(len(batches.keys())):
        print("Performing ComBat Batch Correction for {}".format(batches[i].upper()))
        # (removed a self-assignment no-op on the non-ignored columns)
        t = batch_pp(df, covariates=covariates, batch_column=batches[i], ignore=ignore)
        r = reduce_batch_effects(Y=t['Y'], X_cov=t['X_cov'], I=t['I'], X_batch=t['X_batch'], verbose=verbose)
        try:
            # stitch the corrected counts back together with the ignored columns
            df = pd.concat([r["BR"], t['ignore']], axis=1)
        except Exception:
            # narrowed from a bare except so SystemExit/KeyboardInterrupt escape
            print('Error Occurred - returning original data set')
            return ("error", r["BR"])
    return df
| 2.96875 | 3 |
vs_utils/features/gridmol/tests/test_molecule.py | rbharath/pande-gas | 12 | 12761396 | <reponame>rbharath/pande-gas
"""
Tests for molecule.py.
"""
import numpy as np
import unittest
from ..molecule import GridAtom, GridMol
class TestGridMol(unittest.TestCase):
    """Unit tests for GridMol."""

    def setUp(self):
        """Create an 11 x 11 x 11 molecule grid for each test."""
        self.mol = GridMol((11, 11, 11))

    def test_add_atom(self):
        """Adding a single atom should register exactly one atom."""
        self.mol.add_atom((1, 2, 1), 1.6)
        assert len(self.mol.atoms) == 1

    def test_get_occupancy(self):
        """The occupancy grid should be binary and mostly empty."""
        self.mol.add_atom((1, 2, 1), 1.6)
        self.mol.add_atom((1, 1, 1), 1.6)
        occ = self.mol.get_occupancy()
        assert occ.shape == self.mol.shape
        # both occupied and unoccupied points exist, and nothing exceeds 1
        assert np.count_nonzero(occ == 0)
        assert np.count_nonzero(occ == 1)
        assert not np.count_nonzero(occ > 1)
        # two atoms should fill well under 20% of the grid
        assert np.count_nonzero(occ) < 0.2 * self.mol.size

    def test_get_distance(self):
        """Distances are negative inside atoms and bounded outside."""
        self.mol.add_atom((1, 2, 1), 1.6)
        self.mol.add_atom((1, 1, 1), 1.6)
        dist = self.mol.get_distance()
        # the sign of the distance must agree with the occupancy grid
        inside = self.mol.get_occupancy()
        assert np.all(dist[inside] <= 0)
        assert np.all(dist[~inside] > 0)
        # no distance can exceed the physical extent of the grid
        assert np.amax(dist) < max(self.mol.get_real_shape())
        # penetration depth is bounded by atom radius + probe radius
        max_depth = self.mol.atoms[0].radius + self.mol.probe_radius
        assert np.fabs(np.amin(dist)) <= max_depth
        # most grid points lie within half the grid extent of a surface
        cutoff = max(self.mol.get_real_shape()) / 2.
        assert np.count_nonzero(np.fabs(dist) < cutoff) > 0.9 * dist.size
class TestGridAtom(unittest.TestCase):
    """Unit tests for GridAtom."""

    def setUp(self):
        """Create a grid molecule containing a single atom."""
        self.mol = GridMol((11, 11, 11))
        self.atom = GridAtom(self.mol, (1, 2, 1), 1.6)

    def test_atom_is_in_grid(self):
        """Atoms fully inside the grid pass; atoms poking out fail."""
        assert GridAtom.atom_is_in_grid(self.mol, self.atom.center,
                                        self.atom.radius,
                                        self.mol.probe_radius)
        assert not GridAtom.atom_is_in_grid(self.mol, (1, 2, 3), 1.6,
                                            self.mol.probe_radius)

    def test_get_grid_mask(self):
        """Masked voxel volume should approximate the analytic sphere volume."""
        mask = self.atom.get_grid_mask()
        assert np.count_nonzero(mask)
        # sum the voxelized volume and compare against the solvent-inflated sphere
        voxel_volume = np.count_nonzero(mask) * self.mol.spacing ** 3
        r = self.atom.radius + self.mol.probe_radius
        sphere_volume = 4 / 3. * np.pi * r ** 3
        assert np.fabs(voxel_volume - sphere_volume) < 10
| 2.671875 | 3 |
Stonks.py | VarunAndMaanas/Investment-Optimizer-and-Stock-Growth-Predictor | 0 | 12761397 | from google.protobuf.symbol_database import Default
import nltk
import random
import pickle
from nltk.corpus.reader.chasen import test
from pandas.core.indexes import period
from statsmodels.tsa.seasonal import _extrapolate_trend
nltk.download('punkt')
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
from numpy.lib.function_base import append, select
lemmatizer = WordNetLemmatizer()
import pandas as pd
import yfinance as yf
import streamlit as st
import statsmodels.api as sm
import datetime as dt
import plotly.graph_objects as go
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
import requests
import json
import numpy as np
from keras.models import load_model
from bs4 import BeautifulSoup
import csv
from requests.exceptions import ConnectionError
# Vocabulary and intent labels produced at training time (pickled lists).
words=pickle.load(open('words.pkl','rb'))
classes=pickle.load(open('classes.pkl','rb'))
# Trained Keras classifier mapping a bag-of-words vector to intent scores.
model = load_model("stock_model.h5")
# Intent definitions (tags, patterns, canned responses) used for replies.
intents=json.loads(open('training.json').read())
def calcMovingAverage(data, size):
    """Return a copy of *data* with moving-average columns attached.

    Adds 'sma' (simple moving average) and 'ema' (exponential moving
    average, span == *size*) over the 'Adj Close' column, then drops the
    warm-up rows that have no complete window.
    """
    out = data.copy()
    prices = out['Adj Close']
    out['sma'] = prices.rolling(size).mean()
    out['ema'] = prices.ewm(span=size, min_periods=size).mean()
    return out.dropna()
def calc_macd(data):
    """Return a copy of *data* with MACD indicator columns attached.

    Adds 'ema12' and 'ema26' (exponential moving averages of 'Adj Close'),
    their difference 'macd', and the 9-period EMA of the MACD as 'signal',
    then drops the warm-up rows that have incomplete windows.
    """
    out = data.copy()
    close = out['Adj Close']
    out['ema12'] = close.ewm(span=12, min_periods=12).mean()
    out['ema26'] = close.ewm(span=26, min_periods=26).mean()
    out['macd'] = out['ema12'] - out['ema26']
    out['signal'] = out['macd'].ewm(span=9, min_periods=9).mean()
    return out.dropna()
def calcBollinger(data, size):
    """Return a copy of *data* with Bollinger Band columns attached.

    Adds the rolling mean 'sma', the upper/lower bands 'bolu'/'bold'
    (mean +/- 2 population standard deviations of 'Adj Close' over a
    *size*-row window) and the band 'width', then drops warm-up rows.
    """
    out = data.copy()
    out["sma"] = out['Adj Close'].rolling(size).mean()
    band = 2 * out['Adj Close'].rolling(size).std(ddof=0)
    out["bolu"] = out["sma"] + band
    out["bold"] = out["sma"] - band
    out["width"] = out["bolu"] - out["bold"]
    return out.dropna()
def graphMyStock(finalvar, a, b, col):
    """Render an adjusted-close moving-average chart for one ticker.

    Args:
        finalvar: ticker symbol to download and plot.
        a: Streamlit widget key for the period (years) number input.
        b: Streamlit widget key for the window-size (days) number input.
        col: Plotly line color for the trace.
    """
    # Removed an unused yf.Ticker(...).info lookup that triggered a needless
    # network request (its result was never used).
    # Period / window controls rendered side by side.
    opt1b, opt2b = st.beta_columns(2)
    with opt1b:
        numYearMAb = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=a)
    with opt2b:
        windowSizeMAb = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=b)
    start2 = dt.datetime.today()-dt.timedelta(numYearMAb * 365)
    end2 = dt.datetime.today()
    livedata2 = yf.download(finalvar,start2,end2)
    df_ma2 = calcMovingAverage(livedata2, windowSizeMAb)
    df_ma2 = df_ma2.reset_index()
    fig2 = go.Figure()
    fig2.add_trace(
        go.Scatter(
                x = df_ma2['Date'],
                y = df_ma2['Adj Close'],
                name = '('+ finalvar+ ') '+ "Prices Over Last " + str(numYearMAb) + " Year(s)",
                mode='lines',
                line=dict(color=col)
            )
    )
    fig2.update_layout(showlegend=True,legend=dict(
        yanchor="top",
        y=0.99,
        xanchor="left",
        x=0.01,
    ))
    fig2.update_layout(legend_title_text='Trend')
    fig2.update_yaxes(tickprefix="$")
    st.plotly_chart(fig2, use_container_width=True)
def graphAllStocks(stocka, stockb, stockc, a, b, col1, col2, col3):
    """Render one combined moving-average chart for three tickers.

    Args:
        stocka, stockb, stockc: ticker symbols to overlay on one figure.
        a: Streamlit widget key for the period (years) number input.
        b: Streamlit widget key for the window-size (days) number input.
        col1, col2, col3: Plotly line colors for the three traces.
    """
    # Removed an unused yf.Ticker(stocka).info lookup that triggered a
    # needless network request (its result was never used).
    st.write('')
    st.subheader('**Graph of optimal stocks:** ')
    # Period / window controls rendered side by side.
    opt1b, opt2b = st.beta_columns(2)
    with opt1b:
        numYearMAb = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=a)
    with opt2b:
        windowSizeMAb = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=b)
    start2 = dt.datetime.today()-dt.timedelta(numYearMAb * 365)
    end2 = dt.datetime.today()
    # First ticker.
    livedata2 = yf.download(stocka,start2,end2)
    df_ma2 = calcMovingAverage(livedata2, windowSizeMAb)
    df_ma2 = df_ma2.reset_index()
    fig2 = go.Figure()
    fig2.add_trace(
        go.Scatter(
                x = df_ma2['Date'],
                y = df_ma2['Adj Close'],
                name = '('+ stocka+ ') '+ "Prices Over Last " + str(numYearMAb) + " Year(s)",
                mode='lines',
                line=dict(color=col1)
            )
    )
    # Second ticker.
    livedata2=yf.download(stockb,start2,end2)
    df_ma2= calcMovingAverage(livedata2, windowSizeMAb)
    df_ma2= df_ma2.reset_index()
    fig2.add_trace(
        go.Scatter(
        x=df_ma2['Date'],
        y=df_ma2['Adj Close'],
        name = '('+ stockb+ ') '+ "Prices Over Last " + str(numYearMAb) + " Year(s)",
        mode='lines',
        line=dict(color=col2)
        ))
    # Third ticker.
    livedata3=yf.download(stockc,start2,end2)
    df_ma3= calcMovingAverage(livedata3, windowSizeMAb)
    df_ma3= df_ma3.reset_index()
    fig2.add_trace(
        go.Scatter(
        x=df_ma3['Date'],
        y=df_ma3['Adj Close'],
        name = '('+ stockc+ ') '+ "Prices Over Last " + str(numYearMAb) + " Year(s)",
        mode='lines',
        line=dict(color=col3)
        ))
    fig2.update_layout(showlegend=True,legend=dict(
        orientation="h",
        yanchor="bottom",
        y=1.02,
        xanchor="right",
        x=1,
    ))
    fig2.update_layout(legend_title_text='Trend')
    fig2.update_yaxes(tickprefix="$")
    st.plotly_chart(fig2, use_container_width=True)
def RootWordGen(lw):
    """Tokenize *lw* and return its lowercased, lemmatized tokens as a list."""
    tokens = nltk.word_tokenize(lw)
    return [lemmatizer.lemmatize(token.lower()) for token in tokens]
def matrix(sentence, words, show_details=True):
    """Encode *sentence* as a bag-of-words vector over the vocabulary *words*.

    The sentence is lemmatized via RootWordGen; the returned numpy array has
    a 1 at every vocabulary position whose word appears in the sentence and
    a 0 elsewhere.  When *show_details* is True, each match is printed.
    """
    tokens = RootWordGen(sentence)
    # one slot per vocabulary word, all initially unset
    bag = [0] * len(words)
    for token in tokens:
        for idx, vocab_word in enumerate(words):
            if vocab_word == token:
                # mark the vocabulary position for this matched word
                bag[idx] = 1
                if show_details:
                    print("found in bag: %s" % vocab_word)
    return np.array(bag)
def predict_class(sentence, model):
    """Run the intent classifier on *sentence* and return the top intent.

    Encodes the sentence as a bag-of-words vector, scores it with *model*,
    and returns {"intent": ..., "probability": ...} for the highest-scoring
    class.  Also publishes several module-level globals (results, results1,
    pp, return_list, return_list1) as side effects — NOTE(review): these
    globals do not appear to be consumed elsewhere in this file; verify
    before removing.
    """
    # encode the sentence; suppress per-word debug printing
    pred= matrix(sentence, words,show_details=False)
    # model returns one score per intent class
    res = model.predict(np.array([pred]))[0]
    # filter out predictions below this threshold probability
    ERROR_THRESHOLD = 0.25
    global results
    results = [[i,r] for i,r in enumerate(res) if r>ERROR_THRESHOLD]
    global results1
    results1 = [[i,r] for i,r in enumerate(res)]
    print(results)
    # (debug scaffolding kept for reference)
    #for guesses above threshold
    #f=open('r.txt','w')
    #for all guesses
    #f1=open('s.txt','w')
    # sort by strength of probability
    results.sort(key=lambda x: x[1], reverse=True)
    results1.sort(key=lambda x: x[1], reverse=True)
    # pp holds the probability of the single best guess
    pr=results1[0]
    global pp
    pp=pr[1]
    print(pp)
    global return_list
    return_list = []
    global return_list1
    return_list1=[]
    # translate [index, score] pairs into {"intent", "probability"} dicts
    for r in results1:
        return_list1.append({"intent": classes[r[0]], "probability": str(r[1])})
    for r in results:
        return_list.append({"intent": classes[r[0]], "probability": str(r[1])})
    #for x in return_list1:
    #    f1.write(str(x))
    #for x in return_list:
        #print(x)
        #f.write(str(x))
    # NOTE(review): indexes return_list (above-threshold guesses) — raises
    # IndexError when no class clears ERROR_THRESHOLD; confirm that is intended.
    return return_list[0]
def getResponse(ints, intents_json):
    """Pick a canned response for the top predicted intent.

    Sets the module-level ``tag`` global to the winning intent tag (and
    prints it), then returns a random response string registered for that
    tag in *intents_json*.
    """
    global tag
    tag = ints[0]['intent']
    print(tag)
    for intent_def in intents_json['intents']:
        if intent_def['tag'] == tag:
            result = random.choice(intent_def['responses'])
            break
    return result
def FinalPrediction(msg):
    """Classify *msg* and return a canned response for the winning intent."""
    predicted = predict_class(msg, model)
    return getResponse(predicted, intents)
# Ticker universe: the S&P 500 symbol list shipped alongside the app.
stockdata = pd.read_csv("SP500.csv")
symbols = stockdata['Symbol'].sort_values().tolist()
st.title('Investment Optimizer and Stock Growth Predictor')
#We'll add this when we come up with something
# Collapsible "about" blurb describing the app and the ATHENA model.
expander=st.beta_expander(label='',expanded=False)
expander.write("This application aims at evaluating stock trends and current news to predict it's future growth. It provides a clean and efficient user interface to view current prices and fluctuation history. It also provides a tool to identify an ideal combination of stocks that one should invest in based on the given budget, using our machine learning and optimization algorithm. We have named our ML model 'ATHENA', which stands for Algorithmic Enhancer")
st.write("")
st.write("")
st.write('**Would you like to know where to invest or understand each Stock?**')
# Mode switch: "Invest" runs the optimizer, "Understand" shows per-stock analysis.
a=st.radio("", ("Invest", "Understand"))
if(a=="Invest"):
budget=st.sidebar.number_input("Enter your budget ($): ")
if(st.sidebar.button("Enter")):
st.header("")
st.header("**Following is the combination of stocks you should invest in: ** ")
st.write("")
st.write('Processing...')
invest=[]
invstock_sym=[]
invstock_name=[]
f= open("SP500.csv",'r')
rd=csv.reader(f)
for x in rd:
if x!=[]:
if x[2]=='badboy':
invstock_sym.append(x[0])
invstock_name.append(x[1])
invstock_price=[]
for ticker in invstock_sym:
ticker_yahoo = yf.Ticker(ticker)
data = ticker_yahoo.history()
last_quote = (data.tail(1)['Close'].iloc[0])
invstock_price.append(float(last_quote))
invstock_conf=[]
st.markdown("""
<style>
.stProgress .st-bo {
background-color: green;
}
</style>
""", unsafe_allow_html=True)
my_bar=st.progress(0)
progresscount=10
for badgirl in invstock_name:
checkerb=0
try:
send="https://www.google.com/search?q=should+you+invest+in+ "+badgirl.lower()+" stock"
res=requests.get(send)
except ReadTimeout:
checkerb=checkerb+1
except ConnectionError or ConnectionAbortedError or ConnectionRefusedError:
checkerb=checkerb+1
else:
soup=BeautifulSoup(res.content, "html.parser")
all_links=[]
count=0
for i in soup.select("a"):
if count==1:
break
link=i.get("href")
if("/url?q=https://" in link):
if(("/url?q=https://support.google.com" not in link) and ("/url?q=https://accounts.google.com" not in link)):
x=link.split("https://")
y=x[1].split("&sa")
new="https://"+y[0]
all_links.append(new)
z=i.text
if("..." in z):
type2=z.split("...")
name=type2[0]
else:
type1=z.split(" › ")
name=type1[0]
count+=1
list1=[]
c=0
for i in all_links:
if c==1:
break
option=requests.get(i)
soup=BeautifulSoup(option.content, "html.parser")
pageinfo=soup.select("p")
for j in pageinfo:
m=j.text
n=m.split(' ')
for i in n:
list1.append(i)
c=c+1
tex=' '.join(list1)
find=predict_class(tex,model)
varun=[]
varun.append(float(find['probability']))
varun.append(find['intent'])
invstock_conf.append(varun)
progresscount=progresscount+10
my_bar.progress(progresscount)
stocks={}
for i in range(len(invstock_name)):
temp=[]
if invstock_conf[i][1]=='up':
temp.append(invstock_conf[i][0])
temp.append(invstock_price[i])
temp.append(invstock_name[i])
temp.append(invstock_sym[i])
length= len(stocks)
stocks[length]=temp
###### NEED TO GET "STOCKS" DICTIONARY DATA FROM ########
all_stocks={}
for i in range(len(stocks)):
if((budget >= stocks[i][1]) and (stocks[i][0]>0.5)):
n=len(all_stocks)
all_stocks[n]=[stocks[i][0], stocks[i][1], stocks[i][2], stocks[i][3]]
if len(all_stocks)>=3:
st.balloons()
quad1={}
quad2={}
quad3={}
quad4={}
for i in range(len(all_stocks)):
if((all_stocks[i][0]>=0.8) and (all_stocks[i][1]<=100)):
quad1[i]=[all_stocks[i][0], all_stocks[i][1], all_stocks[i][2],all_stocks[i][3]]
elif((all_stocks[i][0]>=0.8) and (all_stocks[i][1]>100)):
quad2[i]=[all_stocks[i][0], all_stocks[i][1], all_stocks[i][2],all_stocks[i][3]]
elif((all_stocks[i][0]<0.8) and (all_stocks[i][1]<=100)):
quad3[i]=[all_stocks[i][0], all_stocks[i][1], all_stocks[i][2],all_stocks[i][3]]
else:
quad4[i]=[all_stocks[i][0], all_stocks[i][1], all_stocks[i][2],all_stocks[i][3]]
def inputs(quad):
global invest
spq=[]
for i in quad:
spq.append(quad[i][1])
length=len(spq)
for i in range(length):
if(len(invest)==3):
break
minval=min(spq)
for i in quad:
if(quad[i][1]==minval):
invest.append(quad[i])
spq.remove(minval)
inputs(quad1)
if(len(invest)<3):
inputs(quad2)
if(len(invest)<3):
inputs(quad3)
if(len(invest)<3):
inputs(quad4)
#stock1 should get 60%
#stock2 should get 30%
#stock3 should get 10%
s1=budget*0.6
s2=budget*0.3
s3=budget*0.1
n_s1=s1//invest[0][1]
n_s2=s2//invest[1][1]
n_s3=s3//invest[2][1]
left=budget-invest[0][1]*n_s1-invest[1][1]*n_s2-invest[2][1]*n_s3
invest_val=[]
for i in range(3):
invest_val.append(invest[i][1])
a_s1=0
a_s2=0
a_s3=0
a_s3=left//invest[2][1]
left=left-a_s3*invest[2][1]
a_s2=left//invest[1][1]
left=left-a_s2*invest[1][1]
a_s1=left//invest[0][1]
left=left-a_s1*invest[0][1]
t_s1=n_s1+a_s1
t_s2=n_s2+a_s2
t_s3=n_s3+a_s3
st.write("")
st.subheader('**Summary:** ')
summary_table={}
names=[]
prices=[]
nstocks=[]
totalcosts=[]
confidences=[]
for i in range(len(invest)):
names.append(invest[i][2])
prices.append(invest[i][1])
if(i==0):
nstocks.append(t_s1)
tcost=t_s1*invest[i][1]
totalcosts.append(tcost)
if(i==1):
nstocks.append(t_s2)
tcost=t_s2*invest[i][1]
totalcosts.append(tcost)
if(i==2):
nstocks.append(t_s3)
tcost=t_s3*invest[i][1]
totalcosts.append(tcost)
confidences.append(invest[i][0])
summary_table["Stock Name"]=names
summary_table["Cost per Stock"]=prices
summary_table["Number to Purchase"]=nstocks
summary_table["Total Cost"]=totalcosts
summary_table["Our Confidence"]=confidences
column_order=["Stock Name", "Cost per Stock", "Number to Purchase", "Total Cost", "Our Confidence"]
summary_df=pd.DataFrame(data=summary_table)
st.dataframe(summary_df)
st.write("")
bala='**Your balance:** '+ '_$' + str(left) +'_'
st.write(bala)
graphAllStocks(invest[0][3],invest[1][3],invest[2][3],14,15,'royalblue','springgreen','indianred')
st.header('**In depth review:** ')
st.write('')
text1='Your first stock: ' + '_' + str(invest[0][2]) + '_'
st.header(text1)
graphMyStock(invest[0][3],1,2,'royalblue')
text1a='**Price:** '+ '_$'+ str(invest[0][1]) + '_'
st.write(text1a)
text1b='**Number of stocks you should buy:** '+ '_' + str(t_s1) + '_'
st.write(text1b)
text1c="**Athena's confidence: **"+'_'+ str(100*invest[0][0])+'%' + '_'
st.write(text1c)
st.write('')
st.write('')
text2='Your second stock: ' +'_'+ str(invest[1][2])+ '_'
st.header(text2)
graphMyStock(invest[1][3],3,4,'springgreen')
text2a='**Price:** '+ '_$'+ str(invest[1][1])+ '_'
st.write(text2a)
text2b='**Number of stocks you should buy:** '+'_'+ str(t_s2)+ '_'
st.write(text2b)
text2c="**Athena's confidence:** "+'_'+ str(100*invest[1][0]) + '%'+'_'
st.write(text2c)
st.write('')
st.write('')
text3= 'Your third stock: '+'_'+ str(invest[2][2])+ '_'
st.header(text3)
graphMyStock(invest[2][3],5,6,'indianred')
text3a='**Price:** '+ '_$'+ str(invest[2][1])+ '_'
st.write(text3a)
text3b='**Number of stocks you should buy: **'+'_'+ str(t_s3)+'_'
st.write(text3b)
text3c="**Athena's confidence: **"+'_'+ str(100*invest[2][0]) + '%'+'_'
st.write(text3c)
st.write('')
st.write('')
st.header("")
st.header("")
st.write("Disclaimer: We are not liable for the results or actions taken on the basis of these predictions.")
else:
st.write('Budget too low to diversify')
if a=='Understand':
ticker = st.sidebar.selectbox(
'Choose a Stock',symbols)
stock = yf.Ticker(ticker)
info=stock.info
ln=info['longName']
st.title(info['longName'])
st.title(ticker)
opt1, opt2 = st.beta_columns(2)
with opt1:
numYearMA = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=0)
with opt2:
windowSizeMA = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=1)
start = dt.datetime.today()-dt.timedelta(numYearMA * 365)
end = dt.datetime.today()
livedata = yf.download(ticker,start,end)
df_ma = calcMovingAverage(livedata, windowSizeMA)
df_ma = df_ma.reset_index()
fig = go.Figure()
fig.add_trace(
go.Scatter(
x = df_ma['Date'],
y = df_ma['Adj Close'],
name = '('+ ticker+ ') '+ "Prices Over Last " + str(numYearMA) + " Year(s)",
mode='lines',
line=dict(color='royalblue')
)
)
compstock2=st.selectbox('Choose stock to compare with: ', symbols)
st.info("If you don't wish to compare, select the same stock again")
livedata2=yf.download(compstock2,start,end)
df_ma2= calcMovingAverage(livedata2, windowSizeMA)
df_ma2= df_ma2.reset_index()
fig.add_trace(
go.Scatter(
x=df_ma2['Date'],
y=df_ma2['Adj Close'],
name = '('+ compstock2+ ') '+ "Prices Over Last " + str(numYearMA) + " Year(s)",
mode='lines',
line=dict(color='firebrick')
))
fig.update_layout(showlegend=True,legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01,
))
fig.update_layout(legend_title_text='Trend')
fig.update_yaxes(tickprefix="$")
st.plotly_chart(fig, use_container_width=True)
livedata3 = yf.download(ticker,start,end)
df_ma3 = calcMovingAverage(livedata3, windowSizeMA)
df_ma3 = df_ma.reset_index()
train_data, test_data = df_ma3[0:int(len(df_ma3)*0.7)], df_ma3[int(len(df_ma3)*0.7):]
training_data = train_data['Adj Close'].values
test_data = test_data['Adj Close'].values
history = [x for x in training_data]
model_predictions = []
N_test_observations = len(test_data)
abcd=0
for time_point in range(N_test_observations):
model = ARIMA(history, order=(4,1,0))
model_fit = model.fit(disp=0)
output = model_fit.forecast()
yhat = output[0]
model_predictions.append(yhat[0])
true_test_value = test_data[time_point]
history.append(true_test_value)
abcd=abcd+1
af=time_point
MSE_error = mean_squared_error(test_data, model_predictions)
test_set_range = df_ma3[int(len(df_ma3)*0.7):]
dts=df_ma3.loc[:,['Date']]
new = pd.date_range(test_set_range.Date.iloc[-1], periods=30)
df1 = pd.DataFrame(new[1:], columns=['Date'])
df_fin = test_set_range.append(df1, ignore_index=True)
mps=[]
for i in range(30):
model = ARIMA(history, order=(4,1,0))
fitted = model.fit(disp=0)
ou=fitted.forecast()
yha = ou[0]
mps.append(yha[0])
history.append(yha[0])
future_dates=[]
dat=[]
for row in df_fin.itertuples():
dat.append(row[2])
mxq=dat[-1]-dt.timedelta(days=29)
future_dates.append(mxq)
for i in range (30):
date=future_dates[-1]+dt.timedelta(days=1)
future_dates.append(date)
myseries=pd.Series(mps)
st.subheader('Future Graph Trend for '+ info['longName']+' using Time Series Analysis')
figtsa=go.Figure()
figtsa.add_trace(
go.Scatter(
x=df_fin['Date'],
y=model_predictions,
name = 'Predicted Prices',
mode='lines'
)
)
figtsa.add_trace(
go.Scatter(
x=df_fin['Date'],
y=test_data,
mode='lines',
name='Previous model prediction graph'
)
)
figtsa.add_trace(
go.Scatter(
x=future_dates,
y=mps,
mode='lines',
name='Future Price Trend'
)
)
st.plotly_chart(figtsa, use_container_width=True)
st.subheader('Bollinger Band')
opta, optb = st.beta_columns(2)
with opta:
numYearBoll = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=6)
with optb:
windowSizeBoll = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=7)
startBoll= dt.datetime.today()-dt.timedelta(numYearBoll * 365)
endBoll = dt.datetime.today()
dataBoll = yf.download(ticker,startBoll,endBoll)
df_boll = calcBollinger(dataBoll, windowSizeBoll)
df_boll = df_boll.reset_index()
figBoll = go.Figure()
figBoll.add_trace(
go.Scatter(
x = df_boll['Date'],
y = df_boll['bolu'],
name = "Upper Band"
)
)
figBoll.add_trace(
go.Scatter(
x = df_boll['Date'],
y = df_boll['sma'],
name = "SMA" + str(windowSizeBoll) + " Over Last " + str(numYearBoll) + " Year(s)"
)
)
figBoll.add_trace(
go.Scatter(
x = df_boll['Date'],
y = df_boll['bold'],
name = "Lower Band"
)
)
figBoll.update_layout(showlegend=True,legend=dict(
orientation="h",
yanchor="bottom",
y=1,
xanchor="left",
x=0
))
figBoll.update_yaxes(tickprefix="$")
st.plotly_chart(figBoll, use_container_width=True)
st.sidebar.title("Stock News")
send="https://www.google.com/search?q=should+you+invest+in+ "+ln.lower()+" stock"
res=requests.get(send)
soup=BeautifulSoup(res.content, "html.parser")
all_links=[]
all_titles=[]
count=0
for i in soup.select("a"):
if count==5:
break
link=i.get("href")
if("/url?q=https://" in link):
if(("/url?q=https://support.google.com" not in link) and ("/url?q=https://accounts.google.com" not in link)):
x=link.split("https://")
y=x[1].split("&sa")
new="https://"+y[0]
all_links.append(new)
z=i.text
if("..." in z):
type2=z.split("...")
name=type2[0]
else:
type1=z.split(" › ")
name=type1[0]
all_titles.append(name)
count+=1
for i in range(len(all_titles)):
make="["+str(all_titles[i])+"]"+" "+"("+str(all_links[i])+")"
st.sidebar.markdown(make)
st.sidebar.write("")
st.sidebar.write("")
list1=[]
c=0
alllinksind=len(all_links)
for x in range(alllinksind):
checkera=0
if c==10:
break
try:
option=requests.get(all_links[x], timeout=3)
except ReadTimeout:
checkera=checkera+1
except ConnectionError or ConnectionAbortedError or ConnectionRefusedError:
checkera=checkera+1
else:
if checkera==0:
soup=BeautifulSoup(option.content, "html.parser")
pageinfo=soup.select('p')
paglen=len(pageinfo)
for j in range(paglen):
m=pageinfo[j].text
n=m.split(' ')
for i in n:
list1.append(i)
c=c+1
tex=' '.join(list1)
understand_prob=predict_class(tex,model)
finint=understand_prob['intent']
finprob=100*float(understand_prob['probability'])
if finint=='up':
fininta='Stock prices will go up'
elif finint=='down':
fininta='Stock prices will go down'
fina='**Stock trend prediction: **' + '_'+ str(fininta)+ '_'
finb="**Athena's confidence: **"+ '_'+ str(finprob)+'%' +'_'
st.subheader(fininta)
st.subheader(finb)
st.header("")
st.header("")
st.markdown("""
<style>
.small-font {
font-size:10px !important;
}
</style>
""", unsafe_allow_html=True)
st.markdown('<p class="small-font">Disclaimer: We are not liable for the results or actions taken on the basis of these predictions.</p>', unsafe_allow_html=True)
| 2.28125 | 2 |
make_art.py | nprapps/sotomayor | 1 | 12761398 | #!/usr/bin/env python
from glob import glob
import os
from shutil import rmtree
from itertools import chain
from PIL import Image
output_dir = 'www/img/art'
widths = [120, 480, 979, 1200]
rmtree(output_dir)
os.mkdir(output_dir)
for path in chain(glob('art/*.jpg'), glob('art/*.png')):
filename = os.path.split(path)[-1]
name = os.path.splitext(filename)[0]
original = Image.open(path)
if original.mode == 'LA':
original = original.convert('L')
for width in widths:
output_path = os.path.join(output_dir, '%s_%i.jpg' % (name, width))
width_pct = width / float(original.size[0])
height = int(float(original.size[1] * width_pct))
print 'Cutting %s at %ix%i' % (name, width, height)
img = original.resize((width, height), Image.ANTIALIAS)
img.save(output_path)
| 3 | 3 |
t/test_salsa20.py | warmchang/umash | 108 | 12761399 | """
Quick smoke test that our implementation of salsa20 does the right thing.
"""
from hypothesis import given
import hypothesis.strategies as st
from Crypto.Cipher import Salsa20
from umash import C, FFI
@given(
    length=st.integers(min_value=1, max_value=512),
    nonce=st.binary(min_size=8, max_size=8),
    key=st.binary(min_size=32, max_size=32),
)
def test_salsa20(length, nonce, key):
    """Check salsa20_stream against PyCryptodome's Salsa20 keystream."""
    # Fill a fresh buffer with our implementation's keystream.
    out = FFI.new("char[]", length)
    C.salsa20_stream(out, length, nonce, key)
    # Encrypting zero bytes with the reference cipher yields its raw
    # keystream, which must match ours byte for byte.
    reference = Salsa20.new(key, nonce).encrypt(b"\x00" * length)
    assert bytes(FFI.buffer(out, length)) == reference
| 2.296875 | 2 |
lib/googlecloudsdk/compute/subcommands/firewalls/update.py | bopopescu/google-cloud-sdk | 0 | 12761400 | <reponame>bopopescu/google-cloud-sdk
# Copyright 2014 Google Inc. All Rights Reserved.
"""Command for updating firewalls."""
from googlecloudapis.compute.v1 import compute_v1_messages as messages
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.compute.lib import base_classes
from googlecloudsdk.compute.lib import firewalls_utils
class UpdateFirewall(base_classes.ReadWriteCommand):
  """Update a firewall rule."""

  @staticmethod
  def Args(parser):
    """Registers the command-line flags for updating a firewall."""
    allow = parser.add_argument(
        '--allow',
        nargs='*',
        metavar=firewalls_utils.ALLOWED_METAVAR,
        help='The list of IP protocols and ports which will be allowed.')
    allow.detailed_help = """\
        A list of protocols and ports whose traffic will be allowed. Setting
        this will override the current values.
        +
        'PROTOCOL' is the IP protocol whose traffic will be allowed.
        'PROTOCOL' can be either the name of a well-known protocol
        (e.g., ``tcp'' or ``icmp'') or the IP protocol number.
        A list of IP protocols can be found at
        link:http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml[].
        +
        A port or port range can be specified after 'PROTOCOL' to
        allow traffic through specific ports. If no port or port range
        is specified, connections through all ranges are allowed. For
        example, the following will create a rule that allows TCP traffic
        through port 80 and allows ICMP traffic:
          $ {command} my-rule --allow tcp:80 icmp
        +
        TCP and UDP rules must include a port or port range.
        """

    parser.add_argument(
        '--description',
        help=('A textual description for the firewall. Set to an empty string '
              'to clear existing description.'))

    source_ranges = parser.add_argument(
        '--source-ranges',
        metavar='CIDR_RANGE',
        nargs='*',
        help=('A list of IP address blocks that may make inbound connections '
              'in CIDR format.'))
    source_ranges.detailed_help = """\
        A list of IP address blocks that are allowed to make inbound
        connections that match the firewall rule to the instances on
        the network. The IP address blocks must be specified in CIDR
        format:
        link:http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing[].
        Setting this will override the existing source ranges for the firewall.
        The following will clear the existing source ranges:
          $ {command} my-rule --source-ranges
        """

    source_tags = parser.add_argument(
        '--source-tags',
        metavar='TAG',
        nargs='*',
        help=('A list of instance tags indicating the set of instances on the '
              'network which may accept inbound connections that match the '
              'firewall rule.'))
    source_tags.detailed_help = """\
        A list of instance tags indicating the set of instances on the
        network which may accept inbound connections that match the
        firewall rule. If omitted, all instances on the network can
        receive inbound connections that match the rule.
        +
        Tags can be assigned to instances during instance creation.
        Setting this will override the existing source tags for the firewall.
        The following will clear the existing source tags:
          $ {command} my-rule --source-tags
        """

    target_tags = parser.add_argument(
        '--target-tags',
        metavar='TAG',
        nargs='*',
        help=('A list of instance tags indicating the set of instances on the '
              'network which may make network connections that match the '
              'firewall rule.'))
    target_tags.detailed_help = """\
        A list of instance tags indicating the set of instances on the
        network which may make network connections that match the
        firewall rule. If omitted, all instances on the network can
        make connections that match the rule.
        +
        Tags can be assigned to instances during instance creation.
        Setting this will override the existing target tags for the firewall.
        The following will clear the existing target tags:
          $ {command} my-rule --target-tags
        """

    parser.add_argument(
        'name',
        help='The name of the firewall to update.')

  @property
  def service(self):
    """The compute API firewalls service used for requests."""
    return self.context['compute'].firewalls

  @property
  def print_resource_type(self):
    """Resource type name used when printing results."""
    return 'firewalls'

  def Run(self, args):
    """Validates the flags, then delegates to the base class Run."""
    # Parsed here and consumed later by Modify().
    self.new_allowed = firewalls_utils.ParseAllowed(args.allow)
    # None means "flag not given"; at least one field must be updated.
    args_unset = (args.allow is None
                  and args.description is None
                  and args.source_ranges is None
                  and args.source_tags is None
                  and args.target_tags is None)
    if args_unset:
      raise calliope_exceptions.ToolException(
          'at least one property must be modified')
    return super(UpdateFirewall, self).Run(args)

  def GetGetRequest(self, args):
    """Returns the request for the existing Firewall resource."""
    return (self.service,
            'Get',
            messages.ComputeFirewallsGetRequest(
                firewall=args.name,
                project=self.context['project']))

  def GetSetRequest(self, args, replacement, existing):
    """Returns the request that writes back the modified Firewall."""
    return (self.service,
            'Update',
            messages.ComputeFirewallsUpdateRequest(
                firewall=replacement.name,
                firewallResource=replacement,
                project=self.context['project']))

  def Modify(self, args, existing):
    """Returns a modified Firewall message."""
    # Each flag is tri-state: omitted (None) keeps the existing value, an
    # empty value clears the field, anything else replaces it.
    if args.allow is None:
      allowed = existing.allowed
    else:
      allowed = self.new_allowed

    if args.description:
      description = args.description
    elif args.description is None:
      description = existing.description
    else:
      description = None

    if args.source_ranges:
      source_ranges = args.source_ranges
    elif args.source_ranges is None:
      source_ranges = existing.sourceRanges
    else:
      source_ranges = []

    if args.source_tags:
      source_tags = args.source_tags
    elif args.source_tags is None:
      source_tags = existing.sourceTags
    else:
      source_tags = []

    if args.target_tags:
      target_tags = args.target_tags
    elif args.target_tags is None:
      target_tags = existing.targetTags
    else:
      target_tags = []

    new_firewall = messages.Firewall(
        name=existing.name,
        allowed=allowed,
        description=description,
        network=existing.network,
        sourceRanges=source_ranges,
        sourceTags=source_tags,
        targetTags=target_tags,
    )

    return new_firewall
UpdateFirewall.detailed_help = {
'brief': 'Update a firewall rule',
'DESCRIPTION': """\
*{command}* is used to update firewall rules that allow incoming
traffic to a network. Only arguments passed in will be updated on the
firewall. Other attributes will remain unaffected.
""",
}
| 2.59375 | 3 |
single.py | Gooner14/CNNcatvsdog | 0 | 12761401 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 1 18:22:15 2018
@author: arpit-mint
"""
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense

# Initializing the CNN (binary cat-vs-dog classifier)
classifier=Sequential()

# Step 1: Convolution -- 32 3x3 filters over 64x64 RGB input
classifier.add(Convolution2D(32, kernel_size=(3, 3), input_shape=(64,64,3), activation='relu'))

# Step 2: Pooling -- halve the spatial resolution
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# 2nd convolution layer
classifier.add(Convolution2D(32, kernel_size=(3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Step 3: Flattening -- feature maps to a 1-D vector
classifier.add(Flatten())

# Step 4: Full Connection -- hidden layer, then a single sigmoid output
classifier.add(Dense(units=128,activation='relu'))
classifier.add(Dense(units=1,activation='sigmoid'))

# Compiling the CNN
classifier.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['accuracy'])

# Part 2: Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator

# Augment training images; validation images are only rescaled.
train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)

training_set = train_datagen.flow_from_directory(
        'dataset/training_set',
        target_size=(64, 64),
        batch_size=32,
        class_mode='binary')

# NOTE(review): this loads 'single_prediction' as a labelled validation
# set; that folder is normally used for ad-hoc single-image predictions --
# confirm the directory layout actually contains class sub-folders.
single_prediction = test_datagen.flow_from_directory(
        'dataset/single_prediction',
        target_size=(64, 64),
        batch_size=32,
        class_mode='binary')

# NOTE(review): in Keras 2 steps_per_epoch counts BATCHES per epoch;
# 8000 steps with batch_size=32 implies 256,000 images per epoch.
# Presumably 8000 was the sample count (8000/32 = 250 steps) -- confirm.
classifier.fit_generator(
        training_set,
        steps_per_epoch=8000,
        epochs=2,
        validation_data=single_prediction,
        validation_steps=2)
floodsystem/geo.py | fm554/Lent_22-Lab_Group_29 | 0 | 12761402 | # Copyright (C) 2018 <NAME>
#
# SPDX-License-Identifier: MIT
"""This module contains a collection of functions related to
geographical data.
"""
from .utils import sorted_by_key # noqa
from haversine import haversine
# Task 1B
def stations_by_distance(stations, p):
    """Return (name, town, distance) tuples for every station, sorted by
    haversine distance in km from coordinate p (latitude, longitude).

    Input arguments: stations (list of MonitoringStation objects), p (lat, lon)
    Returns: list of tuples of form (name, town, distance)
    """
    entries = [(station.name, station.town, haversine(station.coord, p))
               for station in stations]
    # Sort ascending by the distance element (index 2).
    return sorted_by_key(entries, 2)
# Task 1C
def stations_within_radius(stations, centre, r):
    """Return the sorted names of all stations within r km of centre.

    Input arguments: stations (list of MonitoringStation objects),
    centre (lat, lon), r (distance in km)
    Returns: sorted list of station names
    """
    return sorted(station.name for station in stations
                  if haversine(station.coord, centre) <= r)
# Task 1D
def rivers_with_station(stations):
    """Return the set of distinct river names that have a monitoring station.

    Input arguments: stations (list of MonitoringStation objects)
    Returns: set of river names (a set needs no duplicate checking)
    """
    return {station.river for station in stations}
def stations_by_river(stations):
    """Map each river name to the names of the stations on that river.

    Input arguments: stations (list of MonitoringStation objects)
    Returns: dict {river: [station names on that river]}
    """
    rivers_stations = {}
    for station in stations:
        # Group in a single pass; the previous implementation re-scanned the
        # whole station list once per river (O(rivers * stations)).
        rivers_stations.setdefault(station.river, []).append(station.name)
    return rivers_stations
#task 1E
def rivers_by_station_number(stations, N):
    """Return the N rivers with the most monitoring stations.

    Input arguments: stations (list of MonitoringStation objects),
    N (number of rivers, must be >= 1)
    Returns: list of (river, station count) tuples sorted by decreasing
    count; rivers tied with the Nth entry are also included.
    Raises: ValueError if N < 1.
    """
    if N < 1:
        # The old code only printed an error and carried on with nonsense
        # slices; fail loudly instead.
        raise ValueError("N must be greater than 0")
    # Count stations per river in one pass.
    counts = {}
    for station in stations:
        counts[station.river] = counts.get(station.river, 0) + 1
    ranked = sorted(counts.items(), key=lambda pair: pair[1], reverse=True)
    result = ranked[:N]
    # BUG FIX: the old tie loop compared entry N with itself (always true,
    # so one extra river was unconditionally appended) and indexed past the
    # end of the list.  Compare against the last included entry instead.
    for river, count in ranked[N:]:
        if result and count == result[-1][1]:
            result.append((river, count))
        else:
            break
    return result
setup.py | erberlin/namedzip | 24 | 12761403 | <reponame>erberlin/namedzip<filename>setup.py
from setuptools import setup

# The README doubles as the long description shown on PyPI.
with open("README.rst", "r", encoding="utf-8") as readme_file:
    README = readme_file.read()

SUPPORTED_CLASSIFIERS = [
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.4",
    "Programming Language :: Python :: 3.5",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
]

setup(
    name="namedzip",
    version="1.1.0",
    description="Extends zip() and itertools.zip_longest() to generate named tuples.",
    long_description=README,
    long_description_content_type="text/x-rst",
    url="https://github.com/erberlin/namedzip",
    author="<NAME>",
    author_email="<EMAIL>",
    license="MIT",
    classifiers=SUPPORTED_CLASSIFIERS,
    packages=["namedzip"],
    python_requires=">=3.4",
)
| 1.59375 | 2 |
filer/test_utils/cli.py | pbmbrands/django-filer | 1 | 12761404 | <filename>filer/test_utils/cli.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from distutils.version import LooseVersion
import django
import os
# Identity stub so strings can be marked for translation without pulling in
# Django's i18n machinery at import time (was a lambda assignment, PEP 8 E731).
def gettext(s):
    return s


urlpatterns = []  # This module doubles as ROOT_URLCONF; no URLs are needed.

# True when running under Django older than 1.4.
DJANGO_1_3 = LooseVersion(django.get_version()) < LooseVersion('1.4')
def configure(**extra):
    """Configure minimal Django settings for running filer's test suite.

    Keyword arguments override the defaults below.  Settings are applied via
    django.conf.settings.configure(), after which the admin autodiscovery is
    triggered so admin modules register themselves.
    """
    # Imported lazily so merely importing this module has no side effects.
    from django.conf import settings
    os.environ['DJANGO_SETTINGS_MODULE'] = 'filer.test_utils.cli'
    defaults = dict(
        CACHE_BACKEND='locmem:///',
        DEBUG=True,
        TEMPLATE_DEBUG=True,
        DATABASE_SUPPORTS_TRANSACTIONS=True,
        # In-memory SQLite keeps the test run self-contained.
        DATABASES={
            'default': {'ENGINE': 'django.db.backends.sqlite3'}
        },
        USE_I18N=True,
        MEDIA_ROOT='/media/',
        STATIC_ROOT='/static/',
        MEDIA_URL='/media/',
        STATIC_URL='/static/',
        ADMIN_MEDIA_PREFIX='/static/admin/',
        EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
        SECRET_KEY='key',
        TEMPLATE_LOADERS=(
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
            'django.template.loaders.eggs.Loader',
        ),
        INSTALLED_APPS = [
            'filer',
            'mptt',
            'easy_thumbnails',
            'django.contrib.auth',
            'django.contrib.contenttypes',
            'django.contrib.admin',
            'django.contrib.sessions',
            'django.contrib.staticfiles',
        ],
        ROOT_URLCONF='filer.test_utils.cli',
    )
    defaults.update(extra)
    settings.configure(**defaults)
    from django.contrib import admin
    admin.autodiscover()
bb-master/sandbox/lib/python3.5/site-packages/buildbot/process/results.py | Alecto3-D/testable-greeter | 2 | 12761405 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from future.utils import lrange
# Build/step result codes.  lrange keeps this a real list on both Py2 and Py3.
ALL_RESULTS = lrange(7)
SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY, CANCELLED = ALL_RESULTS
# Lower-case display name for each code, indexed by the code itself.
Results = ["success", "warnings", "failure", "skipped", "exception", "retry", "cancelled"]
def statusToString(status):
    """Return the human-readable name for a result code.

    ``None`` means the build/step has not finished; out-of-range codes
    yield "Invalid status".
    """
    if status is None:
        return "not finished"
    if 0 <= status < len(Results):
        return Results[status]
    return "Invalid status"
def worst_status(a, b):
    """Return the more severe of the two result codes.

    Severity order (worst first): CANCELLED, RETRY, EXCEPTION, FAILURE,
    WARNINGS, SUCCESS, SKIPPED.  Returns None when neither code matches.
    """
    for status in (CANCELLED, RETRY, EXCEPTION, FAILURE, WARNINGS, SUCCESS, SKIPPED):
        if a == status or b == status:
            return status
def computeResultAndTermination(obj, result, previousResult):
    """Combine a new step ``result`` with ``previousResult``.

    ``obj`` supplies the flunkOn*/warnOn*/haltOnFailure flags (see
    ResultComputingConfigMixin).  Returns ``(result, terminate)`` where
    ``result`` is the new overall status and ``terminate`` says whether
    processing should stop.
    """
    possible_overall_result = result
    terminate = False
    if result == FAILURE:
        # The checks are ordered so flunkOnFailure wins over warnOnFailure,
        # which in turn wins over the softened SUCCESS.
        if not obj.flunkOnFailure:
            possible_overall_result = SUCCESS
        if obj.warnOnFailure:
            possible_overall_result = WARNINGS
        if obj.flunkOnFailure:
            possible_overall_result = FAILURE
        if obj.haltOnFailure:
            terminate = True
    elif result == WARNINGS:
        if not obj.warnOnWarnings:
            possible_overall_result = SUCCESS
        else:
            possible_overall_result = WARNINGS
        if obj.flunkOnWarnings:
            possible_overall_result = FAILURE
    elif result in (EXCEPTION, RETRY, CANCELLED):
        # Infrastructure-level outcomes always terminate.
        terminate = True
    result = worst_status(previousResult, possible_overall_result)
    return result, terminate
class ResultComputingConfigMixin(object):
    """Mixin holding the flags consumed by computeResultAndTermination."""

    # Defaults: a failure flunks the build but does not halt it; warnings
    # are ignored.
    haltOnFailure = False
    flunkOnWarnings = False
    flunkOnFailure = True
    warnOnWarnings = False
    warnOnFailure = False

    # NOTE(review): appears to enumerate the flag attribute names above for
    # consumers elsewhere -- confirm against call sites.
    resultConfig = [
        "haltOnFailure",
        "flunkOnWarnings",
        "flunkOnFailure",
        "warnOnWarnings",
        "warnOnFailure",
    ]
| 2.203125 | 2 |
WebMirror/management/rss_parser_funcs/feed_parse_extractPenguTaichou.py | fake-name/ReadableWebProxy | 193 | 12761406 | def extractPenguTaichou(item):
"""
<NAME>
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if item['title'].lower().startswith('sword shisho chapter'):
return buildReleaseMessageWithType(item, 'I was a Sword when I Reincarnated!', vol, chp, frag=frag, postfix=postfix)
return False
| 2.359375 | 2 |
connect4/common/helpers.py | hckr/engineering-thesis | 0 | 12761407 | <filename>connect4/common/helpers.py
# -*- coding: utf-8 -*-
import re
import yaml
def load_yaml_to_dict(file_path):
    """Parse the YAML file at *file_path* and return the resulting object."""
    # Use a context manager so the file handle is closed deterministically
    # (the old code leaked it until garbage collection).
    with open(file_path, encoding='utf-8') as yaml_file:
        return yaml.safe_load(yaml_file)
def save_dict_to_yaml(dictionary, file_path):
    """Serialize *dictionary* as block-style YAML to *file_path* (UTF-8)."""
    # Open in binary mode: yaml.dump emits bytes when an encoding is given,
    # which fails on a text-mode stream.  The context manager also closes
    # the handle deterministically (the old code leaked it).
    with open(file_path, 'wb') as yaml_file:
        yaml.dump(dictionary, yaml_file, default_flow_style=False, encoding='utf-8')
def strip_ext(file_name):
    """Return *file_name* with everything from the last '.' onward removed.

    Names without a dot are returned unchanged.
    """
    stem, dot, _ = file_name.rpartition('.')
    # rpartition returns ('', '', name) when there is no dot at all.
    return stem if dot else file_name
def atoi(text):
    """Convert *text* to int when it is all digits; otherwise return it as-is."""
    if text.isdigit():
        return int(text)
    return text
def natural_key(text):
    """Sort key that orders embedded digit runs numerically ('a2' < 'a10')."""
    # Splitting on a capturing group keeps the digit runs in the result.
    return [int(chunk) if chunk.isdigit() else chunk
            for chunk in re.split('(\d+)', text)]
| 2.375 | 2 |
prototype/management/commands/find_duplicates.py | benthomasson/fsm-designer-svg | 0 | 12761408 | <filename>prototype/management/commands/find_duplicates.py
from django.core.management.base import BaseCommand
from django.db.models import Count
from prototype.models import State
from pprint import pprint
class Command(BaseCommand):
    """Print State rows that form duplicate groups, then dump their fields.

    NOTE(review): the values() clause groups by (finite_state_machine_id,
    id) and then counts pk, so a count > 1 can only arise from genuinely
    duplicated rows -- confirm this matches the duplication being hunted.
    """

    def handle(self, *args, **options):
        # Group State rows and keep only groups containing more than one row.
        dups = list(State.objects
                    .values('finite_state_machine_id', 'id')
                    .annotate(Count('pk'))
                    .order_by()
                    .filter(pk__count__gt=1))
        pprint(dups)
        # For each duplicate group, dump the full rows for inspection.
        for dup in dups:
            # Drop the aggregate so the dict can be reused as filter kwargs.
            del dup['pk__count']
            pprint(list(State.objects
                        .filter(**dup)
                        .values()))
| 2.3125 | 2 |
zeus/modules/deformations/prune_deformation.py | shaido987/vega | 1 | 12761409 | # -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""ResNetVariant for Detection."""
from zeus.common import ClassType, ClassFactory
from .deformation import Deformation
from zeus.modules.operators import ops
from zeus.modules.operators import PruneConv2DFilter, PruneBatchNormFilter, PruneLinearFilter
@ClassFactory.register(ClassType.NETWORK)
class PruneDeformation(Deformation):
    """Prune any Network."""

    def __init__(self, desc, from_graph=False, weight_file=None):
        """Initialize the deformation and enable adaptive weights."""
        super(PruneDeformation, self).__init__(desc, from_graph, weight_file)
        self.is_adaptive_weight = True

    def deform(self):
        """Apply the matching prune filter to each prunable module."""
        if not self.props:
            return
        # (module type, prune filter) pairs; the first match wins, mirroring
        # the original isinstance if/elif chain.
        pruners = (
            (ops.Conv2d, PruneConv2DFilter),
            (ops.BatchNorm2d, PruneBatchNormFilter),
            (ops.Linear, PruneLinearFilter),
        )
        for _, module in self.model.named_modules():
            for module_type, pruner_cls in pruners:
                if isinstance(module, module_type):
                    pruner_cls(module, self.props).filter()
                    break
| 2.109375 | 2 |
cryptics/config.py | eigenfoo/cryptics | 9 | 12761410 | <filename>cryptics/config.py
import os
import json
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
INITIALIZE_DB_SQL = os.path.join(PROJECT_DIR, "queries", "initialize-db.sql")
SITEMAPS_JSON = os.path.join(PROJECT_DIR, "sitemaps.json")
SQLITE_DATABASE = os.path.join(PROJECT_DIR, "cryptics.sqlite3")
with open(SITEMAPS_JSON, "r") as f:
SITEMAPS = json.load(f)
| 2.265625 | 2 |
src/falconpy/host_group.py | CrowdStrike/falconpy | 111 | 12761411 | """CrowdStrike Falcon Host Groups API interface class
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
from ._util import generate_error_result, force_default
from ._util import handle_single_argument, process_service_request
from ._payload import host_group_create_payload, host_group_update_payload
from ._payload import generic_payload_list
from ._service_class import ServiceClass
from ._endpoint._host_group import _host_group_endpoints as Endpoints
class HostGroup(ServiceClass):
"""The only requirement to instantiate an instance of this class is one of the following:
- a valid client_id and client_secret provided as keywords.
- a credential dictionary with client_id and client_secret containing valid API credentials
{
"client_id": "CLIENT_ID_HERE",
"client_secret": "CLIENT_SECRET_HERE"
}
- a previously-authenticated instance of the authentication service class (oauth2.py)
- a valid token provided by the authentication service class (OAuth2.token())
"""
@force_default(defaults=["parameters"], default_types=["dict"])
def query_combined_group_members(self: object, parameters: dict = None, **kwargs) -> dict:
"""Search for members of a Host Group in your environment by providing an FQL filter
and paging details. Returns a set of host details which match the filter criteria.
Keyword arguments:
filter -- The filter expression that should be used to limit the results. FQL syntax.
An asterisk wildcard '*' includes all results.
id -- The ID of the Host Group to search for members of. String
limit -- The maximum number of records to return in this response. [Integer, 1-5000]
Use with the offset parameter to manage pagination of results.
offset -- The offset to start retrieving records from.
Use with the limit parameter to manage pagination of results.
parameters - full parameters payload, not required if using other keywords.
sort -- The property to sort by. FQL syntax (e.g. name|asc).
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/queryCombinedGroupMembers
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="queryCombinedGroupMembers",
keywords=kwargs,
params=parameters
)
@force_default(defaults=["parameters"], default_types=["dict"])
def query_combined_host_groups(self: object, parameters: dict = None, **kwargs) -> dict:
"""Search for Host Groups in your environment by providing an FQL filter and
paging details. Returns a set of Host Groups which match the filter criteria.
Keyword arguments:
filter -- The filter expression that should be used to limit the results. FQL syntax.
An asterisk wildcard '*' includes all results.
Available filter fields:
created_by modified_by
created_timestamp modified_timestamp
group_type name
limit -- The maximum number of records to return in this response. [Integer, 1-5000]
Use with the offset parameter to manage pagination of results.
offset -- The offset to start retrieving records from. Integer.
Use with the limit parameter to manage pagination of results.
parameters - full parameters payload, not required if using other keywords.
sort -- The property to sort by. FQL syntax (e.g. created_timestamp|asc).
Available sort fields:
created_by modified_by
created_timestamp modified_timestamp
group_type name
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/queryCombinedHostGroups
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="queryCombinedHostGroups",
keywords=kwargs,
params=parameters
)
@force_default(defaults=["body", "parameters"], default_types=["dict", "dict"])
def perform_group_action(self: object,
body: dict = None,
parameters: dict = None,
**kwargs
) -> dict:
"""Perform the specified action on the Host Groups specified in the request.
Keyword arguments:
action_name -- Action to perform on the host group. String.
Allowed values: 'add-hosts' or 'remove-hosts'.
action_parameters - List of dictionaries containing action specific parameter settings.
body -- full body payload, not required when using other keywords.
{
"action_parameters": [
{
"name": "string",
"value": "string"
}
],
"ids": [
"string"
]
}
ids -- List of host group IDs to perform an action against. String or list of strings.
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: POST
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/performGroupAction
"""
if not body:
body = generic_payload_list(submitted_keywords=kwargs,
payload_value="ids"
)
if kwargs.get("action_parameters", None):
body["action_parameters"] = kwargs.get("action_parameters", None)
# _allowed_actions = ['add-hosts', 'remove-hosts']
# operation_id = "performGroupAction"
# parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
# action_name = parameter_payload.get("action_name", "Not Specified")
# act = kwargs.get("action_name", "Not Specified")
if kwargs.get("action_name", "Not Specified").lower() in ['add-hosts', 'remove-hosts']:
returned = process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="performGroupAction",
body=body,
keywords=kwargs,
params=parameters
)
else:
returned = generate_error_result("Invalid value specified for action_name parameter.")
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def get_host_groups(self: object, *args, parameters: dict = None, **kwargs) -> dict:
"""Retrieve a set of Host Groups by specifying their IDs.
Keyword arguments:
ids -- List of host group IDs to retrieve. String or list of strings.
parameters -- full parameters payload, not required if ids is provided as a keyword.
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/getHostGroups
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="getHostGroups",
keywords=kwargs,
params=handle_single_argument(args, parameters, "ids")
)
@force_default(defaults=["body"], default_types=["dict"])
def create_host_groups(self: object, body: dict = None, **kwargs) -> dict:
"""Create Host Groups by specifying details about the group to create.
Keyword arguments:
assignment_rule -- Assignment rule to apply. String.
body -- full body payload, not required when using other keywords.
{
"resources": [
{
"assignment_rule": "string",
"description": "string",
"group_type": "static",
"name": "string"
}
]
}
description -- Description of the host group. String.
group_type -- Type of Host Group to create. String.
name -- The Host Group name. String.
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: POST
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/createHostGroups
"""
if not body:
body = host_group_create_payload(passed_keywords=kwargs)
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="createHostGroups",
body=body
)
@force_default(defaults=["parameters"], default_types=["dict"])
def delete_host_groups(self: object, *args, parameters: dict = None, **kwargs) -> dict:
"""Delete a set of Host Groups by specifying their IDs.
Keyword arguments:
ids -- List of host group IDs to delete. String or list of strings.
parameters -- full parameters payload, not required if ids is provided as a keyword.
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: dict object containing API response.
HTTP Method: DELETE
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/deleteHostGroups
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="deleteHostGroups",
keywords=kwargs,
params=handle_single_argument(args, parameters, "ids")
)
@force_default(defaults=["body"], default_types=["dict"])
def update_host_groups(self: object, body: dict = None, **kwargs) -> dict:
"""
Update Host Groups by specifying the ID of the group and details to update.
Keyword arguments:
assignment_rule -- Assignment rule to apply. String.
body -- full body payload, not required when using other keywords.
{
"resources": [
{
"assignment_rule": "string",
"description": "string",
"id": "string",
"name": "string"
}
]
}
description -- Description of the host group. String.
id -- Host Group ID to be updated. String.
name -- The Host Group name. String.
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: PATCH
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/updateHostGroups
"""
if not body:
body = host_group_update_payload(passed_keywords=kwargs)
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="updateHostGroups",
body=body
)
    @force_default(defaults=["parameters"], default_types=["dict"])
    def query_group_members(self: object, parameters: dict = None, **kwargs) -> dict:
        """Search for members of a Host Group in your environment by providing an FQL filter
        and paging details. Returns a set of Agent IDs which match the filter criteria.

        Keyword arguments:
        filter -- The filter expression that should be used to limit the results. FQL syntax.
                  An asterisk wildcard '*' includes all results.
        id -- The ID of the Host Group to search for members of. String.
        limit -- The maximum number of records to return in this response. [Integer, 1-5000]
                 Use with the offset parameter to manage pagination of results.
        offset -- The offset to start retrieving records from.
                  Use with the limit parameter to manage pagination of results.
        parameters - full parameters payload, not required if using other keywords.
        sort -- The property to sort by. FQL syntax (e.g. name|asc).

        This method only supports keywords for providing arguments.

        Returns: dict object containing API response.

        HTTP Method: GET

        Swagger URL
        https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/queryGroupMembers
        """
        # All filtering / paging keywords are passed straight through as
        # query-string parameters; no body payload is required for GETs.
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="queryGroupMembers",
            keywords=kwargs,
            params=parameters
            )
    @force_default(defaults=["parameters"], default_types=["dict"])
    def query_host_groups(self: object, parameters: dict = None, **kwargs) -> dict:
        """Search for Host Groups in your environment by providing an FQL filter and
        paging details. Returns a set of Host Group IDs which match the filter criteria.

        Keyword arguments:
        filter -- The filter expression that should be used to limit the results. FQL syntax.
                  An asterisk wildcard '*' includes all results.
                  Available filter fields:
                  created_by           modified_by
                  created_timestamp    modified_timestamp
                  group_type           name
        limit -- The maximum number of records to return in this response. [Integer, 1-5000]
                 Use with the offset parameter to manage pagination of results.
        offset -- The offset to start retrieving records from.
                  Use with the limit parameter to manage pagination of results.
        parameters - full parameters payload, not required if using other keywords.
        sort -- The property to sort by. FQL syntax (e.g. created_timestamp|asc).
                Available sort fields:
                created_by           modified_by
                created_timestamp    modified_timestamp
                group_type           name

        This method only supports keywords for providing arguments.

        Returns: dict object containing API response.

        HTTP Method: GET

        Swagger URL
        https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/queryHostGroups
        """
        # All filtering / paging keywords are passed straight through as
        # query-string parameters; no body payload is required for GETs.
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="queryHostGroups",
            keywords=kwargs,
            params=parameters
            )
# These method names align to the operation IDs in the API but
# do not conform to snake_case / PEP8 and are defined here for
# backwards compatibility / ease of use purposes
queryCombinedGroupMembers = query_combined_group_members
queryCombinedHostGroups = query_combined_host_groups
performGroupAction = perform_group_action
getHostGroups = get_host_groups
createHostGroups = create_host_groups
deleteHostGroups = delete_host_groups
updateHostGroups = update_host_groups
queryGroupMembers = query_group_members
queryHostGroups = query_host_groups
# The legacy name for this class does not conform to PascalCase / PEP8
# It is defined here for backwards compatibility purposes only.
Host_Group = HostGroup # pylint: disable=C0103
| 1.0625 | 1 |
tests/helpers.py | maldieve/luma.emulator | 0 | 12761412 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2017-18 <NAME> and contributors
# See LICENSE.rst for details.
from contextlib import contextmanager
import hashlib
import os.path
import sys
if sys.version_info > (3, 0):
from io import StringIO
else:
from io import BytesIO as StringIO
try:
from unittest.mock import call, patch
except ImportError:
from mock import call, patch, Mock # noqa: F401
def md5(fname):
    """Return the hex MD5 digest of the file at *fname*.

    Reads the file in fixed-size chunks so large reference images do not
    have to be loaded into memory in one piece (the previous version did
    a single ``fp.read()`` of the whole file).
    """
    digest = hashlib.md5()
    with open(fname, 'rb') as fp:
        for chunk in iter(lambda: fp.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def get_reference_image(fname):
    """Return the absolute path of *fname* inside this test dir's
    ``reference`` sub-directory."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, 'reference', fname))
def assert_identical(rname, fname):
    """Assert that the file *fname* is byte-identical (same MD5) to the
    reference image named *rname*."""
    assert md5(get_reference_image(rname)) == md5(fname)
@contextmanager
def redirect_stdout(fileobj=None):
    """Temporarily replace ``sys.stdout`` with *fileobj*.

    A fresh ``StringIO`` is created when no file object is supplied.
    The capture object is yielded, and the original stream is always
    restored, even on error.
    """
    target = StringIO() if fileobj is None else fileobj
    saved = sys.stdout
    sys.stdout = target
    try:
        yield target
    finally:
        sys.stdout = saved
| 2.078125 | 2 |
Data Scientist Career Path/3. Python Fundamentals/5. Python List/1. Intro to List/6. list plus.py | myarist/Codecademy | 23 | 12761413 | <filename>Data Scientist Career Path/3. Python Fundamentals/5. Python List/1. Intro to List/6. list plus.py
# Flower orders already placed.
orders = ["daisy", "buttercup", "snapdragon", "gardenia", "lily"]

# Create new orders here:
new_orders = ["lilac", "iris"]

# Combine the existing and the new orders into one list.
orders_combined = orders + new_orders

# List concatenation also works with literal lists.
# (Dataset-residue tokens fused onto this line previously made the file
# a SyntaxError; they have been removed.)
broken_prices = [5, 3, 4, 5, 4] + [4]
docs/examples/base_server.py | darkanthey/oauth2-stateless | 19 | 12761414 | from wsgiref.simple_server import make_server
import oauth2
import oauth2.grant
import oauth2.error
import oauth2.store.memory
import oauth2.tokengenerator
import oauth2.web.wsgi
# Create a SiteAdapter to interact with the user.
# This can be used to display confirmation dialogs and the like.
class ExampleSiteAdapter(oauth2.web.AuthorizationCodeGrantSiteAdapter,
                         oauth2.web.ImplicitGrantSiteAdapter):
    """Minimal site adapter shared by the authorization-code and implicit
    grants: renders a confirm/deny form and interprets the user's choice."""

    def authenticate(self, request, environ, scopes, client):
        # Check if the user has granted access.
        # An empty dict signals success to the grant handler; anything
        # else must raise UserNotAuthenticated.
        if request.post_param("confirm") == "confirm":
            return {}

        raise oauth2.error.UserNotAuthenticated

    def render_auth_page(self, request, response, environ, scopes, client):
        # Serve the confirm/deny form. Both buttons post back to the same
        # URL; authenticate()/user_has_denied_access() inspect which one
        # was pressed.
        response.body = '''
<html>
    <body>
        <form method="POST" name="confirmation_form">
            <input type="submit" name="confirm" value="confirm" />
            <input type="submit" name="deny" value="deny" />
        </form>
    </body>
</html>'''
        return response

    def user_has_denied_access(self, request):
        # Check if the user has denied access.
        if request.post_param("deny") == "deny":
            return True

        return False
# Create an in-memory storage to store your client apps.
client_store = oauth2.store.memory.ClientStore()

# Add a client (id/secret/redirect URI are example values).
client_store.add_client(client_id="abc", client_secret="xyz", redirect_uris=["http://localhost/callback"])

site_adapter = ExampleSiteAdapter()

# Create an in-memory storage to store issued tokens.
# LocalTokenStore can store access and auth tokens.
token_store = oauth2.store.memory.TokenStore()

# Create the controller. The same in-memory store serves as both the
# access-token and auth-code store here.
provider = oauth2.Provider(
    access_token_store=token_store,
    auth_code_store=token_store,
    client_store=client_store,
    token_generator=oauth2.tokengenerator.Uuid4TokenGenerator()
)

# Add Grants you want to support.
provider.add_grant(oauth2.grant.AuthorizationCodeGrant(site_adapter=site_adapter))
provider.add_grant(oauth2.grant.ImplicitGrant(site_adapter=site_adapter))

# Add refresh token capability and set expiration time of access tokens
# to 30 days (2592000 seconds).
provider.add_grant(oauth2.grant.RefreshToken(expires_in=2592000))

# Wrap the controller with the Wsgi adapter.
app = oauth2.web.wsgi.Application(provider=provider)

if __name__ == "__main__":
    # Serve the example app forever on port 8080.
    httpd = make_server('', 8080, app)
    httpd.serve_forever()
| 2.796875 | 3 |
do_calculation.py | Basdorsman/virtual-environment-test | 0 | 12761415 | import astropy
from astropy.cosmology import WMAP9 as cosmo

# Comoving distances (with astropy units, Mpc) to objects at redshifts
# 0.5, 1.0 and 1.5 under the WMAP9 cosmology.
# Renamed from `z`, which misleadingly suggested the variable held the
# redshifts rather than the resulting distances.
distances = cosmo.comoving_distance([0.5, 1.0, 1.5])
print(distances)
bin/imgt2counthist.py | laserson/vdj | 6 | 12761416 | #! /usr/bin/env python
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import optparse
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import vdj
import vdj.analysis
# -- Command-line handling ---------------------------------------------------
# NOTE: the rest of this script is Python 2 only (iteritems/itervalues).
option_parser = optparse.OptionParser()
option_parser.add_option('-s','--samples')
option_parser.add_option('-q','--quantify',choices=['clone','junction','v','j','vj','vdj'])
option_parser.add_option('-f','--freq',action='store_true')
(options,args) = option_parser.parse_args()

if len(args) == 1:
    inhandle = open(args[0],'r')
else:
    # BUG FIX: `raise ValueError, "..."` is Python-2-only syntax (a
    # SyntaxError under Python 3); the call form below works in both.
    raise ValueError("Must give a single argument to vdjxml file")

# determine mapping between barcodes and samples
sampledict = {}
ip = open(options.samples,'r')
for line in ip:
    sampledict[line.split()[0]] = line.split()[1]
ip.close()

# Count occurrences of the chosen feature per barcode.
features = ['barcode',options.quantify]
(uniq_feature_values,countdict) = vdj.analysis.imgt2countdict(inhandle,features)
max_size = max([max(cd.itervalues()) for cd in countdict.itervalues()])

# make the plots
outbasename = '.'.join(args[0].split('.')[:-1])
colors_10 = ['#e31a1c',
             '#377db8',
             '#4daf4a',
             '#984ea3',
             '#ff7f00',
             '#ffff33',
             '#a65628',
             '#f781bf',
             '#999999',
             '#444444']
markers = 'ovs^<>ph*d'

fig = plt.figure()
ax = fig.add_subplot(111)
# Tufte-style axes: drop top/right spines, push the others outward.
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_position(('outward',5))
ax.spines['left'].set_position(('outward',5))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')

for (i,(barcode,label)) in enumerate(sampledict.iteritems()):
    num_chains = sum(countdict[barcode].values())
    sizes = np.arange(1,max_size+1)
    # Histogram of clone/feature sizes; only plot non-empty bins.
    (hist,garbage) = np.histogram(countdict[barcode].values(),bins=sizes)
    idxs = hist > 0
    if options.freq == True:
        # Normalize sizes to per-chain frequencies when -f is given.
        freqs = np.float_(sizes) / num_chains
        ax.plot(freqs[idxs],hist[idxs],marker=markers[i],linestyle='None',color=colors_10[i],markeredgewidth=0,markersize=4,clip_on=False,label=label)
    else:
        ax.plot(sizes[idxs],hist[idxs],marker=markers[i],linestyle='None',color=colors_10[i],markeredgewidth=0,markersize=4,clip_on=False,label=label)

ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel(options.quantify+(' frequency' if options.freq else ' counts'))
ax.set_ylabel('number')
leg = ax.legend(loc=0,numpoints=1,prop=mpl.font_manager.FontProperties(size='small'))
leg.get_frame().set_visible(False)
# fig.show()
fig.savefig(outbasename+'.%shist.png' % options.quantify)
fig.savefig(outbasename+'.%shist.pdf' % options.quantify)
| 2.25 | 2 |
easymunk/abc/game_object.py | fabiommendes/easymunk | 1 | 12761417 | from abc import ABC, abstractmethod
from typing import Any, TypeVar, Optional
# Type variable used so fluent methods (draw/step) return the subclass type.
GOType = TypeVar("GOType", bound="GameObjectInterface")


class GameObjectInterface(ABC):
    """
    A game object is a composable element in game.
    """

    # Parent in the game-object tree; None for the root.
    _parent: Optional["GameObjectInterface"] = None

    def __init_subclass__(cls, **kwargs):
        # Each subclass gets its own cache of resolved message handlers
        # (see process_message).
        cls._message_handlers_cache = {}
        super().__init_subclass__(**kwargs)

    @abstractmethod
    def _iter_game_object_children(self):
        raise NotImplementedError

    def draw(self: GOType, camera: Any = None) -> GOType:
        """
        Draw object using the given camera.

        The default implementation is empty. This method is useful to integrate
        with game libraries with a canvas-like rendering metaphor.
        """
        return self

    def step(self: GOType, dt: float) -> GOType:
        """
        Update object by evolving a single step of duration dt.

        Recurses into every child game object.

        Args:
            dt: Duration of time step.
        """
        for child in self._iter_game_object_children():
            child.step(dt)
        return self

    def process_message(self, msg, /, *args, sender=None):
        """
        Process message.

        The default implementation seeks for a method named handler_<msg>_message()
        and execute it forwarding any positional arguments. The sender object
        is passed as a keyword argument and other keyword arguments can be
        either forwarded or influence the way the message is processed.
        """
        try:
            fn = self._message_handlers_cache[msg]
        except KeyError:
            # Normalize "some-msg" -> "some_msg" and cache the unbound
            # handler so the getattr lookup happens only once per class.
            msg = msg.replace("-", "_")
            name = f"handler_{msg}_message"
            cls = type(self)
            fn = self._message_handlers_cache[msg] = getattr(cls, name)
        fn(self, sender, *args)

    def send_message(self, msg, *args, **kwargs):
        """
        Send message to parent.
        """
        kwargs.setdefault("sender", self)
        # NOTE(review): this calls `_parent.send(...)`, but no `send` method
        # is defined on GameObjectInterface itself -- confirm whether
        # `process_message` was intended here.
        self._parent.send(msg, *args, **kwargs)
| 3.34375 | 3 |
stix_shifter/stix_transmission/src/modules/splunk/splunk_error_mapper.py | 3v1lW1th1n/stix-shifter | 1 | 12761418 | from .....utils.error_mapper_base import ErrorMapperBase
from .....utils.error_response import ErrorCode
# Maps substrings of the Splunk error-message text to stix-shifter
# transmission error codes (first match wins in ErrorMapper).
error_mapping = {
    'Unknown sid.': ErrorCode.TRANSMISSION_SEARCH_DOES_NOT_EXISTS,
    'Unable to parse the search': ErrorCode.TRANSMISSION_QUERY_PARSING_ERROR
}
class ErrorMapper():
    """Translates Splunk error payloads into stix-shifter error codes."""

    # Fallback code used when no entry in error_mapping matches.
    DEFAULT_ERROR = ErrorCode.TRANSMISSION_MODULE_DEFAULT_ERROR

    @staticmethod
    def set_error_code(json_data, return_obj):
        """Extract messages[0].text from *json_data*, map it to an error
        code via error_mapping, and attach the code to *return_obj*.

        Re-raises the original exception if the payload is missing the
        expected message structure.
        """
        try:
            message_text = json_data['messages'][0]['text']
        except Exception:
            print("failed to find the message_0_text in: " + str(json_data))
            # Bare `raise` preserves the original traceback (the previous
            # `raise e` re-raised from this frame instead).
            raise

        error_code = ErrorMapper.DEFAULT_ERROR
        print('error code message: ' + message_text)
        # First mapping entry whose key appears in the message wins.
        for pattern, code in error_mapping.items():
            if pattern in message_text:
                error_code = code
                break

        if error_code == ErrorMapper.DEFAULT_ERROR:
            print("failed to map: " + str(json_data))

        ErrorMapperBase.set_error_code(return_obj, error_code)
| 2.609375 | 3 |
toolbox/backups/postgres.py | quaselbat4/pastepage | 0 | 12761419 | """
A postgres database backup script lifted from <NAME>'s Newsblur
https://github.com/samuelclay/NewsBlur
"""
import os
import sys
CURRENT_DIR = os.path.dirname(__file__)
PROJECT_DIR = ''.join([CURRENT_DIR, '/../../project/'])
sys.path.insert(0, PROJECT_DIR)
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import time
import cloudfiles
from django.conf import settings
db_user = settings.DATABASES['default']['USER']
db_name = settings.DATABASES['default']['NAME']
db_pass = settings.DATABASES['default']['PASSWORD']
os.environ['PGPASSWORD'] = db_pass
filename = 'postgres_%s_%s.sql.gz' % (
db_name,
time.strftime('%Y-%m-%d-%H-%M')
)
cmd = 'pg_dump -U %s -Fc %s > %s' % (db_user, db_name, filename)
print 'Backing up PostgreSQL: %s' % cmd
os.system(cmd)
print 'Uploading %s to database backup...' % filename
conn = cloudfiles.get_connection(
settings.CUMULUS['USERNAME'],
settings.CUMULUS['API_KEY']
)
bucket = conn.get_container("pastpages.backups")
obj = bucket.create_object(filename)
obj.load_from_filename(filename)
os.remove(filename)
| 2.46875 | 2 |
RunMe.py | shukkkur/storage | 11 | 12761420 | <reponame>shukkkur/storage
__author__ = 'shukkkur'
'''
GitHub: https://github.com/shukkkur
Date: 17/05/2021
***
I build and trained the following linear model classifiers:
Ridge
SVC
KNN
Logistic Regression
Decision Tree
According to my results, SVC performs the best. The final answers would be based on these algorithm.
The Final Results is stored in DataFrame names "df"
'''
# Neccessary Imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report, accuracy_score
from sklearn.linear_model import RidgeClassifier, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix
# To Display All Columns, when printing a DataFrame
pd.options.display.max_columns = None

##def namestr(obj, namespace=globals()):
##    '''Print the name of the variable'''
##    return ' '.join([name for name in namespace if namespace[name] is obj][0].split("_"))

# Training Data -- one remote CSV of match results per player.
Sachenko_Kristina = 'https://raw.githubusercontent.com/shukkkur/Tennis-Match-Prediction/main/TrainingData/SachenkoKristina.csv'
Schwaibiger_Anastasia = 'https://raw.githubusercontent.com/shukkkur/Tennis-Match-Prediction/main/TrainingData/SchwaibigerAnastasia.csv'
Shelekhova_Laura = 'https://raw.githubusercontent.com/shukkkur/Tennis-Match-Prediction/main/TrainingData/ShelekhovaLaura.csv'
Solakov_Leticia = 'https://raw.githubusercontent.com/shukkkur/Tennis-Match-Prediction/main/TrainingData/SolakovLeticia.csv'
Wolter_Franziska = 'https://raw.githubusercontent.com/shukkkur/Tennis-Match-Prediction/main/TrainingData/WolterFranziska.csv'
Wächter_Daria = 'https://raw.githubusercontent.com/shukkkur/Tennis-Match-Prediction/main/TrainingData/W%C3%A4chterDaria.csv'
Öztürk_Tuana = 'https://raw.githubusercontent.com/shukkkur/Tennis-Match-Prediction/main/TrainingData/%C3%96zt%C3%BCrkTuana.csv'

files = [Sachenko_Kristina, Schwaibiger_Anastasia,Shelekhova_Laura,Solakov_Leticia,Wolter_Franziska,Wächter_Daria, Öztürk_Tuana]

# Prediction Data -- players whose match outcomes we want to predict.
Grigorieva_Darja = 'https://raw.githubusercontent.com/shukkkur/Tennis-Match-Prediction/main/PredictionData/MoreData/GrigorievaDarja_original.csv'
Khomich_Michelle = 'https://raw.githubusercontent.com/shukkkur/Tennis-Match-Prediction/main/PredictionData/MoreData/KhomichMichelle_original.csv'
Nitzsche_Lavinia_Maria = 'https://raw.githubusercontent.com/shukkkur/Tennis-Match-Prediction/main/PredictionData/MoreData/NitzscheLaviniaMaria_original.csv'
Ribbert_Paula = 'https://raw.githubusercontent.com/shukkkur/Tennis-Match-Prediction/main/PredictionData/MoreData/RibbertPaula_original.csv'
Wächter_Daria = 'https://raw.githubusercontent.com/shukkkur/Tennis-Match-Prediction/main/PredictionData/MoreData/W%C3%A4chterDaria_original.csv'

predict = [Grigorieva_Darja,Khomich_Michelle,Nitzsche_Lavinia_Maria,Ribbert_Paula,Wächter_Daria]

### To train my models, it would be easier if all the csv's are in a single file.
### Concatenate the files
# NOTE(review): `df` is reused inside the loop and then rebound to the
# concatenated frame below.
lst = []
for file in files:
    df = pd.read_csv(file, index_col=None, header=0)
    lst.append(df)

df = pd.concat(lst, axis=0, ignore_index=True)
#dfs.replace({'verloren':'lose', 'gewonnen':'win'}, inplace = True)
#print(df.player1_name.unique())

### Training And Testing Data
# Features are the two players' LK ratings; target is the match outcome.
cols = ['lk1', 'lk2']
X = df[cols]
y = df['match_outcome']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state=0, shuffle=True)

# Ridge Classifier - L2
ridge = RidgeClassifier(alpha = 0.5)
ridge.fit(X_train, y_train)

# SVC
svc = SVC(C = 10)
svc.fit(X_train, y_train)

# KNN - Neighrest Neighbor
knn = KNeighborsClassifier(n_neighbors=18)
knn.fit(X_train, y_train)

# Logistic Regression Classifier
logreg = LogisticRegression(C = 0.01)
logreg.fit(X_train, y_train)

# Decision Tree
tree = DecisionTreeClassifier()
tree.fit(X_train, y_train)

if __name__ == "__main__":
##    print('Ridge Classifier Score: {:.2f}'.format(ridge.score(X_test, y_test)))
##    print('SVC Score: {:.2f}'.format(svc.score(X_test, y_test)))
##    print('KNN Score: {:.2f}'.format(knn.score(X_test, y_test)))
##    print('LogReg Score: {:.2f}'.format(logreg.score(X_test, y_test)))
##    print('Decision Tree Score: {:.2f}'.format(tree.score(X_test, y_test)))
##    print('\n\n')

    # SVC performs the best, threfore the final answers would be according to SVC model
##    test = df[['lk1', 'lk2']]
##    y_pred = svc.predict(test)
##    df.insert(7, 'predicted_outcome', y_pred)
##    print(df.head())
##    print('\n\n')
##    # Confusion Matrix To evaluate our results
##    matrix = confusion_matrix(df.match_outcome.values, y_pred)
##    print('Confusion Matrix\n', matrix, '\n')
##    print("True Positives (correctly predicted 'verloren')", matrix[1,1])
##    print("False Positives (incorrectly predicted 'verloren')", matrix[0,1])
##    print("True Negatives (correctly predicted 'gewonnen')", matrix[0,0])
##    print("False Negatives (incorrectly predicted 'gewonnen')", matrix[1,0])
##
##    print('\nDataFrame is stored in variable "df"')

    # Predict outcomes for each unseen player's matches with the SVC model.
    for file in predict:
        df = pd.read_csv(file)
        X = df[cols]
        print(df.iloc[0]['player1_name'])
        print(svc.predict(X), end='\n\n')
| 2.46875 | 2 |
tests/tests/test_token.py | kjaskiewiczz/deviceauth | 0 | 12761421 | import bravado
import pytest
import requests
from client import BaseDevicesApiClient, ManagementClient, \
SimpleInternalClient, SimpleManagementClient
from common import Device, DevAuthorizer, device_auth_req, \
make_devid, explode_jwt
class TestToken(ManagementClient):
    """End-to-end test of the device-authentication token lifecycle:
    request -> accept -> issue -> verify -> revoke -> verify fails."""

    intclient = SimpleInternalClient()
    devapi = BaseDevicesApiClient()

    def test_token(self):
        d = Device()
        da = DevAuthorizer()
        url = self.devapi.make_api_url("/auth_requests")

        # poke devauth so that device appears; unaccepted devices get 401
        rsp = device_auth_req(url, da, d)
        assert rsp.status_code == 401

        # try to find our devices in all devices listing
        mc = SimpleManagementClient()
        dev = mc.find_device_by_identity(d.identity)

        self.log.debug('found matching device with ID: %s', dev.id)
        devid = dev.id
        # extract authentication data set ID
        aid = dev.auth_sets[0].id

        try:
            self.accept_device(devid, aid)
        except bravado.exception.HTTPError as e:
            # 204 No Content is the expected success response
            assert e.response.status_code == 204

        # device is accepted, we should get a token now
        rsp = device_auth_req(url, da, d)
        assert rsp.status_code == 200

        da.parse_rsp_payload(d, rsp.text)

        assert len(d.token) > 0
        self.log.info("device token: %s", d.token)

        # sanity-check the JWT header and claims
        thdr, tclaims, tsign = explode_jwt(d.token)
        assert 'typ' in thdr and thdr['typ'] == 'JWT'

        assert 'jti' in tclaims
        assert 'exp' in tclaims
        assert 'sub' in tclaims and tclaims['sub'] == devid
        assert 'iss' in tclaims and tclaims['iss'] == 'Mender'

        # TODO: signature verification?

        # verify token; the token is to be placed in the Authorization header
        # and it looks like bravado cannot handle a POST request with no data
        # in body, hence we fall back to sending request directly
        verify_url = self.intclient.make_api_url("/tokens/verify")
        self.log.info("verify URL: %s", verify_url)
        auth_hdr = 'Bearer {}'.format(d.token)

        # no auth header should raise an error
        rsp = requests.post(verify_url, data='')
        assert rsp.status_code == 401

        # successful verification
        rsp = requests.post(verify_url, data='',
                            headers={'Authorization': auth_hdr})
        assert rsp.status_code == 200

        # use a bogus token that is not a valid JWT
        rsp = requests.post(verify_url, data='',
                            headers={'Authorization': 'bogus'})
        assert rsp.status_code == 401

        # or a correct token with data appended at the end
        rsp = requests.post(verify_url, data='',
                            headers={'Authorization': auth_hdr + "==foo"})
        assert rsp.status_code == 401

        # bravado cannot handle DELETE requests either
        # self.client.tokens.delete_tokens_id(id=tclaims['jti'])
        # use requests instead; revoke the token by its jti claim
        rsp = requests.delete(self.make_api_url('/tokens/{}'.format(tclaims['jti'])))
        assert rsp.status_code == 204

        # unsuccessful verification after revocation
        rsp = requests.post(verify_url, data='',
                            headers={'Authorization': auth_hdr})
        assert rsp.status_code == 401
| 2.25 | 2 |
src/geniusect/neural_net/dqn_callbacks.py | Jay2645/Geniusect-2.0 | 1 | 12761422 | #!/usr/bin/env python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import tempfile
import numpy as np
import six
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.distribute import multi_worker_training_state
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from rl.callbacks import Callback
class TensorBoard(Callback):
# pylint: disable=line-too-long
"""Enable visualizations for TensorBoard.
TensorBoard is a visualization tool provided with TensorFlow.
This callback logs events for TensorBoard, including:
* Metrics summary plots
* Training graph visualization
* Activation histograms
* Sampled profiling
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```sh
tensorboard --logdir=path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Arguments:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation and
weight histograms for the layers of the model. If set to 0, histograms
won't be computed. Validation data (or split) must be specified for
histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard. The log file
can become quite large when write_graph is set to True.
write_images: whether to write model weights to visualize as image in
TensorBoard.
update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
writes the losses and metrics to TensorBoard after each batch. The same
applies for `'epoch'`. If using an integer, let's say `1000`, the
callback will write the metrics and losses to TensorBoard every 1000
samples. Note that writing too frequently to TensorBoard can slow down
your training.
profile_batch: Profile the batch to sample compute characteristics. By
default, it will profile the second batch. Set profile_batch=0 to
disable profiling. Must run in TensorFlow eager mode.
embeddings_freq: frequency (in epochs) at which embedding layers will
be visualized. If set to 0, embeddings won't be visualized.
embeddings_metadata: a dictionary which maps layer name to a file name in
which metadata for this embedding layer is saved. See the
[details](
https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
about metadata files format. In case if the same metadata file is
used for all embedding layers, string can be passed.
Raises:
ValueError: If histogram_freq is set and no validation data is provided.
"""
# pylint: enable=line-too-long
    def __init__(self,
                 log_dir='logs',
                 histogram_freq=0,
                 write_graph=True,
                 write_images=False,
                 update_freq='epoch',
                 profile_batch=2,
                 embeddings_freq=0,
                 embeddings_metadata=None,
                 **kwargs):
        """Store configuration; legacy V1-era kwargs are validated and ignored."""
        super(TensorBoard, self).__init__()
        self._validate_kwargs(kwargs)

        self.log_dir = log_dir
        self.histogram_freq = histogram_freq
        self.write_graph = write_graph
        self.write_images = write_images
        # 'batch' is normalized to 1 so update_freq can be compared
        # numerically against the number of samples seen.
        if update_freq == 'batch':
            self.update_freq = 1
        else:
            self.update_freq = update_freq
        self.embeddings_freq = embeddings_freq
        self.embeddings_metadata = embeddings_metadata

        # Counters used to decide when batch-level summaries are due.
        self._samples_seen = 0
        self._samples_seen_at_last_write = 0
        self._current_batch = 0
        self._total_batches_seen = 0
        self._total_val_batches_seen = 0

        # A collection of file writers currently in use, to be closed when
        # training ends for this callback. Writers are keyed by the
        # directory name under the root logdir: e.g., "train" or
        # "validation".
        self._writers = {}
        self._train_run_name = 'train'
        self._validation_run_name = 'validation'

        self._profile_batch = profile_batch
        # True when a trace is running.
        self._is_tracing = False

        # TensorBoard should only write summaries on the chief when in a
        # Multi-Worker setting.
        self._chief_worker_only = True
    def _validate_kwargs(self, kwargs):
        """Handle arguments were supported in V1.

        Known-but-obsolete keyword arguments get a warning; any other
        unexpected keyword raises ValueError.
        """
        if kwargs.get('write_grads', False):
            logging.warning('`write_grads` will be ignored in TensorFlow 2.0 '
                            'for the `TensorBoard` Callback.')
        if kwargs.get('batch_size', False):
            logging.warning('`batch_size` is no longer needed in the '
                            '`TensorBoard` Callback and will be ignored '
                            'in TensorFlow 2.0.')
        if kwargs.get('embeddings_layer_names', False):
            logging.warning('`embeddings_layer_names` is not supported in '
                            'TensorFlow 2.0. Instead, all `Embedding` layers '
                            'will be visualized.')
        if kwargs.get('embeddings_data', False):
            logging.warning('`embeddings_data` is not supported in TensorFlow '
                            '2.0. Instead, all `Embedding` variables will be '
                            'visualized.')

        unrecognized_kwargs = set(kwargs.keys()) - {
            'write_grads', 'embeddings_layer_names', 'embeddings_data', 'batch_size'
        }

        # Only allow kwargs that were supported in V1.
        if unrecognized_kwargs:
            raise ValueError('Unrecognized arguments in `TensorBoard` '
                             'Callback: ' + str(unrecognized_kwargs))
    def set_model(self, model):
        """Sets Keras model and writes graph if specified."""
        # NOTE(review): `model` here appears to be a keras-rl agent whose
        # underlying Keras model lives in its `.model` attribute -- confirm
        # against the Agent passed by the caller.
        self.model = model.model
        with context.eager_mode():
            self._close_writers()
            if self.write_graph:
                with self._get_writer(self._train_run_name).as_default():
                    with summary_ops_v2.always_record_summaries():
                        if not self.model.run_eagerly:
                            summary_ops_v2.graph(K.get_graph(), step=0)

                        # Only graph networks / Sequential models can be
                        # serialized as a Keras model summary.
                        summary_writable = (
                            self.model._is_graph_network or  # pylint: disable=protected-access
                            self.model.__class__.__name__ == 'Sequential')  # pylint: disable=protected-access
                        if summary_writable:
                            summary_ops_v2.keras_model('keras', self.model, step=0)

        if self.embeddings_freq:
            self._configure_embeddings()
def _configure_embeddings(self):
"""Configure the Projector for embeddings."""
# TODO(omalleyt): Add integration tests.
from tensorflow.python.keras.layers import embeddings
try:
from tensorboard.plugins import projector
except ImportError:
raise ImportError('Failed to import TensorBoard. Please make sure that '
'TensorBoard integration is complete."')
config = projector.ProjectorConfig()
for layer in self.model.layers:
if isinstance(layer, embeddings.Embedding):
embedding = config.embeddings.add()
embedding.tensor_name = layer.embeddings.name
if self.embeddings_metadata is not None:
if isinstance(self.embeddings_metadata, str):
embedding.metadata_path = self.embeddings_metadata
else:
if layer.name in embedding.metadata_path:
embedding.metadata_path = self.embeddings_metadata.pop(layer.name)
if self.embeddings_metadata:
raise ValueError('Unrecognized `Embedding` layer names passed to '
'`keras.callbacks.TensorBoard` `embeddings_metadata` '
'argument: ' + str(self.embeddings_metadata.keys()))
class DummyWriter(object):
"""Dummy writer to conform to `Projector` API."""
def __init__(self, logdir):
self.logdir = logdir
def get_logdir(self):
return self.logdir
writer = DummyWriter(self.log_dir)
projector.visualize_embeddings(writer, config)
def _close_writers(self):
"""Close all remaining open file writers owned by this callback.
If there are no such file writers, this is a no-op.
"""
with context.eager_mode():
for writer in six.itervalues(self._writers):
writer.close()
self._writers.clear()
def _get_writer(self, writer_name):
"""Get a summary writer for the given subdirectory under the logdir.
A writer will be created if it does not yet exist.
Arguments:
writer_name: The name of the directory for which to create or
retrieve a writer. Should be either `self._train_run_name` or
`self._validation_run_name`.
Returns:
A `SummaryWriter` object.
"""
if writer_name not in self._writers:
path = os.path.join(self.log_dir, writer_name)
writer = summary_ops_v2.create_file_writer_v2(path)
self._writers[writer_name] = writer
return self._writers[writer_name]
    def on_train_begin(self, logs=None):
        # Start profiling immediately when the very first batch is the one
        # selected for profiling; later batches are handled in on_batch_end.
        if self._profile_batch == 1:
            summary_ops_v2.trace_on(graph=True, profiler=True)
            self._is_tracing = True
    def on_batch_end(self, batch, logs=None):
        """Writes scalar summaries for metrics on every training batch.

        Performs profiling if current batch is in profiler_batches.

        Arguments:
          batch: Integer, index of batch within the current epoch.
          logs: Dict. Metric results for this batch.
        """
        # Don't output batch_size and batch number as TensorBoard summaries
        logs = logs or {}
        self._samples_seen += logs.get('size', 1)
        samples_seen_since = self._samples_seen - self._samples_seen_at_last_write
        # Emit batch-level summaries once enough samples have accumulated
        # (update_freq is a sample count unless it is 'epoch').
        if self.update_freq != 'epoch' and samples_seen_since >= self.update_freq:
            self._log_metrics(logs, prefix='batch_', step=self._total_batches_seen)
            self._samples_seen_at_last_write = self._samples_seen
        self._total_batches_seen += 1
        # Stop a running trace after one batch; arm tracing one batch
        # before the requested profile batch.
        if self._is_tracing:
            self._log_trace()
        elif (not self._is_tracing and
              self._total_batches_seen == self._profile_batch - 1):
            self._enable_trace()
    def on_epoch_end(self, epoch, logs=None):
        """Runs metrics and histogram summaries at epoch end."""
        # Step is the epoch index for 'epoch' mode, otherwise the running
        # sample count, so batch and epoch summaries share one time axis.
        step = epoch if self.update_freq == 'epoch' else self._samples_seen
        self._log_metrics(logs, prefix='epoch_', step=step)

        if self.histogram_freq and epoch % self.histogram_freq == 0:
            self._log_weights(epoch)

        if self.embeddings_freq and epoch % self.embeddings_freq == 0:
            self._log_embeddings(epoch)
    def on_train_end(self, logs=None):
        # Flush any still-running trace, then release all file writers.
        if self._is_tracing:
            self._log_trace()
        self._close_writers()
    def _enable_trace(self):
        # Tracing/profiling is only available in eager execution.
        if context.executing_eagerly():
            summary_ops_v2.trace_on(graph=True, profiler=True)
            self._is_tracing = True
def _log_trace(self):
  """Exports the armed trace to the train writer (eager mode only)."""
  if context.executing_eagerly():
    with self._get_writer(self._train_run_name).as_default(), \
        summary_ops_v2.always_record_summaries():
      # TODO(b/126388999): Remove step info in the summary name.
      summary_ops_v2.trace_export(
          name='batch_%d' % self._total_batches_seen,
          step=self._total_batches_seen,
          # Profiler output goes next to the train event files.
          profiler_outdir=os.path.join(self.log_dir, 'train'))
    self._is_tracing = False
def _log_metrics(self, logs, prefix, step):
  """Writes metrics out as custom scalar summaries.
  Arguments:
    logs: Dict. Keys are scalar summary names, values are NumPy scalars.
    prefix: String. The prefix to apply to the scalar summary names.
    step: Int. The global step to use for TensorBoard.
  """
  if logs is None:
    logs = {}
  # Group metrics by the name of their associated file writer. Values
  # are lists of metrics, as (name, scalar_value) pairs.
  logs_by_writer = {
      self._train_run_name: [],
      self._validation_run_name: [],
  }
  validation_prefix = 'val_'
  for (name, value) in logs.items():
    if name in ('batch', 'size', 'num_steps'):
      # Scrub non-metric items.
      continue
    if name.startswith(validation_prefix):
      # Route `val_*` metrics to the validation writer, with the
      # `val_` marker stripped off the summary name.
      name = name[len(validation_prefix):]
      writer_name = self._validation_run_name
    else:
      writer_name = self._train_run_name
    name = prefix + name  # assign batch or epoch prefix
    logs_by_writer[writer_name].append((name, value))
  with context.eager_mode():
    with summary_ops_v2.always_record_summaries():
      for writer_name in logs_by_writer:
        these_logs = logs_by_writer[writer_name]
        if not these_logs:
          # Don't create a "validation" events file if we don't
          # actually have any validation data.
          continue
        writer = self._get_writer(writer_name)
        with writer.as_default():
          for (name, value) in these_logs:
            summary_ops_v2.scalar(name, value, step=step)
def _log_weights(self, epoch):
  """Logs the weights of the Model to TensorBoard."""
  writer = self._get_writer(self._train_run_name)
  with context.eager_mode(), \
      writer.as_default(), \
      summary_ops_v2.always_record_summaries():
    for layer in self.model.layers:
      for weight in layer.weights:
        # TensorBoard tags may not contain ':' (variable names do, e.g.
        # "dense/kernel:0"), so sanitize the name first.
        weight_name = weight.name.replace(':', '_')
        with ops.init_scope():
          # Pull the concrete value out of the variable.
          weight = K.get_value(weight)
        summary_ops_v2.histogram(weight_name, weight, step=epoch)
        if self.write_images:
          self._log_weight_as_image(weight, weight_name, epoch)
    writer.flush()
def _log_weight_as_image(self, weight, weight_name, epoch):
  """Logs a weight as a TensorBoard image.
  Arguments:
    weight: Array of weight values to visualize.
    weight_name: String. Sanitized summary tag for the weight.
    epoch: Int. Step at which the image summary is recorded.
  """
  w_img = array_ops.squeeze(weight)
  shape = K.int_shape(w_img)
  if len(shape) == 1:  # Bias case
    w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
  elif len(shape) == 2:  # Dense layer kernel case
    if shape[0] > shape[1]:
      # Keep the longer axis vertical so the image is portrait-oriented.
      w_img = array_ops.transpose(w_img)
      shape = K.int_shape(w_img)
    w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
  elif len(shape) == 3:  # ConvNet case
    if K.image_data_format() == 'channels_last':
      # Switch to channels_first to display every kernel as a separate
      # image.
      w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
      shape = K.int_shape(w_img)
    w_img = array_ops.reshape(w_img, [shape[0], shape[1], shape[2], 1])
  shape = K.int_shape(w_img)
  # Not possible to handle 3D convnets etc.
  if len(shape) == 4 and shape[-1] in [1, 3, 4]:
    summary_ops_v2.image(weight_name, w_img, step=epoch)
def _log_embeddings(self, epoch):
embeddings_ckpt = os.path.join(self.log_dir, 'train',
'keras_embedding.ckpt-{}'.format(epoch))
self.model.save_weights(embeddings_ckpt)
class ModelCheckpoint(Callback):
  """Save the model after every epoch.
  `filepath` can contain named formatting options,
  which will be filled the value of `epoch` and
  keys in `logs` (passed in `on_epoch_end`).
  For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
  then the model checkpoints will be saved with the epoch number and
  the validation loss in the filename.
  Arguments:
    filepath: string, path to save the model file.
    monitor: quantity to monitor.
    verbose: verbosity mode, 0 or 1.
    save_best_only: if `save_best_only=True`, the latest best model according
      to the quantity monitored will not be overwritten.
    mode: one of {auto, min, max}. If `save_best_only=True`, the decision to
      overwrite the current save file is made based on either the maximization
      or the minimization of the monitored quantity. For `val_acc`, this
      should be `max`, for `val_loss` this should be `min`, etc. In `auto`
      mode, the direction is automatically inferred from the name of the
      monitored quantity.
    save_weights_only: if True, then only the model's weights will be saved
      (`model.save_weights(filepath)`), else the full model is saved
      (`model.save(filepath)`).
    save_freq: `'epoch'` or integer. When using `'epoch'`, the callback saves
      the model after each epoch. When using integer, the callback saves the
      model at end of a batch at which this many samples have been seen since
      last saving. Note that if the saving isn't aligned to epochs, the
      monitored metric may potentially be less reliable (it could reflect as
      little as 1 batch, since the metrics get reset every epoch). Defaults to
      `'epoch'`
    load_weights_on_restart: Whether the training should restore the model. If
      True, the model will attempt to load the checkpoint file from `filepath`
      at the start of `model.fit()`. This saves the need of manually calling
      `model.load_weights()` before `model.fit(). In multi-worker distributed
      training, this provides fault-tolerance and loads the model
      automatically upon recovery of workers. The callback gives up loading if
      the filepath does not exist, and raises ValueError if format does not
      match. Defaults to False.
    **kwargs: Additional arguments for backwards compatibility. Possible key
      is `period`.
  """

  def __init__(self,
               filepath,
               monitor='val_loss',
               verbose=0,
               save_best_only=False,
               save_weights_only=False,
               mode='auto',
               save_freq='epoch',
               load_weights_on_restart=False,
               **kwargs):
    super(ModelCheckpoint, self).__init__()
    self.monitor = monitor
    self.verbose = verbose
    self.filepath = filepath
    self.save_best_only = save_best_only
    self.save_weights_only = save_weights_only
    self.save_freq = save_freq
    self.load_weights_on_restart = load_weights_on_restart
    self.epochs_since_last_save = 0
    self._samples_seen_since_last_saving = 0
    # NOTE(review): metrics/infos/info_names are consumed only by
    # `on_step_end` below and look grafted from a keras-rl-style training
    # loop; they are not part of the stock Keras ModelCheckpoint — confirm
    # the intended caller.
    self.metrics = []
    self.infos = []
    self.info_names = None
    # Deprecated field `period` is for the number of epochs between which
    # the model is saved.
    if 'period' in kwargs:
      self.period = kwargs['period']
      logging.warning('`period` argument is deprecated. Please use `save_freq` '
                      'to specify the frequency in number of samples seen.')
    else:
      self.period = 1
    if mode not in ['auto', 'min', 'max']:
      logging.warning('ModelCheckpoint mode %s is unknown, '
                      'fallback to auto mode.', mode)
      mode = 'auto'
    if mode == 'min':
      self.monitor_op = np.less
      self.best = np.Inf
    elif mode == 'max':
      self.monitor_op = np.greater
      self.best = -np.Inf
    else:
      # 'auto' mode: infer the improvement direction from the metric name.
      if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
        self.monitor_op = np.greater
        self.best = -np.Inf
      else:
        self.monitor_op = np.less
        self.best = np.Inf
    if self.save_freq != 'epoch' and not isinstance(self.save_freq, int):
      raise ValueError('Unrecognized save_freq: {}'.format(self.save_freq))
    # Only the chief worker writes model checkpoints, but all workers
    # restore checkpoint at on_train_begin().
    self._chief_worker_only = False

  def set_model(self, model):
    self.model = model
    # Use name matching rather than `isinstance` to avoid circular dependencies.
    if (not self.save_weights_only and
        not model._is_graph_network and  # pylint: disable=protected-access
        model.__class__.__name__ != 'Sequential'):
      # Subclassed models can only be saved as weights.
      self.save_weights_only = True

  def on_train_begin(self, logs=None):
    if K.in_multi_worker_mode():
      # pylint: disable=protected-access
      # MultiWorkerTrainingState is used to manage the training state needed
      # for preemption-recovery of a worker in multi-worker training.
      self.model._training_state = (
          multi_worker_training_state.MultiWorkerTrainingState(
              self.model, self.filepath))
      self._training_state = self.model._training_state
      if self._training_state.restore():
        # If the training state needs to be and is successfully restored,
        # it is recovering from a previous failure (or preemption). In such
        # case, do not load the weights from user specified file path.
        return
    # If this is not multi worker training, restoring is not needed, or
    # restoring failed, check if it should load weights on restart.
    # TODO(rchao): Also restore the epoch in single-worker training when
    # `self.load_weights_on_restart=True`.
    if self.load_weights_on_restart:
      # In multi worker training, it only should if `experimental_should_init`
      # is True.
      # TODO(rchao): Reference `experimental_should_init` api from a util file.
      if not K.in_multi_worker_mode() or dc_context.get_current_worker_context(
      ).experimental_should_init:
        filepath_to_load = (
            self._get_most_recently_modified_file_matching_pattern(
                self.filepath))
        if filepath_to_load is not None and os.path.exists(filepath_to_load):
          try:
            # `filepath` may contain placeholders such as `{epoch:02d}`, and
            # thus it attempts to load the most recently modified file with file
            # name matching the pattern.
            self.model.load_weights(filepath_to_load)
          except (IOError, ValueError) as e:
            raise ValueError('Error loading file from {}. Reason: {}'.format(
                filepath_to_load, e))

  def on_train_end(self, logs=None):
    if K.in_multi_worker_mode():
      # In multi-worker training, on successful exit of training, delete the
      # training state backup file that was saved for the purpose of worker
      # recovery.
      self._training_state.delete_backup()
      # Restore the training state so the model is ready for next (possible)
      # multi worker training.
      del self._training_state
      del self.model._training_state

  def on_batch_end(self, batch, logs=None):
    # With an integer `save_freq`, save once this many samples have
    # accumulated since the previous save.
    logs = logs or {}
    if isinstance(self.save_freq, int):
      self._samples_seen_since_last_saving += logs.get('size', 1)
      if self._samples_seen_since_last_saving >= self.save_freq:
        self._save_model(epoch=self._current_epoch, logs=logs)
        self._samples_seen_since_last_saving = 0

  def on_epoch_begin(self, epoch, logs=None):
    # Remember the current epoch so batch-level saves can format `filepath`.
    self._current_epoch = epoch

  def on_epoch_end(self, epoch, logs=None):
    self.epochs_since_last_save += 1
    if self.save_freq == 'epoch':
      self._save_model(epoch=epoch, logs=logs)
    if K.in_multi_worker_mode():
      # For multi-worker training, back up the weights and current training
      # state for possible future recovery.
      # TODO(rchao): Call `back_up` at finer period such as N steps.
      self._training_state.back_up(epoch)

  def on_step_end(self, step, logs):
    """Accumulates per-step metrics/info (keras-rl-style hook).

    NOTE(review): assumes `logs` carries a 'metrics' entry and a dict under
    'info'; this hook is not invoked by the stock Keras training loop —
    confirm the intended caller.
    """
    if self.info_names is None:
      self.info_names = logs['info'].keys()
    self.metrics.append(logs['metrics'])
    if len(self.info_names) > 0:
      self.infos.append([logs['info'][k] for k in logs['info'].keys()])

  def _save_model(self, epoch, logs):
    """Saves the model.
    Arguments:
      epoch: the epoch this iteration is in.
      logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
    """
    logs = logs or {}
    if isinstance(self.save_freq,
                  int) or self.epochs_since_last_save >= self.period:
      self.epochs_since_last_save = 0
      file_handle, filepath = self._get_file_handle_and_path(epoch, logs)
      if self.save_best_only:
        current = logs.get(self.monitor)
        if current is None:
          logging.warning('Can save best model only with %s available, '
                          'skipping.', self.monitor)
        else:
          if self.monitor_op(current, self.best):
            if self.verbose > 0:
              print('\nEpoch %05d: %s improved from %0.5f to %0.5f,'
                    ' saving model to %s' % (epoch + 1, self.monitor, self.best,
                                             current, filepath))
            self.best = current
            if self.save_weights_only:
              self.model.save_weights(filepath, overwrite=True)
            else:
              self.model.save(filepath, overwrite=True)
          else:
            if self.verbose > 0:
              print('\nEpoch %05d: %s did not improve from %0.5f' %
                    (epoch + 1, self.monitor, self.best))
      else:
        if self.verbose > 0:
          print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
        if self.save_weights_only:
          self.model.save_weights(filepath, overwrite=True)
        else:
          self.model.save(filepath, overwrite=True)
      self._maybe_remove_file(file_handle, filepath)

  def _get_file_handle_and_path(self, epoch, logs):
    """Returns the file handle and path."""
    # TODO(rchao): Replace dc_context reference with
    # distributed_training_utils.should_current_worker_checkpoint() once
    # distributed_training_utils.py no longer depends on callbacks.py.
    if not K.in_multi_worker_mode() or dc_context.get_current_worker_context(
    ).should_checkpoint:
      return None, self.filepath.format(epoch=epoch + 1, **logs)
    else:
      # If this is multi-worker training, and this worker should not
      # save checkpoint, we replace the filepath with a dummy filepath so
      # it writes to a file that will be removed at the end of _save_model()
      # call. This is because the SyncOnReadVariable needs to be synced across
      # all the workers in order to be read, and all workers need to initiate
      # that.
      file_handle, temp_file_name = tempfile.mkstemp()
      extension = os.path.splitext(self.filepath)[1]
      return file_handle, temp_file_name + extension

  def _maybe_remove_file(self, file_handle, filepath):
    # Remove the file in multi-worker training where this worker should
    # not checkpoint. It is a dummy file previously saved for sync distributed
    # training.
    if K.in_multi_worker_mode(
    ) and not dc_context.get_current_worker_context().should_checkpoint:
      os.close(file_handle)
      os.remove(filepath)

  def _get_most_recently_modified_file_matching_pattern(self, pattern):
    """Returns the most recently modified filepath matching pattern.
    Pattern may contain python formatting placeholder. If
    `tf.train.latest_checkpoint()` does not return None, use that; otherwise,
    check for most recently modified one that matches the pattern.
    In the rare case where there are more than one pattern-matching file having
    the same modified time that is most recent among all, return the filepath
    that is largest (by `>` operator, lexicographically using the numeric
    equivalents). This provides a tie-breaker when multiple files are most
    recent. Note that a larger `filepath` can sometimes indicate a later time of
    modification (for instance, when epoch/batch is used as formatting option),
    but not necessarily (when accuracy or loss is used). The tie-breaker is
    put in the logic as best effort to return the most recent, and to avoid
    undeterministic result.
    Modified time of a file is obtained with `os.path.getmtime()`.
    This utility function is best demonstrated via an example:
    ```python
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    file_paths = [
        os.path.join(test_dir, file_name) for file_name in
        ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
    ]
    for file_path in file_paths:
      # Write something to each of the files
    self.assertEqual(
        _get_most_recently_modified_file_matching_pattern(path_pattern),
        file_paths[-1])
    ```
    Arguments:
      pattern: The file pattern that may optionally contain python placeholder
        such as `{epoch:02d}`.
    Returns:
      The most recently modified file's full filepath matching `pattern`. If
      `pattern` does not contain any placeholder, this returns the filepath
      that
      exactly matches `pattern`. Returns `None` if no match is found.
    """
    dir_name = os.path.dirname(pattern)
    base_name = os.path.basename(pattern)
    # Turn every `{...}` placeholder into a wildcard for matching.
    base_name_regex = '^' + re.sub(r'{.*}', r'.*', base_name) + '$'
    # If tf.train.latest_checkpoint tells us there exists a latest checkpoint,
    # use that as it is more robust than `os.path.getmtime()`.
    latest_tf_checkpoint = checkpoint_management.latest_checkpoint(dir_name)
    if latest_tf_checkpoint is not None and re.match(
        base_name_regex, os.path.basename(latest_tf_checkpoint)):
      return latest_tf_checkpoint
    latest_mod_time = 0
    file_path_with_latest_mod_time = None
    n_file_with_latest_mod_time = 0
    file_path_with_largest_file_name = None
    if os.path.exists(dir_name):
      for file_name in os.listdir(dir_name):
        # Only consider if `file_name` matches the pattern.
        if re.match(base_name_regex, file_name):
          file_path = os.path.join(dir_name, file_name)
          mod_time = os.path.getmtime(file_path)
          if (file_path_with_largest_file_name is None or
              file_path > file_path_with_largest_file_name):
            file_path_with_largest_file_name = file_path
          if mod_time > latest_mod_time:
            latest_mod_time = mod_time
            file_path_with_latest_mod_time = file_path
            # In the case a file with later modified time is found, reset
            # the counter for the number of files with latest modified time.
            n_file_with_latest_mod_time = 1
          elif mod_time == latest_mod_time:
            # In the case a file has modified time tied with the most recent,
            # increment the counter for the number of files with latest modified
            # time by 1.
            n_file_with_latest_mod_time += 1
    if n_file_with_latest_mod_time == 1:
      # Return the sole file that has most recent modified time.
      return file_path_with_latest_mod_time
    else:
      # If there are more than one file having latest modified time, return
      # the file path with the largest file name.
      return file_path_with_largest_file_name
class EarlyStopping(Callback):
  """Stop training when a monitored quantity has stopped improving.
  Arguments:
    monitor: Quantity to be monitored.
    min_delta: Minimum change in the monitored quantity
      to qualify as an improvement, i.e. an absolute
      change of less than min_delta, will count as no
      improvement.
    patience: Number of epochs with no improvement
      after which training will be stopped.
    verbose: verbosity mode.
    mode: One of `{"auto", "min", "max"}`. In `min` mode,
      training will stop when the quantity
      monitored has stopped decreasing; in `max`
      mode it will stop when the quantity
      monitored has stopped increasing; in `auto`
      mode, the direction is automatically inferred
      from the name of the monitored quantity.
    baseline: Baseline value for the monitored quantity.
      Training will stop if the model doesn't show improvement over the
      baseline.
    restore_best_weights: Whether to restore model weights from
      the epoch with the best value of the monitored quantity.
      If False, the model weights obtained at the last step of
      training are used.
  Example:
  ```python
  callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
  # This callback will stop the training when there is no improvement in
  # the validation loss for three consecutive epochs.
  model.fit(data, labels, epochs=100, callbacks=[callback],
      validation_data=(val_data, val_labels))
  ```
  """

  def __init__(self,
               monitor='val_loss',
               min_delta=0,
               patience=0,
               verbose=0,
               mode='auto',
               baseline=None,
               restore_best_weights=False):
    super(EarlyStopping, self).__init__()
    self.monitor = monitor
    self.patience = patience
    self.verbose = verbose
    self.baseline = baseline
    self.min_delta = abs(min_delta)
    self.wait = 0
    self.stopped_epoch = 0
    self.restore_best_weights = restore_best_weights
    self.best_weights = None
    if mode not in ['auto', 'min', 'max']:
      logging.warning('EarlyStopping mode %s is unknown, '
                      'fallback to auto mode.', mode)
      mode = 'auto'
    if mode == 'min':
      self.monitor_op = np.less
    elif mode == 'max':
      self.monitor_op = np.greater
    else:
      # 'auto': infer the improvement direction from the metric name.
      if 'acc' in self.monitor:
        self.monitor_op = np.greater
      else:
        self.monitor_op = np.less
    # Sign min_delta so that `current - self.min_delta` always shifts the
    # candidate value against the direction of improvement. (The original
    # code had a no-op `self.min_delta *= 1` branch for the greater case.)
    if self.monitor_op == np.less:
      self.min_delta *= -1

  def on_train_begin(self, logs=None):
    # Allow instances to be re-used between `fit` calls.
    self.wait = 0
    self.stopped_epoch = 0
    if self.baseline is not None:
      self.best = self.baseline
    else:
      self.best = np.Inf if self.monitor_op == np.less else -np.Inf

  def on_epoch_end(self, epoch, logs=None):
    current = self.get_monitor_value(logs)
    if current is None:
      return
    if self.monitor_op(current - self.min_delta, self.best):
      self.best = current
      self.wait = 0
      if self.restore_best_weights:
        self.best_weights = self.model.get_weights()
    else:
      self.wait += 1
      if self.wait >= self.patience:
        self.stopped_epoch = epoch
        self.model.stop_training = True
        if self.restore_best_weights:
          # BUG FIX: `best_weights` is None when no epoch ever improved on
          # `baseline`; calling `set_weights(None)` would raise. Only
          # restore when a best snapshot actually exists.
          if self.best_weights is not None:
            if self.verbose > 0:
              print('Restoring model weights from the end of the best epoch.')
            self.model.set_weights(self.best_weights)

  def on_train_end(self, logs=None):
    if self.stopped_epoch > 0 and self.verbose > 0:
      print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))

  def get_monitor_value(self, logs):
    """Return `logs[self.monitor]`, warning and returning None if absent."""
    logs = logs or {}
    monitor_value = logs.get(self.monitor)
    if monitor_value is None:
      logging.warning('Early stopping conditioned on metric `%s` '
                      'which is not available. Available metrics are: %s',
                      self.monitor, ','.join(list(logs.keys())))
    return monitor_value
"""Module containing the recipient alias API of the v1 API."""
from flask import abort
from flask.views import MethodView
from .root import API_V1
from .models import RecipientAlias
from ...db import DB
from ...db.models.recipient_alias import RecipientAlias as RecipientAlias_DB
@API_V1.route("/recipient_alias/")
class RecipientAliasList(MethodView):
    """Root endpoint for all recipient alias resources."""

    @API_V1.response(RecipientAlias(many=True))
    def get(self):
        """Get all recipient aliases"""
        # Serialization to the schema is handled by the @response decorator.
        return RecipientAlias_DB.query.all()

    @API_V1.arguments(RecipientAlias, description="The alias to add")
    @API_V1.response(RecipientAlias, code=201)
    def post(self, new_data):
        """Add a new recipient alias"""
        alias = RecipientAlias_DB(**new_data)
        DB.session.add(alias)
        DB.session.commit()
        # Return the persisted row so the response includes generated fields.
        return alias
@API_V1.route("/recipient_alias/create_many")
class RecipientAliasCreateMany(MethodView):
    """Endpoint to create many aliases in one request."""

    @API_V1.arguments(RecipientAlias(many=True), description="The aliases to add")
    @API_V1.response(RecipientAlias(many=True), code=201)
    def post(self, new_data):
        """Add new recipient aliases"""
        # Build all rows first, then stage them with a single add_all() call.
        aliases = [RecipientAlias_DB(**payload) for payload in new_data]
        DB.session.add_all(aliases)
        DB.session.commit()
        return aliases
@API_V1.route("/recipient_alias/replace")
class RecipientAliasReplace(MethodView):
    """Endpoint to replace all recipient aliases."""

    @API_V1.arguments(RecipientAlias(many=True), description="The new list which should be set")
    @API_V1.response(code=204)
    def post(self, new_data):
        """Replace all recipient aliases with the given list."""
        # Wipe the table and stage the replacement rows; the single commit
        # makes the swap effectively atomic at the transaction level.
        RecipientAlias_DB.query.delete()
        DB.session.add_all(RecipientAlias_DB(**payload) for payload in new_data)
        DB.session.commit()
@API_V1.route("/recipient_alias/<recipient_alias_id>/")
class RecipientAlias(MethodView):
    """Endpoint for a single recipient alias resource.

    NOTE(review): this view class shadows the ``RecipientAlias`` schema
    imported from ``.models``. The decorators below still resolve to the
    schema (the class name only binds after the class body executes), but
    any later module-level use of ``RecipientAlias`` would get the view.
    The class is not renamed here because its name is also the Flask
    endpoint name; consider renaming it in a coordinated change.
    """

    @staticmethod
    def _find(recipient_alias_id):
        """Return the alias row with the given id, or None if absent."""
        return RecipientAlias_DB.query.filter(
            RecipientAlias_DB.id == recipient_alias_id).first()

    @API_V1.doc(responses={'404': {'description': 'When requested recipient alias is not found'}})
    @API_V1.response(RecipientAlias())
    def get(self, recipient_alias_id):
        """ Get a single recipient alias """
        item = self._find(recipient_alias_id)
        if item is None:
            abort(404, "Requested recipient alias not found.")
        return item

    @API_V1.arguments(RecipientAlias, description="The new values for the alias")
    @API_V1.response(RecipientAlias())
    def put(self, recipient_alias_id, update_data):
        """ Update a single recipient alias """
        item = self._find(recipient_alias_id)
        if item is None:
            abort(404, "Requested recipient alias not found.")
        item.update(update_data)
        DB.session.commit()
        return item

    @API_V1.response(code=204)
    def delete(self, recipient_alias_id):
        """ Delete a single recipient alias """
        item = self._find(recipient_alias_id)
        if item is None:
            # Deleting a missing alias is treated as success (idempotent).
            return
        DB.session.delete(item)
        DB.session.commit()
| 2.484375 | 2 |
# See https://docs.confluent.io/current/clients/confluent-kafka-python/index.html#pythonclient-consumer
# See https://aws.amazon.com/premiumsupport/knowledge-center/ec2-linux-python3-boto3/
from confluent_kafka import Consumer, KafkaError
# Consumer settings; fill in the TODO values before running.
app_settings = {
    "bootstrap.servers": "TODO",
    "group.id": "TODO",
    "topic": "TODO",
}

c = Consumer({
    "bootstrap.servers": app_settings["bootstrap.servers"],
    "group.id": app_settings["group.id"],
    "auto.offset.reset": "latest",  # smallest, earliest, beginning, largest, latest, end, error
})

c.subscribe([app_settings["topic"]])

try:
    # Poll forever; Ctrl+C (or any error) falls through to the cleanup below.
    while True:
        msg = c.poll(0.1)
        if msg is None:
            print("No Data")
            continue
        if msg.error():
            print(f"Consumer error: {msg.error()}")
            continue
        print(f"Received message: {msg.value().decode('utf-8')}")
finally:
    # BUG FIX: c.close() was unreachable after the infinite loop (and fused
    # with dataset residue); closing in `finally` commits final offsets and
    # leaves the consumer group cleanly even on interrupt.
    c.close()
from django.conf import settings
from django.contrib.auth import (
authenticate, login, logout, get_user_model,
user_logged_out
)
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
import traceback
User = get_user_model()
class LoginView(APIView):
    """Session-based login endpoint.

    NOTE(review): failures are returned with HTTP 200 and ``success: False``
    in the body; 401/500 would be more conventional, but the status codes are
    kept as-is because existing clients may depend on them.
    """

    def post(self, request):
        """Authenticate with ``username``/``password`` and open a session."""
        # BUG FIX: fetch credentials with .get() — the old code indexed
        # request.data['username'] inside the exception handler too, so a
        # missing field raised KeyError *again* while building the error
        # response.
        username = request.data.get('username')
        try:
            user = authenticate(
                request, username=username,
                password=request.data.get('password')
            )
            if user is not None:
                login(request, user)
                return Response(
                    data={
                        'success': True, 'message': "Success: User logged in",
                        'data': {
                            'username': user.username,
                            'email': user.email,
                        }
                    }, status=status.HTTP_200_OK
                )
            return Response(
                data={
                    'success': False, 'message': "User does not exists",
                    'data': {
                        'username': username,
                    }
                }, status=status.HTTP_200_OK
            )
        except Exception:
            # Surface the traceback only in development.
            if settings.DEBUG:
                traceback.print_exc()
            return Response(
                data={
                    'success': False, 'message': "Internal Server Error",
                    'data': {
                        'username': username,
                    }
                }, status=status.HTTP_200_OK
            )
class LogoutView(APIView):
    """Session logout endpoint."""

    def post(self, request):
        """Emit the ``user_logged_out`` signal and close the session."""
        user_logged_out.send(
            sender=request.user.__class__, request=request, user=request.user
        )
        logout(request)
        return Response(
            data={
                'success': True, 'message': "Success: User logged out",
                'data': {
                    # BUG FIX: .get avoids a KeyError (-> 500) when the
                    # client does not echo the username in the request body.
                    'username': request.data.get('username'),
                }
            }, status=status.HTTP_200_OK
        )
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from db.player_draft import PlayerDraft
from db.team import Team
def test_find_by_id():
    """Players who were drafted twice must yield two draft records each."""
    for player_id in (8475883, 8466145):  # both players re-entered the draft
        drafts = PlayerDraft.find_by_player_id(player_id)
        assert len(drafts) == 2
def test_find():
    """The first overall pick of the 2016 draft is round 1, overall 1."""
    draft = PlayerDraft.find(8479318, 10, 2016)
    assert (draft.round, draft.overall) == (1, 1)
def test_constructor():
    """Constructing a draft record stores every attribute verbatim."""
    draft = PlayerDraft(8999444, 1, 2018, 3, 75)  # fictional player
    assert draft.player_id == 8999444
    # Team id 1 resolves to the Devils via the teams table.
    assert Team.find_by_id(draft.team_id).name == 'New Jersey Devils'
    assert (draft.year, draft.round, draft.overall) == (2018, 3, 75)
def test_comparison_operators():
    """Draft records sort chronologically, then by pick within a year."""
    kopitar = PlayerDraft.find_by_player_id(8471685).pop(0)  # 2005, 11
    toews = PlayerDraft.find_by_player_id(8473604).pop(0)    # 2006, 3
    kessel = PlayerDraft.find_by_player_id(8473548).pop(0)   # 2006, 5
    stamkos = PlayerDraft.find_by_player_id(8474564).pop(0)  # 2008, 1
    ordered = sorted([kessel, kopitar, stamkos, toews])
    assert ordered == [kopitar, toews, kessel, stamkos]
assert ordered[3] == pdft_stamkos
from django.db import models
class Student(models.Model):
    """A student record, uniquely identified by roll number."""

    roll = models.IntegerField(unique=True)  # college roll number
    name = models.CharField(max_length=100)
    # Contact fields are optional but must be unique when present.
    phone = models.CharField(max_length=10, unique=True, blank=True, null=True)
    email = models.EmailField(unique=True, blank=True, null=True)
    sem = models.IntegerField()  # current semester
    program = models.CharField(max_length=200, blank=True, null=True)
    address = models.CharField(max_length=200, blank=True, null=True)
    # A bundled default image is served until the student uploads a picture.
    pic = models.ImageField(upload_to='uploads/accounts/', default='uploads/accounts/default.jpeg')
    registered = models.BooleanField(default=False)  # has completed signup

    class Meta:
        # Querysets are ordered by roll number by default.
        ordering = ['roll']

    @property
    def first_name(self):
        """Return the first whitespace-separated token of the full name."""
        return self.name.split(' ')[0]

    def __str__(self):
        return self.name
class StudentLogin(models.Model):
    """Login credentials attached one-to-one to a Student.

    NOTE(review): `password` is a plain CharField — from this model alone it
    looks like passwords may be stored in clear text. Confirm hashing happens
    before save, or switch to Django's auth machinery.
    """

    password = models.CharField(max_length=100)
    student = models.OneToOneField(Student, on_delete=models.CASCADE)

    def __str__(self):
        # (Dataset residue fused into this line was removed.)
        return f'{self.student.name} {self.student.roll}'
# selenium: !conda install -c conda-forge selenium -y
from selenium.webdriver import Chrome, ChromeOptions #, Firefox, FirefoxOptions
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver import ActionChains
from enum import Enum
import json
import time
class Key(Enum):
    """Keyboard keys used to control the surviv.io player."""

    W = "w"
    A = "a"
    S = "s"
    D = "d"
    F = "f"  # TODO: dispatch F via keyDown so it can be held (translated note)
    R = "r"
    V = "v"
    N1 = "1"
    N2 = "2"
    N3 = "3"
    N7 = "7"
    N8 = "8"
    N9 = "9"
    N0 = "0"

    def __str__(self):
        return self.value

    @staticmethod
    def fromDirection(id_dir):
        """Map a movement-direction id to the keys that must be held.

        Args:
            id_dir: int direction id — 0 stop; 1 up; 2 up-right; 3 right;
                4 down-right; 5 down; 6 down-left; 7 left; 8 up-left.

        Returns:
            A list of `Key` members to hold for that direction (empty for 0).

        Raises:
            KeyError: if `id_dir` is not in 0..8.
        """
        d_dir = {0: [],
                 1: [Key.W],
                 2: [Key.W, Key.D],
                 3: [Key.D],
                 4: [Key.D, Key.S],
                 5: [Key.S],
                 6: [Key.A, Key.S],
                 7: [Key.A],
                 8: [Key.A, Key.W]}
        return d_dir[id_dir]
class Direction(Enum):
    """Key-event phases understood by Chrome's Input.dispatchKeyEvent."""

    Up = "keyUp"
    Down = "rawKeyDown"

    def __str__(self):
        # Render as the raw protocol string, e.g. str(Direction.Up) -> "keyUp".
        return self.value
class Game:
def __init__(self, chrome_driver, chrome_adblock, custom_config=True, classic_mode=True):
    """Open surviv.io in a Chrome window ready for bot control.

    Args:
        chrome_driver: path to the chromedriver binary
            (https://chromedriver.chromium.org/downloads).
        chrome_adblock: path to the uBlock Origin .crx extension
            (https://www.crx4chrome.com/crx/31931/).
        custom_config: currently unused; kept for interface compatibility.
        classic_mode: select the classic game mode if another mode is the
            site's current default.
    """
    self.game_url = 'https://surviv.io/'
    self.chrome_driver = chrome_driver
    self.chrome_adblock = chrome_adblock

    chrOptions = ChromeOptions()
    chrOptions.add_extension(self.chrome_adblock)
    chrOptions.add_argument("disable-infobars")
    chrOptions.add_argument("--mute-audio")

    self.browser = Chrome(executable_path=self.chrome_driver,
                          chrome_options=chrOptions)
    self.browser.set_window_position(x=-10, y=0)
    self.browser.get(self.game_url)
    self.browser.implicitly_wait(3)
    self.browser.maximize_window()

    # Dismiss the consent/news popups. The XPaths were found with the
    # browser dev tools (ctrl+shift+I); each popup may or may not be
    # present, so every click is best-effort.
    self._click_if_present("/html/body/div[9]/div[34]/div[1]/div[1]/span")
    #self.browser.find_element_by_id("modal-account-incentive-btn").click()
    self._click_if_present("/html/body/div[9]/div[29]/div/div[1]/span")
    self._click_if_present("/html/body/div[9]/div[40]/div/div[1]/span")
    self._click_if_present("/html/body/div[9]/div[18]/div/div[1]/span")

    if classic_mode:  # select classic mode if another one is the default
        self.browser.find_element_by_id("index-play-mode-selected").click()
        self.browser.find_element_by_xpath(
            "/html/body/div[9]/div[19]/div[12]/div[2]/div[4]/div[3]/div[3]/div[1]/div[3]/div/div[1]").click()

    self.callCounters = {}  # call counter for each key
    # Previous movement direction, cached so the keys held for the last
    # direction can be released easily.
    self.previousDirection = 0

def _click_if_present(self, xpath):
    """Best-effort click on *xpath*; print 'Not found' if it is missing.

    Replaces four copy-pasted bare try/except blocks from the original.
    """
    try:
        self.browser.find_element_by_xpath(xpath).click()
    except Exception:
        print('Not found')
def close_current_tab(self):
    """Close the browser tab this Game instance controls."""
    self.browser.close()
def get_window_size(self):
    """Return ``(position, dimensions)`` of the usable page area.

    The window position is shifted down and the height shrunk by the
    browser chrome, using the ``<body>`` element's height as the true
    viewport height.
    """
    dims = self.browser.get_window_size()
    pos = self.browser.get_window_position()
    pos['y'] += dims['height'] - self.browser.find_element_by_tag_name('body').size['height']
    dims['height'] = self.browser.find_element_by_tag_name('body').size['height']
    return pos, dims
# The agent's actions are sent through selenium, which is initialized in the
# Game class, so every interaction between the agent and the environment must
# be implemented in the Game class; these methods are in turn invoked from
# inside the agent (the agent is a class holding a Game instance as an attribute).
def start_playing(self):
    """Click the play button to enter a battle."""
    self.browser.find_element_by_id("btn-start-battle").click()
    #self.browser.find_element_by_xpath("/html/body/div[6]/div/div[1]/span").click()
def restart_after_death(self):
    """Navigate the post-death screens to start a new game.

    Each step is best-effort: which buttons exist depends on the screen
    surviv.io is currently showing, so every click is wrapped in its own
    try/except. The printed status messages are runtime output and are
    kept verbatim (they are in Russian: "search for / clicked / did not
    find the <button>").
    """
    # Continue the game — approach taken from https://habr.com/en/post/250975/
    try:
        print('поиск кнопки "Играть еще"')
        self.browser.find_element_by_xpath('/html/body/div[3]/div[4]/div[2]/div[1]/div[3]/a').click()
        print('нажал кнопку "Играть еще"')
    except:
        print('не нашел кнопку "Играть еще"')
        pass
    try:
        print('поиск кнопки "закрыть лишнее окно"')
        self.browser.find_element_by_xpath("/html/body/div[4]/div[28]/div/div[1]/span").click()
        print('нажал кнопку "закрыть лишнее окно"')
    except:
        print('не нашел кнопку "закрыть лишнее окно"')
        pass
    try:
        print('поиск кнопки "В бой"')
        self.start_playing()
        print('нажал кнопку "В бой"')
    except:
        print('не нашел кнопку "В бой"')
        pass
# def get_crashed(self):
# def get_score(self):
# def get_highscore(self):
#==================================================================================================
#================🡻 LOCOMOTION ACTIONS 🡻==========================================================
# NOTE: Why not just use ActionChains.key_up(...)/key_down(...) !?
# for keys like WASD, up, down etc. it is not possible to click and push them via ActionChains
def _dispatchKeyEvent(self, name, options={}):
options["type"] = name
body = json.dumps({"cmd": "Input.dispatchKeyEvent", "params": options})
resource = "/session/%s/chromium/send_command" % self.browser.session_id
url = self.browser.command_executor._url + resource
self.browser.command_executor._request("POST", url, body)
def _holdKey(self, directStr, keyStr):
options = {
"code": "Key" + keyStr.upper(), # Uppercase is required
"key": keyStr.upper(),
"text": keyStr.upper(),
"unmodifiedText": keyStr.upper(),
"nativeVirtualKeyCode": ord(keyStr.upper()),
"windowsVirtualKeyCode": ord(keyStr.upper())
}
self._dispatchKeyEvent(directStr, options)
    def keyDown(self, key):
        """Press *key* down (and keep it held) via a DevTools key event."""
        self._holdKey(str(Direction.Down), str(key))
    def keyUp(self, key):
        """Release *key* via a DevTools key event."""
        self._holdKey(str(Direction.Up), str(key))
def stop_moving(self):
# отпускаем все кнопки, которые были нажаты ранее (даже если не были)
self.keyUp(Key.A)
self.keyUp(Key.S)
self.keyUp(Key.D)
self.keyUp(Key.W)
    def move(self, keys):
        """Release all movement keys, then hold down every key in *keys*."""
        self.stop_moving()
        for key in keys:
            # Hold every movement key listed in the input collection.
            self.keyDown(key)
#================🡹 LOCOMOTION ACTIONS 🡹==========================================================
#==================================================================================================
    def _createActionWithMoveTo(self, x, y):
        '''
        Build an ActionChains object for compound actions (start moving,
        hold LMB and press an extra key at the same time).

        NOTE: the mouse-move part is disabled below because
        move_to_element_with_offset/move_by_offset proved too slow; the
        returned chain currently performs no movement.
        '''
        # action
        # https://stackoverflow.com/questions/32167577/how-to-move-the-mouse-in-selenium
        # https://selenium-python.readthedocs.io/api.html#module-selenium.webdriver.common.action_chains
        action = ActionChains(self.browser)
        #element = self.browser.find_element_by_id("game-touch-area")
        #size = element.size
        #centerBias = {"x": int(size["width"] / 2),
        #              "y": int(size["height"] / 2)}
        # -y because html axis "y" is inverted
        #return action.move_to_element_with_offset(
        #    element, centerBias["x"] + int(x), centerBias["y"] + int(-y))
        return action #.move_by_offset(0,0)
        # THE WHOLE PROBLEM is the slow move_to_element_with_offset/move_by_offset function
#==================================================================================================
#================🡻 MOUSE ACTIONS 🡻==========================================================
    def moveMouseTo(self, x, y):
        """Move mouse relatively screen center
        Args:
            x ([int]): x axis
            y ([int]): y axis

        NOTE(review): the underlying action chain currently performs no
        movement (see _createActionWithMoveTo) — confirm before relying on it.
        """
        actions = self._createActionWithMoveTo(x, y)
        actions.perform()
#================🡹 MOUSE ACTIONS 🡹==========================================================
#==================================================================================================
#==================================================================================================
#================🡻 ADDITIONAL BUTTONS 🡻==========================================================
    def press(self, key): # F, R, N7, N8, N9, N0, N1, N2, N3
        """Tap *key* once: press, hold ~1 ms, release."""
        self.keyDown(key)
        time.sleep(0.001)
        self.keyUp(key)
        # V - collapse the minimap
#================🡹 ADDITIONAL BUTTONS 🡹==========================================================
#==================================================================================================
#==================================================================================================
#================🡻 PROCESS AGENT'S ACTIONS 🡻=====================================================
def keySwitch(self, key):
keyStr = str(key)
self.callCounters[keyStr] = self.callCounters.get(keyStr, 0) + 1
if self.callCounters[keyStr] % 2 != 0:
self.keyDown(key)
else:
self.keyUp(key)
    def _switchDirection(self, direction):
        """Toggle every movement key associated with *direction*."""
        keys = Key.fromDirection(direction)
        for key in keys:
            self.keySwitch(key)
    def process_all_agents_actions(self, request):
        """ Process request by surviv processor
        Args:
            request ([SurvivRequest]): Special class for managing surviv.io game
        """
        # TODO: sort out the priorities between the actions inside the request
        if request.fKey == 1:
            self.press(Key.F)
        if request.actKey == 5:   # reload
            self.press(Key.R)
        elif request.actKey == 3: # soda
            self.press(Key.N9)
        elif request.actKey == 4: # pills
            self.press(Key.N0)
        elif request.actKey == 1: # bandages
            self.press(Key.N7)
        elif request.actKey == 2: # med kit
            self.press(Key.N8)
        if request.click == 1:
            # Press and hold the left mouse button (start shooting).
            action = ActionChains(self.browser)
            action.click_and_hold()
            action.perform()
        if request.switch_weap == 1:   # switch to weapon 1
            self.press(Key.N1)
        elif request.switch_weap == 2: # switch to weapon 2
            self.press(Key.N2)
        elif request.switch_weap == 3: # switch to weapon 3
            self.press(Key.N3)
        if self.previousDirection != request.direction:
            # Release the keys of the old direction, then press the new ones.
            self._switchDirection(self.previousDirection)
            self._switchDirection(request.direction)
            self.previousDirection = request.direction
        if request.click == 1:
            time.sleep(request.dt_click/1000) # duration of the shooting burst
            post_act = ActionChains(self.browser) # release the left mouse button
            post_act.release()
            post_act.perform()
#================🡹 PROCESS AGENT'S ACTIONS 🡹=====================================================
#================================================================================================== | 2.4375 | 2 |
sheets/coupon_request_api_test.py | mitodl/mit-xpro | 10 | 12761429 | # pylint: disable=redefined-outer-name,unused-argument
"""Coupon request API tests"""
import os
from types import SimpleNamespace
import pytest
from pygsheets import Worksheet, Spreadsheet
from pygsheets.client import Client as PygsheetsClient
from pygsheets.drive import DriveAPIWrapper
from pygsheets.sheet import SheetAPIWrapper
from courses.factories import CourseRunFactory
from ecommerce.factories import ProductVersionFactory
from ecommerce.models import Company, Coupon
from sheets.coupon_request_api import CouponRequestHandler, CouponRequestRow
from sheets.factories import GoogleApiAuthFactory
from sheets.models import CouponGenerationRequest
from sheets.utils import ResultType
@pytest.fixture
def courseware_objects():
    """Database objects that CSV data depends on"""
    # The fake CSV references this courseware id, and coupon creation
    # requires a product version for the run.
    run = CourseRunFactory.create(courseware_id="course-v1:edX+DemoX+Demo_Course")
    ProductVersionFactory.create(product__content_object=run)
@pytest.fixture
def request_csv_rows(settings, courseware_objects):
    """Fake coupon request spreadsheet data rows (loaded from CSV)"""
    csv_path = os.path.join(
        settings.BASE_DIR, "sheets/resources/coupon_requests.csv"
    )
    with open(csv_path) as csv_file:
        all_lines = csv_file.readlines()
    # Drop the header row and split the remaining lines into raw cell values
    return [line.split(",") for line in all_lines[1:]]
@pytest.fixture
def pygsheets_fixtures(mocker, db, request_csv_rows):
    """Patched functions for pygsheets client functionality"""
    Mock = mocker.Mock
    MagicMock = mocker.MagicMock
    google_api_auth = GoogleApiAuthFactory.create()
    # Feed the fake CSV rows to the handler instead of a live spreadsheet.
    patched_get_data_rows = mocker.patch(
        "sheets.sheet_handler_api.get_data_rows", return_value=request_csv_rows
    )
    mocked_worksheet = MagicMock(spec=Worksheet, get_all_values=Mock(return_value=[]))
    mocked_spreadsheet = MagicMock(
        spec=Spreadsheet, sheet1=mocked_worksheet, id="abc123"
    )
    # Fake pygsheets client whose open/create calls return the mocks above.
    mocked_pygsheets_client = MagicMock(
        spec=PygsheetsClient,
        oauth=Mock(),
        open_by_key=Mock(return_value=mocked_spreadsheet),
        drive=MagicMock(spec=DriveAPIWrapper),
        sheet=MagicMock(spec=SheetAPIWrapper),
        create=Mock(return_value=mocked_spreadsheet),
    )
    mocker.patch(
        "sheets.coupon_request_api.get_authorized_pygsheets_client",
        return_value=mocked_pygsheets_client,
    )
    return SimpleNamespace(
        client=mocked_pygsheets_client,
        spreadsheet=mocked_spreadsheet,
        worksheet=mocked_worksheet,
        google_api_auth=google_api_auth,
        patched_get_data_rows=patched_get_data_rows,
    )
@pytest.fixture
def patched_sheets_api(mocker):
    """Patches for sheets API functions that use the Drive/Sheets APIs"""
    # Neither call should hit Google APIs during tests.
    share_drive_file = mocker.patch(
        "sheets.coupon_request_api.share_drive_file_with_emails", return_value=None
    )
    create_file_watch = mocker.patch(
        "sheets.coupon_request_api.create_or_renew_sheet_file_watch", return_value=None
    )
    return SimpleNamespace(
        share_drive_file=share_drive_file, create_file_watch=create_file_watch
    )
def test_full_sheet_process(
    db, pygsheets_fixtures, patched_sheets_api, request_csv_rows
):
    """
    CouponRequestHandler.process_sheet should parse rows, create relevant objects in the database, and report
    on results
    """
    handler = CouponRequestHandler()
    result = handler.process_sheet()
    # Row numbers refer to spreadsheet rows (header is row 1).
    expected_processed_rows = {6, 8}
    expected_failed_rows = {5, 7}
    assert ResultType.PROCESSED.value in result
    assert set(result[ResultType.PROCESSED.value]) == expected_processed_rows, (
        "Rows %s as defined in coupon_requests.csv should be processed"
        % str(expected_processed_rows)
    )
    assert ResultType.FAILED.value in result
    assert set(result[ResultType.FAILED.value]) == expected_failed_rows, (
        "Rows %s as defined in coupon_requests.csv should fail"
        % str(expected_failed_rows)
    )
    # A CouponGenerationRequest should be created for each row that wasn't ignored and did not fail full sheet
    # validation (CSV has 1 row that should fail validation, hence the 1)
    assert CouponGenerationRequest.objects.all().count() == (
        len(expected_processed_rows) + len(expected_failed_rows) - 1
    )
    # The correct number of coupons should have been created for each processed row
    processed_rows = [
        CouponRequestRow.parse_raw_data(i, row_data)
        for i, row_data in enumerate(request_csv_rows, start=2)
        if i in expected_processed_rows
    ]
    expected_coupons = sum((row.num_codes for row in processed_rows))
    assert Coupon.objects.all().count() == expected_coupons
    # Sheets API should have been used to create an assignment sheet and share it
    assert patched_sheets_api.create_file_watch.call_count == len(
        expected_processed_rows
    )
    assert patched_sheets_api.share_drive_file.call_count == len(
        expected_processed_rows
    )
    # New companies should have been created during the processing
    assert list(Company.objects.order_by("name").values_list("name", flat=True)) == [
        "MIT",
        "MIT Open Learning",
    ]
| 2.09375 | 2 |
egs/wsj/s5/utils/data/extend_segment_times.py | shuipi100/kaldi | 805 | 12761430 | <filename>egs/wsj/s5/utils/data/extend_segment_times.py
#!/usr/bin/env python
from __future__ import print_function
import sys
import argparse
from collections import defaultdict
# Command-line interface: paddings are in seconds; --fix-overlapping-segments
# is a string flag ('true'/'false') for compatibility with Kaldi shell scripts.
parser = argparse.ArgumentParser(description="""
Usage: extend_segment_times.py [options] <input-segments >output-segments
This program pads the times in a 'segments' file (e.g. data/train/segments)
with specified left and right context (for cases where there was no
silence padding in the original segments file)""")

parser.add_argument("--start-padding", type = float, default = 0.1,
                    help="Amount of padding, in seconds, for the start time of "
                    "each segment (start times <0 will be set to zero).")
parser.add_argument("--end-padding", type = float, default = 0.1,
                    help="Amount of padding, in seconds, for the end time of "
                    "each segment.")
parser.add_argument("--last-segment-end-padding", type = float, default = 0.1,
                    help="Amount of padding, in seconds, for the end time of "
                    "the last segment of each file (maximum allowed).")
parser.add_argument("--fix-overlapping-segments", type = str,
                    default = 'true', choices=['true', 'false'],
                    help="If true, prevent segments from overlapping as a result "
                    "of the padding (or that were already overlapping)")

args = parser.parse_args()
# the input file will be a sequence of lines which are each of the form:
# <utterance-id> <recording-id> <start-time> <end-time>
# e.g.
# utt-1 recording-1 0.62 5.40
# The output will be in the same format and in the same
# order, except with modified times.

# This variable maps from a recording-id to a list of the utterance
# indexes (as integer indexes into 'entries')
# that are part of that recording.
recording_to_utt_indexes = defaultdict(list)

# This is an array of the entries in the segments file, in the format:
# (utterance-id as a string, recording-id as string,
#  start-time as float, end-time as float)
entries = []
# Read the segments file from stdin, one entry per line.
while True:
    line = sys.stdin.readline()
    if line == '':
        break
    try:
        [ utt_id, recording_id, start_time, end_time ] = line.split()
        start_time = float(start_time)
        end_time = float(end_time)
    except ValueError:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not swallowed; a wrong field count and a non-numeric time both
        # raise ValueError.
        sys.exit("extend_segment_times.py: could not interpret line: " + line)
    if not end_time > start_time:
        print("extend_segment_times.py: bad segment (ignoring): " + line,
              file = sys.stderr)
        # NOTE(review): despite the message, the segment is still appended
        # below; it is only dropped in the output loop if it remains invalid
        # after padding — confirm this is intentional before adding `continue`.
    recording_to_utt_indexes[recording_id].append(len(entries))
    entries.append([utt_id, recording_id, start_time, end_time])
num_times_fixed = 0

# Pad every segment, per recording, then resolve any overlaps introduced.
for recording, utt_indexes in recording_to_utt_indexes.items():
    # this_entries is a list of lists, sorted on mid-time.
    # Notice: because lists are objects, when we change 'this_entries'
    # we change the underlying entries.
    this_entries = sorted([ entries[x] for x in utt_indexes ],
                          key = lambda x : 0.5 * (x[2] + x[3]))
    min_time = 0
    max_time = max([ x[3] for x in this_entries ]) + args.last_segment_end_padding
    start_padding = args.start_padding
    end_padding = args.end_padding
    # Extend each segment, clamped to [min_time, max_time].
    for n in range(len(this_entries)):
        this_entries[n][2] = max(min_time, this_entries[n][2] - start_padding)
        this_entries[n][3] = min(max_time, this_entries[n][3] + end_padding)

    # Split overlapping adjacent segments at the midpoint of the overlap.
    for n in range(len(this_entries) - 1):
        this_end_time = this_entries[n][3]
        next_start_time = this_entries[n+1][2]
        if this_end_time > next_start_time and args.fix_overlapping_segments == 'true':
            midpoint = 0.5 * (this_end_time + next_start_time)
            this_entries[n][3] = midpoint
            this_entries[n+1][2] = midpoint
            num_times_fixed += 1
# this prints a number with a certain number of digits after
# the point, while removing trailing zeros.
def FloatToString(f):
    """Format *f* with ~6 digits after the decimal point, trailing zeros dropped."""
    significant = 6  # we want to print 6 digits after the zero
    remainder = f
    # Add one significant digit for every digit before the decimal point.
    while abs(remainder) > 1.0:
        remainder *= 0.1
        significant += 1
    return ('%.{0}g'.format(significant)) % f
# Emit the padded segments; drop entries that are still invalid.
for entry in entries:
    [ utt_id, recording_id, start_time, end_time ] = entry
    if not start_time < end_time:
        # Fix: `' '.join(entry)` raised TypeError because the times are
        # floats; stringify each field before joining.
        print("extend_segment_times.py: bad segment after processing (ignoring): " +
              ' '.join(map(str, entry)), file = sys.stderr)
        continue
    print(utt_id, recording_id, FloatToString(start_time), FloatToString(end_time))
# Summary goes to stderr so it does not pollute the segments on stdout.
print("extend_segment_times.py: extended {0} segments; fixed {1} "
      "overlapping segments".format(len(entries), num_times_fixed),
      file = sys.stderr)

## test:
# (echo utt1 reco1 0.2 6.2; echo utt2 reco1 6.3 9.8 )| extend_segment_times.py
# and also try the above with the options --last-segment-end-padding=0.0 --fix-overlapping-segments=false
day05/app05plus/urls.py | 940716tian/PythonStudy | 0 | 12761431 | from django.conf.urls import url
from app05plus.views import index, register, mylogin
from app05plus.views import mylogout
# URL routes for the app05plus demo app; named routes are used by templates.
urlpatterns = [
    url(r"^newindex01$",index),
    url(r"^register01$",register,name="register"),
    url(r"^mylogin01$",mylogin,name="mylogin"),
    url(r"^logout$",mylogout),
]
pipeline/tests/pipeline_data.py | wkma/bk-sops | 55 | 12761432 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
# Dictionary describing the structure of a pipeline.
pipeline_1 = {
    "id": "p1",  # id of this pipeline
    "name": "name",
    "start_event": {"id": "", "name": "", "type": "EmptyStartEvent", "incoming": None, "outgoing": "outgoing_flow_id"},
    "end_event": {"id": "", "name": "", "type": "EmptyEndEvent", "incoming": "incoming_flow_id", "outgoing": None},
    "activities": {  # all tasks of this pipeline: service activities and sub-pipelines
        "n1": {
            "id": "n1",
            "type": "ServiceActivity",
            "name": "",
            "incoming": "f1",
            "outgoing": "f2",
            "component": {
                "tag_code": "",
                "data": {
                    "env_id": {"hook": True, "constant": "${_env_id}", "value": ""},
                    "another_param": {"hook": True, "constant": "${_another_param}", "value": ""},
                },
            },
        },
        "n2": {"id": "n2", "type": "SubProcess", "name": "", "incoming": "f3", "outgoing": "f4", "template_id": ""},
    },
    "flows": {  # all flows (edges) of this pipeline
        "f1": {"id": "f1", "source": "n1", "target": "n2", "is_default": False},
        "f2": {"id": "f2", "source": "n2", "target": "n3", "is_default": False},
    },
    "gateways": {  # detailed gateway information
        "g2": {
            "id": "g2",
            "type": "ExclusiveGateway",
            "name": "",
            "incoming": "flow_id_0",
            "outgoing": ["flow_id_1", "flow_id_2"],
            "data_source": "activity_id",
            "conditions": {
                "flow_id_1": {"evaluate": "result > 10"},  # branch condition
                "flow_id_2": {"evaluate": "result < 10"},  # branch condition
            },
            "converge_gateway_id": "converge_gateway_id",
        },
        "g3": {
            "id": "g3",
            "type": "ConvergeGateway",
            "name": "",
            "incoming": ["flow_id_3", "flow_id_4"],
            "outgoing": "flow_id_5",
        },
    },
    "constants": {  # global variables
        # '${_env_id}': {
        #     'name': '',
        #     'key': '${_env_id}',
        #     'desc': '',
        #     'tag_type': 'input_var',
        #     'validation': '^\d+$',
        #     'show_type': 'show',
        #     'tag_code': '${_env_id}',
        #     'value': '',
        #     'data': {
        #         'set': {
        #             'value': '${set}',
        #             'constant': '',
        #             'hook': 'off',
        #         },
        #         'module': {
        #             'value': '${module}',
        #             'constant': '',
        #             'hook': 'off',
        #         }
        #     }
        # },
        "${_env_id}": {
            "name": "",
            "key": "${_env_id}",
            "desc": "",
            "tag_type": "input_var",
            "validation": r"^\d+$",
            "show_type": "show",
            "tag_code": "${_env_id}",
            "value": "11",
        },
    },
}
| 1.429688 | 1 |
etilog/migrations/0020_auto_20191003_1651.py | hodeld/etiki-prototype1 | 1 | 12761433 | <filename>etilog/migrations/0020_auto_20191003_1651.py<gh_stars>1-10
# Generated by Django 2.2.4 on 2019-10-03 14:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the free-text article_text and date_text fields to ImpactEvent."""

    dependencies = [
        ('etilog', '0019_auto_20190919_1704'),
    ]

    operations = [
        migrations.AddField(
            model_name='impactevent',
            name='article_text',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='impactevent',
            name='date_text',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
    ]
| 1.46875 | 1 |
epython/validate.py | anirrudh/epython | 41 | 12761434 | <filename>epython/validate.py
import ast
# AST node types that may not appear in epython source.
disallowed_nodes = [
    ast.AsyncFor,
    ast.AsyncFunctionDef,
    ast.AsyncWith,
    ast.Delete,
    ast.Raise,
    ast.Try,
    ast.GeneratorExp,
    ast.Await,
    ast.Yield,
    ast.YieldFrom,
    ast.Del,
    ast.ExceptHandler,
    ast.Starred,
    ast.With,
    ast.withitem,
    ast.Interactive,
]


def validate(code):
    """Walk *code* and report the first disallowed AST node found.

    Returns None when every node is allowed, otherwise a
    (ValueError, message) tuple describing the offending node.
    """
    for candidate in ast.walk(code):
        if candidate.__class__ not in disallowed_nodes:
            continue
        message = f"Invalid node {candidate.__class__}"
        if hasattr(candidate, "lineno"):
            message += f" at line {candidate.lineno}"
        return ValueError, message
    return None
| 2.9375 | 3 |
http_provider.py | SDU-Mirrors/github-release-downloader | 1 | 12761435 | <reponame>SDU-Mirrors/github-release-downloader
import contextlib
import logging
from typing import Optional, Any
import urllib3
from urllib3 import PoolManager, HTTPResponse, ProxyManager
from urllib.request import getproxies
from constant import UA_NAME
logger = logging.getLogger(__name__)
# Module-wide connection pool; reconfigured by initialize() below.
http: PoolManager = PoolManager()
# Download/stream chunk size in bytes (1 MiB).
chunk_size = 1048576
def get_proxy() -> Optional[str]:
    """Return the system proxy URL ('all' takes precedence over 'http'), or None."""
    proxies = getproxies()
    for scheme in ('all', 'http'):
        if scheme in proxies:
            return proxies[scheme]
    return None
def initialize():
    """(Re)build the module-level pool manager, honoring system proxy settings."""
    global http
    proxy = get_proxy()
    if proxy is None:
        http = PoolManager(
            retries=False,
            timeout=urllib3.util.Timeout(connect=9, read=120),
            block=True,
        )
    else:
        logger.info('Proxy server is set to {}.'.format(proxy))
        http = ProxyManager(
            proxy_url=proxy,
            retries=False,
            timeout=urllib3.util.Timeout(connect=9, read=120),
            block=True,
        )


# Configure the pool once at import time.
initialize()
@contextlib.contextmanager
def urllib3_http_request(http: urllib3.PoolManager, *args: Any, **kwargs: Any):
    """Context manager around http.request() that releases the connection on exit."""
    r = http.request(*args, **kwargs)
    try:
        yield r
    finally:
        # Return the connection to the pool even if the caller raised.
        r.release_conn()
def download_file(url: str, filepath: str, filesize: Optional[int] = None):
    """Stream *url* to *filepath*, logging progress in MiB.

    Raises if the final HTTP status is not 200 or, when *filesize* is given,
    if the downloaded byte count does not match it.
    """
    logger.info('Downloading file {}'.format(url))
    with urllib3_http_request(http, 'GET', url, preload_content=False, headers={'User-Agent': UA_NAME}) as r:
        with open(filepath, 'wb') as f:
            content_len = int(r.headers['Content-length'])
            downloaded_size = 0
            logger.info('Connecting...')
            for chunk in r.stream(chunk_size):
                downloaded_size += len(chunk)
                f.write(chunk)
                logger.info('{:.2f}/{:.2f} MiB, {:.2%}'.format(
                    downloaded_size / 1048576, content_len / 1048576,
                    downloaded_size / content_len),
                )
    # NOTE(review): the status is only checked after the body has been fully
    # streamed, so an error page's body is written to *filepath* first —
    # confirm whether checking the status before streaming is preferable.
    check_http_code(r, url)
    if filesize is not None and filesize != downloaded_size:
        raise Exception('File length mismatch. Got {}, but {} is expected.'.format(downloaded_size, filesize))
def download_file_with_retry(url: str, filepath: str, filesize: Optional[int] = None, retry_time: int = 3):
    """Download *url* to *filepath*, retrying up to *retry_time* times.

    The last failure is re-raised after exhausting all attempts.
    """
    for attempt in range(1, retry_time + 1):
        try:
            download_file(url, filepath, filesize)
            return
        except Exception as error:  # broad by design: any failure triggers a retry
            logger.warning(error)
            if attempt == retry_time:
                raise error
def check_http_code(resp: HTTPResponse, url: str):
    """Raise an Exception unless *resp* completed with HTTP 200."""
    status = resp.status
    if status == 200:
        return
    raise Exception('HTTP {} on url {}'.format(status, url))
| 2.46875 | 2 |
ConditionalStatement/HotelRoom.py | Rohitm619/Softuni-Python-Basic | 1 | 12761436 | <reponame>Rohitm619/Softuni-Python-Basic
# Pricing rules (per night):
#  - Studio, May/October: 5% off for more than 7 nights, 30% off for more than 14.
#  - Studio, June/September: 20% off for more than 14 nights.
#  - Apartment, any month: 10% off for more than 14 nights.
month = input()
nights = int(input())

# Determine the per-night rate for each room type.  The original arithmetic
# expressions are kept verbatim so the printed prices stay bit-identical; the
# change is that the totals are computed once below instead of being
# duplicated in every branch, and the dead `studio = 0; apartment = 0`
# initializers were removed.
if month == "May" or month == "October":
    if 7 < nights <= 14:
        studio = 50 - 50 * 5 / 100
        apartment = 65
    elif nights > 14:
        studio = 50 - 50 * 30 / 100
        apartment = 65 - 65 * 10 / 100
    else:
        studio = 50
        apartment = 65
elif month == "June" or month == "September":
    if nights > 14:
        studio = 75.20 - 75.20 * 20 / 100
        apartment = 68.70 - 68.70 * 10 / 100
    else:
        studio = 75.20
        apartment = 68.70
elif month == "July" or month == "August":
    if nights > 14:
        studio = 76
        apartment = 77 - 77 * 10 / 100
    else:
        studio = 76
        apartment = 77
# NOTE(review): a month outside May..October leaves the rates undefined and
# raises NameError below, exactly as the original script did — presumably the
# task guarantees valid input.

price_studio = studio * nights
price_apartment = apartment * nights

print(f"Apartment: {price_apartment:.2f} lv.")
print(f"Studio: {price_studio:.2f} lv.")
venv/Lib/site-packages/tklib37/GraphicItem.py | GabrielAmare/Darts | 0 | 12761437 | from tkinter import Canvas
class GraphicItem:
    """Base for drawable canvas items: created lazily, then moved in place."""

    # Subclasses are expected to define these before update() is called:
    itemType: str  # tkinter canvas item type (e.g. 'line', 'rectangle')
    coords: list   # flat coordinate list passed to the canvas
    config: dict   # item configuration keyword options

    def __init__(self, cnv: Canvas):
        self.cnv = cnv
        # Canvas item id; None until the item is first drawn by update().
        self.uid = None

    def update(self):
        """Create the canvas item on first call; only move it on later calls."""
        if self.uid is None:
            # NOTE(review): Canvas._create is a private tkinter API — confirm
            # it is stable across the supported tkinter versions.
            self.uid = self.cnv._create(
                itemType=self.itemType,
                args=self.coords,
                kw=self.config
            )
        else:
            # Only coordinates are refreshed here; config changes made after
            # the first draw are not applied (presumably intentional).
            self.cnv.coords(self.uid, *self.coords)
| 3.15625 | 3 |
fromAudioFile.py | ClownMonster/Python_SpeechToText | 0 | 12761438 | '''
Gets the Text out of an English Audio File
'''
import speech_recognition as sr
# Shared recognizer instance used for every transcription.
r = sr.Recognizer()

def convert_to_text(audioFile):
    """Transcribe an English audio file with Google's API and print the text."""
    with sr.AudioFile(audioFile) as source:
        # Read the whole file into memory, then send it to the recognizer.
        audioData = r.record(source)
        text = r.recognize_google(audioData)
        print("\ntext : ", text)

if __name__ == "__main__":
    file_path = input("Enter the realtive path to audio file: ")
    # 16-122828-0002.wav is the audio file u can use any
    convert_to_text(file_path)
tftrt/examples/image_classification/image_classification.py | sarvex/tensorrt | 662 | 12761439 | <reponame>sarvex/tensorrt<filename>tftrt/examples/image_classification/image_classification.py<gh_stars>100-1000
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import sys
import logging
import multiprocessing
import time
from functools import partial
import numpy as np
import tensorflow as tf
import preprocessing
# Allow import of top level python files
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from benchmark_args import BaseCommandLineAPI
from benchmark_runner import BaseBenchmarkRunner
class CommandLineAPI(BaseCommandLineAPI):
    """CLI for the image-classification benchmark, adding model-specific flags."""

    # Size of the ImageNet validation split used for accuracy runs.
    SAMPLES_IN_VALIDATION_SET = 50000

    def __init__(self):
        super(CommandLineAPI, self).__init__()

        self._parser.add_argument('--input_size', type=int, default=224,
                                  help='Size of input images expected by the '
                                       'model')

        self._parser.add_argument('--num_classes', type=int, default=1001,
                                  help='Number of classes used when training '
                                       'the model')

        self._parser.add_argument('--preprocess_method', type=str,
                                  choices=['vgg', 'inception',
                                           'resnet50_v1_5_tf1_ngc_preprocess'
                                  ],
                                  default='vgg',
                                  help='The image preprocessing method used in '
                                       'dataloading.')
class BenchmarkRunner(BaseBenchmarkRunner):
    """Image-classification benchmark runner reporting top-1 accuracy."""

    ACCURACY_METRIC_NAME = "accuracy"

    def before_benchmark(self, **kwargs):
        """Cache the label shift: 1001-class models include a background class."""
        self._labels_shift = 1 if kwargs["num_classes"] == 1001 else 0

    def compute_accuracy_metric(self, predictions, expected, **kwargs):
        """Return the fraction of predictions matching the expected labels."""
        return np.mean(np.equal(predictions["outputs"], expected))

    def process_model_output(self, outputs, **kwargs):
        """Convert raw model output into shifted, flat class-id predictions."""
        outputs = outputs.numpy()
        if outputs.ndim != 1:
            # Logits/probabilities: reduce to class ids, flattened to 1-D.
            outputs = np.argmax(outputs, axis=1).reshape(-1)
        return {"outputs": outputs - self._labels_shift}
def get_dataset(data_files, batch_size, use_synthetic_data, preprocess_method, input_size):
    """Build a batched tf.data pipeline over ImageNet-style TFRecord files.

    Args:
        data_files: list of TFRecord file paths.
        batch_size: int, batch size (incomplete batches are dropped).
        use_synthetic_data: if True, loop one cached batch forever.
        preprocess_method: 'vgg', 'inception', or
            'resnet50_v1_5_tf1_ngc_preprocess'.
        input_size: int, spatial size expected by the model.
    """

    def deserialize_image_record(record):
        # Parse one serialized Example into (encoded-image bytes, int32 label).
        feature_map = {
            'image/encoded': tf.io.FixedLenFeature([], tf.string, ''),
            'image/class/label': tf.io.FixedLenFeature([1], tf.int64, -1),
            'image/class/text': tf.io.FixedLenFeature([], tf.string, ''),
            'image/object/bbox/xmin': tf.io.VarLenFeature(dtype=tf.float32),
            'image/object/bbox/ymin': tf.io.VarLenFeature(dtype=tf.float32),
            'image/object/bbox/xmax': tf.io.VarLenFeature(dtype=tf.float32),
            'image/object/bbox/ymax': tf.io.VarLenFeature(dtype=tf.float32)
        }
        with tf.compat.v1.name_scope('deserialize_image_record'):
            obj = tf.io.parse_single_example(serialized=record,
                                             features=feature_map)
            imgdata = obj['image/encoded']
            label = tf.cast(obj['image/class/label'], tf.int32)
        return imgdata, label

    def get_preprocess_fn(preprocess_method, input_size):
        """Creates a function to parse and process a TFRecord

        preprocess_method: string
        input_size: int
        returns: function, the preprocessing function for a record
        """
        if preprocess_method == 'vgg':
            preprocess_fn = preprocessing.vgg_preprocess
        elif preprocess_method == 'inception':
            preprocess_fn = preprocessing.inception_preprocess
        elif preprocess_method == 'resnet50_v1_5_tf1_ngc_preprocess':
            preprocess_fn = preprocessing.resnet50_v1_5_tf1_ngc_preprocess
        else:
            raise ValueError(
                'Invalid preprocessing method {}'.format(preprocess_method)
            )

        def preprocess_sample_fn(record):
            # Parse TFRecord
            imgdata, label = deserialize_image_record(record)
            label -= 1  # Change to 0-based (don't use background class)
            try:
                image = tf.image.decode_jpeg(
                    imgdata,
                    channels=3,
                    fancy_upscaling=False,
                    dct_method='INTEGER_FAST'
                )
            except:
                image = tf.image.decode_png(imgdata, channels=3)
            # Use model's preprocessing function
            image = preprocess_fn(image, input_size, input_size)
            return image, label

        return preprocess_sample_fn

    dataset = tf.data.Dataset.from_tensor_slices(data_files)

    # Read several TFRecord files in parallel.
    dataset = dataset.interleave(
        tf.data.TFRecordDataset,
        cycle_length=min(8, multiprocessing.cpu_count()),
        block_length=max(batch_size, 32)
    )

    # preprocess function for input data
    preprocess_fn = get_preprocess_fn(
        preprocess_method=preprocess_method,
        input_size=input_size
    )

    dataset = dataset.map(
        map_func=preprocess_fn,
        num_parallel_calls=min(8, multiprocessing.cpu_count())
    )

    dataset = dataset.batch(batch_size=batch_size, drop_remainder=True)

    if use_synthetic_data:
        # Loop a single cached real batch forever: measures compute, not I/O.
        dataset = dataset.take(count=1)  # loop over 1 batch
        dataset = dataset.cache()
        dataset = dataset.repeat()

    dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

    return dataset
if __name__ == '__main__':

    cmdline_api = CommandLineAPI()
    args = cmdline_api.parse_args()

    def get_files(data_dir, filename_pattern):
        """Glob *filename_pattern* under *data_dir*; [] when data_dir is None."""
        if data_dir is None:
            return []
        files = tf.io.gfile.glob(os.path.join(data_dir, filename_pattern))
        if not files:
            raise ValueError('Can not find any files in {} with '
                             'pattern "{}"'.format(data_dir, filename_pattern))
        return files

    data_files = get_files(args.data_dir, 'validation*')

    # Calibration data is only needed for INT8 precision.
    calib_files = (
        []
        if args.precision != 'INT8' else
        get_files(args.calib_data_dir, 'train*')
    )

    def _input_fn(input_files, build_steps, model_phase):
        """Yield up to *build_steps* image batches for engine build/calibration."""
        dataset = get_dataset(
            data_files=input_files,
            batch_size=args.batch_size,
            # even when using synthetic data, we need to
            # build and/or calibrate using real training data
            # to be in a realistic scenario
            use_synthetic_data=False,
            preprocess_method=args.preprocess_method,
            input_size=args.input_size
        )

        for i, (batch_images, _) in enumerate(dataset):
            if i >= build_steps:
                break

            print("* [%s] - step %04d/%04d" % (
                model_phase, i + 1, build_steps
            ))
            yield batch_images,

    calibration_input_fn = partial(
        _input_fn,
        input_files=calib_files,
        build_steps=args.num_calib_inputs // args.batch_size,
        model_phase="Calibration"
    )

    optimize_offline_input_fn = partial(
        _input_fn,
        input_files=data_files,
        build_steps=1,
        model_phase="Building"
    )

    runner = BenchmarkRunner(
        input_saved_model_dir=args.input_saved_model_dir,
        output_saved_model_dir=args.output_saved_model_dir,
        allow_build_at_runtime=args.allow_build_at_runtime,
        calibration_input_fn=calibration_input_fn,
        debug=args.debug,
        gpu_mem_cap=args.gpu_mem_cap,
        input_signature_key=args.input_signature_key,
        max_workspace_size_bytes=args.max_workspace_size,
        minimum_segment_size=args.minimum_segment_size,
        num_calib_inputs=args.num_calib_inputs,
        optimize_offline=args.optimize_offline,
        optimize_offline_input_fn=optimize_offline_input_fn,
        output_tensor_indices=args.output_tensor_indices,
        output_tensor_names=args.output_tensor_names,
        precision_mode=args.precision,
        use_dynamic_shape=args.use_dynamic_shape,
        use_tftrt=args.use_tftrt
    )

    get_benchmark_input_fn = partial(
        get_dataset,
        data_files=data_files,
        input_size=args.input_size,
        preprocess_method=args.preprocess_method
    )

    runner.execute_benchmark(
        batch_size=args.batch_size,
        display_every=args.display_every,
        get_benchmark_input_fn=get_benchmark_input_fn,
        num_iterations=args.num_iterations,
        num_warmup_iterations=args.num_warmup_iterations,
        skip_accuracy_testing=(
            args.use_synthetic_data or args.skip_accuracy_testing
        ),
        use_synthetic_data=args.use_synthetic_data,
        use_xla=args.use_xla,

        ########### Additional Settings ############

        num_classes=args.num_classes,
    )
| 1.695313 | 2 |
RHGraph/generators/RHGeneratorConstants.py | crhaithcock/RushHour | 0 | 12761440 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 4 16:56:20 2019
@author: CHaithcock
"""
import sys
sys.path.insert(1, 'C:/Users/chaithcock/Documents/repos/RushHour/RHGraph')
import RHConstants as const
'''
Constants for Topological Combinatorial Constructions.

Each "strip" is a length-6 row/column template listing which piece marker
occupies each slot (0 = empty).  Cars span 2 slots, trucks span 3.
'''
STRIPS = ['C', 'CC', 'CCC', 'CT', 'TC', 'T', 'TT']
SLOTS = range(12)
EXIT_SLOT = 2
ROW_SLOTS = SLOTS[:6]   # slots 0-5: the six board rows
COL_SLOTS = SLOTS[6:]   # slots 6-11: the six board columns


def _build_strips(car, truck):
    """Build the strip dictionary for one orientation.

    `car` / `truck` are the cell markers for a 2-slot car and a 3-slot
    truck.  Placement order matches the original hand-enumerated tables
    (left-to-right sweep of the leading piece).
    """
    strips = {}
    # One car: 5 possible positions in 6 slots.
    strips['C'] = [[0] * i + [car] * 2 + [0] * (4 - i) for i in range(5)]
    # Two cars, first at slot i, second at slot j (j >= i + 2): 6 arrangements.
    strips['CC'] = [
        [0] * i + [car] * 2 + [0] * (j - i - 2) + [car] * 2 + [0] * (4 - j)
        for i in range(3)
        for j in range(i + 2, 5)
    ]
    # Three cars fill the strip completely.
    strips['CCC'] = [[car] * 6]
    # One car (2 slots) then one truck (3 slots): 3 arrangements.
    strips['CT'] = [
        [car] * 2 + [truck] * 3 + [0],
        [car] * 2 + [0] + [truck] * 3,
        [0] + [car] * 2 + [truck] * 3,
    ]
    # BUG FIX: the original enumerated 'TC' as truck*2 + car*3, i.e. a 2-slot
    # truck followed by a 3-slot car — inconsistent with every other table
    # (cars are 2 slots, trucks 3).  'TC' is truck first, then car.
    strips['TC'] = [
        [truck] * 3 + [car] * 2 + [0],
        [truck] * 3 + [0] + [car] * 2,
        [0] + [truck] * 3 + [car] * 2,
    ]
    # One truck: 4 possible positions.
    strips['T'] = [[0] * i + [truck] * 3 + [0] * (3 - i) for i in range(4)]
    # Two trucks fill the strip completely.
    strips['TT'] = [[truck] * 6]
    return strips


HORZ_STRIPS = _build_strips(const.HORIZONTAL_CAR, const.HORIZONTAL_TRUCK)
VERT_STRIPS = _build_strips(const.VERTICAL_CAR, const.VERTICAL_TRUCK)
| 2.046875 | 2 |
localstack/services/resourcegroupstaggingapi/rgta_starter.py | andrei-gypaetus/localstack | 3 | 12761441 | <reponame>andrei-gypaetus/localstack
from localstack import config
from localstack.services.infra import start_moto_server
def start_rgsa(port=None, asynchronous=False, update_listener=None):
    """Launch the moto-backed Resource Groups Tagging API service.

    Falls back to the configured default port when none is given and
    delegates the actual startup to the shared moto server helper.
    """
    chosen_port = port or config.PORT_RESOURCEGROUPSTAGGINGAPI
    return start_moto_server(
        'resourcegroupstaggingapi',
        chosen_port,
        name='Resource Groups Tagging API',
        asynchronous=asynchronous,
        update_listener=update_listener,
    )
| 1.835938 | 2 |
convrnn/train_noise.py | esizikova/anytime-prediction | 3 | 12761442 | import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
from PIL import Image, ImageEnhance
import cv2
import urllib
import numpy as np
from tensorflow.keras.utils import to_categorical
import glob
from random import shuffle
import h5py
import torch
from torchvision import transforms
import math
import time
import os
import argparse
# tf.enable_v2_behavior()
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
from rcnn_sat import preprocess_image, bl_net
from load_data import load_dataset, load_dataset_h5, prep_pixels, prep_pixels_h5
from custom_transforms import all_random_noise
# Report whether TensorFlow can see a GPU; the script proceeds either way.
if tf.test.gpu_device_name():
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
else:
print("Please install GPU version of TF")
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
# NOTE(review): tf.test.is_gpu_available is deprecated in favour of
# tf.config.list_physical_devices('GPU'); its result is unused here anyway.
tf.test.is_gpu_available(cuda_only=False, min_cuda_compute_capability=None)
def _str2bool(value):
    """Parse a textual CLI boolean.

    argparse's `type=bool` is a known trap: bool('False') is True, so any
    non-empty string (including "False") enabled the flag.  Accept the
    usual textual spellings explicitly instead.
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ('true', '1', 'yes', 'y')


parser = argparse.ArgumentParser()
parser.add_argument('--tag', default='noise-contrast-gray', type=str)
parser.add_argument('--color', default='gray', type=str)
# BUG FIX: `type=bool` made "--download-data False" truthy; parse properly.
parser.add_argument('--download-data', default=False, type=_str2bool)
parser.add_argument('--pretrained', default=True, type=_str2bool)
args = parser.parse_args()
print(args)
# Per-color dataset directory, e.g. ../data/gray.
data_root = '../data/{}'.format(args.color)
# Optionally fetch the dataset (presumably CIFAR-10 given the import above —
# confirm in load_data) and cache it as h5 files under data_root.
if args.download_data == True:
trainX, trainy, testX, testy = load_dataset()
os.makedirs(data_root, exist_ok = True)
prep_pixels_h5(trainX, trainy, testX, testy, data_root, args.color)
args.download_data = False
# Always (re)load from the h5 cache so both code paths share one data format.
if args.download_data == False:
trainX,trainy,testX,testy = load_dataset_h5(data_root)
# bl_net recurrent model over 128x128 RGB inputs, 10 output classes.
input_layer = tf.keras.layers.Input((128, 128, 3))
model = bl_net(input_layer, classes=10, cumulative_readout=False)
if args.pretrained:
model.load_weights('bl_imagenet.h5',skip_mismatch=True,by_name=True)
## Lets try fine tuning it
# tf.keras.utils.plot_model(model,to_file='check.png')
# Freeze the backbone; only the readout/softmax heads listed below stay
# trainable (the "Sotfmax" spelling must match the layer names in rcnn_sat).
skip_layers = ['ReadoutDense','Sotfmax_Time_0','Sotfmax_Time_1',
'Sotfmax_Time_2','Sotfmax_Time_3','Sotfmax_Time_4',
'Sotfmax_Time_5','Sotfmax_Time_6','Sotfmax_Time_7']
for layer in model.layers:
if layer.name in skip_layers:
layer.trainable = True
else:
layer.trainable = False
# compile model with optimizer and loss
"""
B, BL and parameter-matched controls (B-K, B-F and B-D) were trained for a total of 90 epochs
with a batch size of 100. B-U was trained using the same procedure but with a batch size of 64
due to its substantially larger number of parameters.
The cross-entropy between the softmax of the network category readout and the labels
was used as the training loss. For networks with multiple readouts (BL and B-U),
we calculate the cross-entropy at each readout and average this across readouts.
Adam [64] was used for optimisation with a learning rate of 0.005 and epsilon parameter 0.1.
L2-regularisation was applied throughout training with a coefficient of 10−6.
"""
# NOTE(review): `cce` is created but unused — compile() uses the string loss
# name instead.
cce = tf.keras.losses.CategoricalCrossentropy()
opt = tf.keras.optimizers.Adam(learning_rate=0.005)
model.compile(optimizer=opt,loss='categorical_crossentropy',metrics=['accuracy'])
from tensorflow.keras.callbacks import ModelCheckpoint
# Keep only the best-by-training-loss weights.  NOTE(review): `period` is
# deprecated in newer Keras (use save_freq) — confirm installed version.
checkpoint = ModelCheckpoint("pretrained_mp_{}.hdf5".format(args.tag), monitor='loss', verbose=1,
save_best_only=True, mode='auto', period=1)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Inject random noise augmentation into every training batch.
datagen = ImageDataGenerator(preprocessing_function=all_random_noise)
# trainy = np.transpose(trainy, (1,2,0))
# testy = np.transpose(testy, (1,2,0))
print(trainX.shape)
print(trainy.shape)
# trainy/testy appear to carry a leading axis; [0] selects the first slice —
# TODO confirm the label layout against load_data.prep_pixels_h5.
history = model.fit(x=datagen.flow(trainX, trainy[0],batch_size=32),
validation_data=(testX,testy[0]),
steps_per_epoch=len(trainX)//32,
epochs=100,callbacks=[checkpoint])
# Timestamped export so repeated runs never overwrite each other.
model.save('./model/{}_{}'.format(
args.tag,
time.strftime('%Y.%m.%d_%H.%M.%S'),
))
| 2.359375 | 2 |
calibration_tool/observer/base_observer.py | liwen-deepmotion/map_based_lidar_camera_calibration_tool | 26 | 12761443 | <filename>calibration_tool/observer/base_observer.py
# Author: <NAME> (<EMAIL>)
from typing import Iterable, Tuple, Any, Callable, TYPE_CHECKING
if TYPE_CHECKING:
from map_based_calibrator import MapBasedCalibrator
class BaseObserver(object):
    """Common base for every observer class in the project.

    Subclasses get convenient access to the owning Calibrator instance
    (``self.editor``), the Qt-wrapped renderer plus a repaint trigger, and
    helpers that register their declared callbacks with the Qt signal /
    VTK event machinery.
    """

    def __init__(self, editor: 'MapBasedCalibrator'):
        self.editor = editor
        self._is_activated = True
        # (event id, callback, priority) triples registered via
        # connect_myself_to_vtk_events().
        self.QT_EVENT_CALLBACK_PRIORITY_TUPLES: Iterable[Tuple[int, Callable, int]] = []
        # (signal, callback) pairs registered via connect_myself_to_qt_signals().
        self.QT_SIGNAL_CALLBACK_TUPLES: Iterable[Tuple[Any, Callable]] = []

    @property
    def renderer(self):
        """Renderer of the editor's main canvas widget."""
        return self.editor.main_canvas_widget.renderer()

    def update(self):
        """Ask the main canvas widget to repaint."""
        self.editor.main_canvas_widget.custom_update()

    def invoke_event(self, event: int, *args, **kwargs):
        """Forward *event* (with its payload) to the editor's event handler."""
        self.editor.event_handler.invoke_event(event, *args, **kwargs)

    def connect_myself_to_qt_signals(self):
        """Hook every declared (signal, callback) pair up to Qt."""
        for signal, handler in self.QT_SIGNAL_CALLBACK_TUPLES:
            signal.connect(handler)

    def connect_myself_to_vtk_events(self):
        """Register every declared (event, callback, priority) triple."""
        for event_id, handler, priority in self.QT_EVENT_CALLBACK_PRIORITY_TUPLES:
            self.editor.event_handler.add_observer(event_id, handler, priority)

    def is_activated(self):
        return self._is_activated

    def activate(self):
        self._is_activated = True

    def deactivate(self):
        self._is_activated = False
| 2.078125 | 2 |
komax_app/migrations/0013_auto_20200323_1556.py | UsernameForGerman/PrettlNKKomax | 0 | 12761444 | <gh_stars>0
# Generated by Django 2.2.7 on 2020-03-23 15:56
from django.db import migrations, models
# Auto-generated by Django 2.2.7 (see file header); applies on top of 0012.
class Migration(migrations.Migration):
dependencies = [
('komax_app', '0012_auto_20200323_1254'),
]
# Redeclare both tube-length fields on TaskPersonal as nullable
# CharField(max_length=256).
operations = [
migrations.AlterField(
model_name='taskpersonal',
name='tube_len_1',
field=models.CharField(max_length=256, null=True),
),
migrations.AlterField(
model_name='taskpersonal',
name='tube_len_2',
field=models.CharField(max_length=256, null=True),
),
]
| 1.5 | 2 |
intent_classifier/test_intent.py | MartinoMensio/botcycle-nlu | 1 | 12761445 | import spacy
import sys
import numpy as np
import operator
from keras.models import load_model
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import preprocess_data
# Maximum tokens embedded per sentence; extra tokens would overflow the
# pre-allocated matrix below.
MAX_SEQUENCE_LENGTH = 100
# Per-token embedding width; must match the spaCy model's word vectors —
# presumably 300-d for 'en', confirm against the loaded model.
EMBEDDING_DIM = 300
model = load_model('models/bidirectional_lstm/model.h5')
nlp = spacy.load('en')
print('Test your sentences.')
print('> ', end='', flush=True)
intents = preprocess_data.load_intents()
# Simple REPL: read one sentence per stdin line, print intents ranked by score.
for line in sys.stdin:
doc = nlp(line)
# One sample of shape (1, MAX_SEQUENCE_LENGTH, EMBEDDING_DIM), zero-padded.
embedding_matrix = np.zeros((1, MAX_SEQUENCE_LENGTH, EMBEDDING_DIM))
# NOTE(review): a sentence longer than MAX_SEQUENCE_LENGTH would raise an
# IndexError here — confirm inputs are short or clamp the index.
for index, word in enumerate(doc):
embedding_matrix[0][index] = word.vector
prediction = model.predict(embedding_matrix)
scores = {}
# Map each class index of the prediction to its intent label.
for (x, y), score in np.ndenumerate(prediction):
scores[intents[y]] = score
# Highest-scoring intents first.
sorted_scores = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)
print(sorted_scores)
print('> ', end='', flush=True) | 2.609375 | 3 |
code/config_files/examples/view_grasp.py | Ric-27/Task-Oriented-Grasp-Analysis | 0 | 12761446 | import argparse
import numpy as np
import os, sys
import keyboard
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from functions import (
assert_OBJ_exist_if_GRP_exist,
get_OBJECT_dict,
is_obj_grp_OBJ_GRP,
partition_str,
object_file_name,
print_if_worked,
)
def _str2bool(value):
    """Parse a textual CLI boolean.

    argparse's `type=bool` treats any non-empty string (even "False") as
    True, so boolean options silently misbehaved; accept the usual
    spellings explicitly instead.
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ("true", "1", "yes", "y")


parser = argparse.ArgumentParser(
    description="view the grasps saved on the predetermined file"
)
parser.add_argument(
    "-o",
    "--object",
    type=str,
    default="",
    help="select an object [def: all]",
)
parser.add_argument(
    "-g",
    "--grasp",
    type=str,
    default="",
    help="select a grasp of an object [def: all]",
)
parser.add_argument(
    "-gi",
    "--grasp_info",
    type=_str2bool,  # BUG FIX: `type=bool` treated "-gi False" as True
    default=False,
    help="print Grasp Info [def: False]",
)
args = parser.parse_args()
OBJ = args.object
GRP = args.grasp
# A grasp filter only makes sense together with an existing object filter.
assert_OBJ_exist_if_GRP_exist(OBJ, GRP)
objects = get_OBJECT_dict()
worked = False
for obj1, mesh in objects["meshes"].items():
    for grp1, grasp in objects["grasps"].items():
        obj, grp = partition_str(grp1)
        if obj != obj1:
            # This grasp belongs to a different object's mesh.
            continue
        if is_obj_grp_OBJ_GRP(OBJ, GRP, obj, grp):
            worked = True
            # BUG FIX: the condition used to read `args.grasp_info or True`,
            # which printed grasp info unconditionally; honor the -gi flag.
            if args.grasp_info:
                print("Gt \n", grasp.Gt.round(3))
                grasp.get_classification(True)
            mesh.view(grp1, grasp.contact_points)
print_if_worked(
    worked,
    "Finished" + 50 * " ",
    "No objects and/or grasps declared on "
    + object_file_name()
    + ".yaml or object and/or grasp passed as argument doesn't exists",
)
| 2.828125 | 3 |
migrations/versions/schema/dc089ecc2c38_.py | Georgi2704/pricelist-fastapi-boilerplate | 0 | 12761447 | """empty message
Revision ID: dc089ecc2c38
Revises: <PASSWORD>
Create Date: 2019-09-25 00:46:18.814508
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "dc089ecc2c38"
down_revision = "<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
"""Recreate ix_shops_name on shops.name without the UNIQUE constraint."""
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("ix_shops_name", table_name="shops")
op.create_index(op.f("ix_shops_name"), "shops", ["name"], unique=False)
# ### end Alembic commands ###
def downgrade():
"""Restore the UNIQUE index on shops.name (reverse of upgrade)."""
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_shops_name"), table_name="shops")
op.create_index("ix_shops_name", "shops", ["name"], unique=True)
# ### end Alembic commands ###
| 1.078125 | 1 |
src/data/split.py | dankiy/mm-ml-2021 | 0 | 12761448 | import numpy as np
def train_test(
    X: np.ndarray,
    y: np.ndarray,
    test_size: float,
    random_seed: int = 0,
) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Shuffle the samples (seeded) and split them into train/test parts.

    Args:
        X (np.ndarray): decision matrix
        y (np.ndarray): ground-truth labels
        test_size (float): fraction of samples assigned to the test split
        random_seed (int): number to initialize a pseudorandom number generator

    Returns:
        tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: X_train,
        y_train, X_test, y_test
    """
    np.random.seed(random_seed)
    total = X.shape[0]
    cut = int(total * (1 - test_size))
    shuffled = np.random.permutation(np.arange(total))
    train_ids, test_ids = shuffled[:cut], shuffled[cut:]
    return X[train_ids], y[train_ids], X[test_ids], y[test_ids]
class _KFoldIterator:
def __init__(self, kfold):
self._kfold = kfold
self._counter = 0
def __next__(self):
if self._counter < self._kfold.num_folds:
item = self._kfold.__getitem__(self._counter)
self._counter += 1
return item
else:
raise StopIteration
class KFold:
    """Indexable/iterable k-fold cross-validation splitter.

    Samples are shuffled once (seeded) at construction; fold ``k`` uses the
    ``k``-th contiguous chunk of the shuffled data as the test split and the
    remainder as the train split.

    Args:
        X (np.ndarray): samples decision matrix
        y (np.ndarray): samples ground-truth value
        num_folds (int): number of cross-validation folds
        random_seed (int): value for numpy random number generator initialization

    Methods:
        __getitem__(key): returns X_train, y_train, X_test, y_test
    """

    def __init__(self, X: np.ndarray, y: np.ndarray, num_folds: int, random_seed: int):
        self.num_samples = X.shape[0]
        self.num_folds = num_folds
        np.random.seed(random_seed)
        shuffle_order = np.random.permutation(np.arange(self.num_samples))
        # Keep pre-shuffled copies so every fold is a contiguous slice.
        self.X = X[shuffle_order]
        self.y = y[shuffle_order]

    def __getitem__(self, key: int):
        """Return (X_train, y_train, X_test, y_test) for fold `key`."""
        assert key < self.num_folds, "Key must be lower than number of folds"
        assert key >= 0, "Key must be not negative"
        lo = int(key * self.num_samples / self.num_folds)
        hi = int((key + 1) * self.num_samples / self.num_folds)
        X_test, y_test = self.X[lo:hi], self.y[lo:hi]
        X_train = np.concatenate([self.X[:lo], self.X[hi:]], axis=0)
        y_train = np.concatenate([self.y[:lo], self.y[hi:]], axis=0)
        return X_train, y_train, X_test, y_test

    def __iter__(self):
        return _KFoldIterator(self)
def cross_val(
    X: np.ndarray,
    y: np.ndarray,
    num_folds: int,
    random_seed: int = 0,
) -> KFold:
    """Shuffle the data (seeded) and wrap it in a KFold splitter.

    Args:
        X (np.ndarray): decision matrix
        y (np.ndarray): ground-truth labels
        num_folds (int): number of train/test folds
        random_seed (int): number to initialize a pseudorandom number generator

    Returns:
        KFold: object containing data with __getitem__ method for getting splits
    """
    return KFold(X, y, num_folds, random_seed)
| 2.890625 | 3 |
1 ano/logica-de-programacao/listas/nome-telefone.py | Biguelini/Atividades-Tecnico-em-Informatica | 4 | 12761449 | <filename>1 ano/logica-de-programacao/listas/nome-telefone.py
# Phone-book exercise: read two name/phone pairs, then look one up by name.
nomes = [0] * 10
telefone = [0] * 10
for entrada in range(2):
    nomes[entrada] = input("Digite um nome... ")
    telefone[entrada] = input("Digite o telefone respectivo ")
# index() locates the queried name; its position gives the matching phone.
posicao = nomes.index(input("Qual nome referente ao número desejado? "))
print(f"O número de {nomes[posicao]} é {telefone[posicao]}")
csv_parser.py | mecroby/test_pi_learning | 0 | 12761450 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 18 21:10:52 2017
@author: roby
"""
# parsing of a CSV file containing malformed values
import dateutil.parser
import csv
def try_or_none(f):
    """Wrap `f` so that parse failures yield None instead of raising."""
    def f_or_none(x):
        try:
            return f(x)
        except Exception:  # any parse error maps to None, as before
            return None
    return f_or_none


def parse_row(input_row, parsers):
    """Parse one CSV row, applying `parsers` positionally.

    A parser of None leaves the field untouched; a failing parser yields
    None for that field.  Extra fields/parsers are ignored (zip semantics).
    """
    return [try_or_none(parser)(value) if parser is not None else value
            for value, parser in zip(input_row, parsers)]


def parse_rows_with(reader, parsers):
    """Lazily parse every row produced by `reader` with `parse_row`."""
    for row in reader:
        yield parse_row(row, parsers)
# Parse data_bad.txt as CSV rows of (date, raw string, float); fields that
# fail to parse become None instead of raising.
# NOTE(review): Python 2 syntax below (print statement, csv in "rb" mode).
data=[]
with open("data_bad.txt","rb") as f:
reader=csv.reader(f)
for line in parse_rows_with(reader,[dateutil.parser.parse, None, float]):
data.append(line)
# print only the rows containing a None value ==> a field failed to parse
for row in data:
if any(x is None for x in row):
print row
def try_parse_field(field_name, value, parser_dict):
    """Parse `value` with the parser registered for `field_name`.

    Unparseable values become None; fields without a registered parser
    pass through unchanged.
    """
    parser = parser_dict.get(field_name)
    if parser is not None:
        return try_or_none(parser)(value)
    else:
        return value


def parse_dict(input_dict, parser_dict):
    """Return a copy of `input_dict` with each value run through its field parser.

    BUG FIX: the original called `try_parse_field(field_name, value.parser_dict)`
    — a typo (attribute access instead of two comma-separated arguments) that
    failed on every call.  `.iteritems()` was also replaced by `.items()`,
    which works on both Python 2 and 3.
    """
    return {field_name: try_parse_field(field_name, value, parser_dict)
            for field_name, value in input_dict.items()}
| 3.171875 | 3 |