max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
orderbook_tests.py | valmac/HFT-Orderbook | 1 | 12760151 | <gh_stars>1-10
# Import Built-Ins
import logging
from unittest import TestCase
# Import Third-Party
# Import Homebrew
from lob import LimitOrderBook, Order
# Init Logging Facilities
log = logging.getLogger(__name__)
class OrderTests(TestCase):
    """Integration tests for LimitOrderBook order processing."""

    def test_adding_a_new_order_works(self):
        """Adding, updating and appending orders maintains book state."""
        lob = LimitOrderBook()
        bid_order = Order(uid=1, is_bid=True, size=5, price=100)
        ask_order = Order(uid=2, is_bid=False, size=5, price=200)
        lob.process(bid_order)
        lob.process(ask_order)
        self.assertEqual(lob.best_ask.price, 200)
        self.assertEqual(lob.best_bid.price, 100)
        # Level volume is size * price (5 * 100).
        self.assertEqual(lob.best_bid.volume, 500)
        # Assert that the best bid (bid_order) has no previous and no next item,
        # since it is the only one in the book on the bid side at the moment.
        self.assertEqual(len(lob.best_bid), 1)
        self.assertEqual(len(lob.best_ask), 1)
        self.assertIsNone(bid_order.next_item)
        self.assertIsNone(bid_order.previous_item)
        self.assertEqual(lob.best_bid.orders.head, bid_order)
        self.assertEqual(lob.best_ask.orders.head, ask_order)
        self.assertIn(1, lob._orders)
        # Assert that updating an order works
        updated_bid_order = Order(uid=1, is_bid=True, size=4, price=100, timestamp=bid_order.timestamp)
        lob.process(updated_bid_order)
        self.assertEqual(lob.best_bid.orders.head.size, 4)
        self.assertEqual(lob.best_bid.volume, 400)
        # BUGFIX: uid 2 is the ask order, so its update must carry
        # is_bid=False (was erroneously True, which would target the bid side).
        updated_ask_order = Order(uid=2, is_bid=False, size=4, price=200, timestamp=ask_order.timestamp)
        lob.process(updated_ask_order)
        self.assertEqual(lob.best_ask.orders.head.size, 4)
        self.assertEqual(lob.best_ask.volume, 800)
        # Assert that adding an additional order to a limit level updates the
        # doubly linked list correctly
        bid_order_2 = Order(uid=3, is_bid=True, size=5, price=100)
        lob.process(bid_order_2)
        self.assertEqual(lob.best_bid.orders.head.next_item, bid_order_2)
        self.assertEqual(lob.best_bid.orders.tail, bid_order_2)
        self.assertEqual(len(lob.best_bid), 2)

    def test_removing_orders_works(self):
        """Removing orders re-links the level's DLL and prunes empty levels."""
        lob = LimitOrderBook()
        bid_order = Order(uid=1, is_bid=True, size=5, price=100)
        bid_order_2 = Order(uid=2, is_bid=True, size=10, price=100)
        ask_order = Order(uid=3, is_bid=False, size=10, price=200)
        ask_order_2 = Order(uid=4, is_bid=False, size=10, price=200)
        lob.process(bid_order)
        lob.process(bid_order_2)
        lob.process(ask_order)
        lob.process(ask_order_2)
        # Assert that removing an order from a limit level with several
        # orders resets the tail, head and previous / next items accordingly
        removed_bid_order = Order(uid=1, is_bid=True, size=0, price=100)
        self.assertEqual(len(lob.best_bid), 2)
        self.assertEqual(lob.best_bid.orders.head, bid_order)
        self.assertEqual(lob.best_bid.orders.tail, bid_order_2)
        lob.process(removed_bid_order)
        self.assertEqual(len(lob.best_bid), 1)
        self.assertEqual(lob.best_bid.orders.head, bid_order_2)
        self.assertEqual(lob.best_bid.orders.tail, bid_order_2)
        self.assertIsNone(lob.best_bid.orders.head.next_item)
        self.assertIsNone(lob.best_bid.orders.head.previous_item)
        self.assertNotIn(removed_bid_order.uid, lob._orders)
        self.assertIn(removed_bid_order.price, lob._price_levels)
        # Assert that removing the last Order in a price level removes its
        # limit Level accordingly
        removed_bid_order_2 = Order(uid=2, is_bid=True, size=0, price=100)
        lob.process(removed_bid_order_2)
        self.assertIsNone(lob.best_bid)
        self.assertNotIn(removed_bid_order_2.uid, lob._orders)
        self.assertNotIn(removed_bid_order_2.price, lob._price_levels)
| 2.65625 | 3 |
treex/module.py | msaroufim/treex | 0 | 12760152 | <filename>treex/module.py<gh_stars>0
import typing as tp
import jax
import jax.numpy as jnp
import jax.tree_util
import numpy as np
from treex import types
from treex.tree_object import TreeObject, module_map
A = tp.TypeVar("A")
B = tp.TypeVar("B")
M = tp.TypeVar("M", bound="Module")
class Module(TreeObject):
    # True while the module is in training mode (toggled via `train` / `eval`).
    _training: bool
    # True once `init` has been run for this module instance.
    _initialized: bool

    def __init__(self) -> None:
        self._training = True
        self._initialized = False
        super().__init__()

    @property
    def initialized(self) -> bool:
        # Read-only view of the initialization flag.
        return self._initialized

    @property
    def training(self) -> bool:
        # Read-only view of the training-mode flag.
        return self._training

    def init(self: M, key: tp.Union[int, jnp.ndarray], inplace: bool = False) -> M:
        """
        Creates a new module with the same structure, but with its fields initialized given a seed `key`. The following
        procedure is used:
        1. The input `key` is split and iteratively updated before passing a derived value to any
        process that requires initialization.
        2. `Initializer`s are called and applied to the module first.
        3. `TreeObject.module_init` methods are called last.
        Arguments:
            key: The seed to use for initialization.
        Returns:
            The new module with the fields initialized.
        """
        if isinstance(key, int):
            key = jax.random.PRNGKey(key)

        def next_key() -> jnp.ndarray:
            # Split the captured PRNG key, returning a fresh subkey per call.
            nonlocal key
            assert isinstance(key, jnp.ndarray)
            next_key, key = jax.random.split(key)
            return next_key

        def call_module_init(module: TreeObject) -> TreeObject:
            # Run `module_init` once per sub-Module; the flag guards against
            # re-initialization on repeated traversals.
            if isinstance(module, Module) and not module._initialized:
                module.module_init(next_key())
                module._initialized = True
            return module

        # Replace every Initializer leaf in the tree with its generated value.
        module = jax.tree_map(
            lambda initializer: (
                initializer(next_key())
                if isinstance(initializer, types.Initializer)
                else initializer
            ),
            self,
        )
        if inplace:
            # here we update initialized fields by the above tree_map
            self.update(module, inplace=True)
            # now call module_init inplace
            return module_map(call_module_init, self, inplace=True)
        else:
            return module_map(call_module_init, module, inplace=False)

    def train(self: M, mode: bool = True, inplace: bool = False) -> M:
        """
        Creates a new module with the same structure, but with `TreeObject.training` set to the given value.
        Arguments:
            mode: The new training mode.
        Returns:
            The new module in with the training mode is set to the given value.
        """

        def set_training(module: TreeObject) -> TreeObject:
            if isinstance(module, Module):
                module._training = mode
            return module

        return module_map(set_training, self, inplace=inplace)

    def eval(self: M, inplace: bool = False) -> M:
        """
        Creates a new module with the training mode set to False, equivalent to calling `train(False)`.
        Returns:
            The new module with the training mode set to False.
        """
        return self.train(False, inplace=inplace)
| 2.4375 | 2 |
test.py | gnudeb/ludere | 0 | 12760153 | from dataclasses import dataclass
from ludere.core import Ludere
# Wire up the Ludere dependency-injection container and register beans.
l = Ludere()
Register = l.register


@Register
@dataclass
class Config:
    # Example configuration value; injected into dependants by Ludere.
    x: int = 5

    def __post_init__(self):
        print("Config got instantiated!")


@Register
@dataclass
class App:
    # Resolved automatically by Ludere from the registered Config bean.
    config: Config

    def __post_init__(self):
        print(self.config.x)


if __name__ == '__main__':
    # Instantiate all registered beans, resolving dependencies.
    l.run()
| 2.4375 | 2 |
setup.py | jee67/evohome-client | 0 | 12760154 | from setuptools import setup
# Package metadata for distributing the Evohome web-service client.
setup(
    name='evohomeclient',
    version='0.2.8',
    description='Python client for connecting to the Evohome webservice',
    url='https://github.com/watchforstock/evohome-client/',
    download_url='https://github.com/watchforstock/evohome-client/tarball/0.2.8',
    author='<NAME>',
    author_email='<EMAIL>',
    license='Apache 2',
    classifiers=[
        'Development Status :: 3 - Alpha',
    ],
    keywords=['evohome'],
    packages=['evohomeclient', 'evohomeclient2'],
    install_requires=['requests']
)
| 1.328125 | 1 |
spiderss.py | Deleh/spiderss | 0 | 12760155 | <reponame>Deleh/spiderss<filename>spiderss.py
#!/usr/bin/env python
import argparse
import feedparser
import html2text
import os
import re
import requests
import subprocess
import sys
import toml
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
from readability import Document
from time import mktime
from urllib.parse import urlsplit, urlunsplit
"""
Output functions
"""
# Print log message
def log(text, force=False):
    """Print a timestamped log line; suppressed unless `verbose` or `force`."""
    if verbose or force:
        print("{} | {}".format(datetime.now().strftime("%d.%m %H:%M"), text))
# Print error message (callers decide whether to exit afterwards)
def error(text):
    """Print a timestamped error line, marked with 'E'."""
    print("{} E {}".format(datetime.now().strftime("%d.%m %H:%M"), text))
# Print spiderss logo
def print_logo():
    """Print the spiderss ASCII-art banner."""
    # NOTE(review): the art's internal alignment may have been mangled by
    # whitespace stripping in this copy — verify against the upstream file.
    logo = """
    ;:
    .N' ,K:
    ,O .0MWx' lk 0;
    ,kl;';O lx. .xc :k, .kMMXl.c .:x. .xl .dl :kl,'oo .xd:,,O.
    kNOokOWc ;WMKccXMX .WMX. :XM,,OMMl oXMcNMd 'oMMk:XW:.OWddO0N, cKNlkONk
    MMo 0c KMK :MM' XMO oMM. MMl cMM. ON: .MMl 'MM, .K' OMX oO
    WMWxOWMN: KMK ;MM' XMO oMM. MMl cMM:c; .MMl .WMXx0MMX. xMMOkNMWk
    'X; .MMo KMK ;MM' XMO oMM. MMl cMM, .MMl :X. ;MM, .0d 0MX
    .Kdc:'MMo.oNMNl;lMW. .WM0 KMMk:'MMl dMM: .. cMMk.' ,Xlc;cMM, xOl:'KMX
    ;kddkNKl. XMNkWk, :N0; .'cOW0c. ,lOW0; .:0Nl. okddOW0:. .kdoxXNd,
    WMX
    ;..cc
    """
    print(logo)
"""
Utility functions
"""
# Get articles of a feed
def get_articles(feed):
    """Fetch the feed at feed['url'] and return its list of entries."""
    feed = feedparser.parse(feed["url"])
    return feed.entries
# Write text to file
def write_to_file(filepath, text):
    """Write `text` to `filepath`, overwriting any existing content.

    Uses a context manager so the file handle is closed even when the
    write raises (the original open/write/close sequence leaked the
    descriptor on error).
    """
    with open(filepath, "w") as file:
        file.write(text)
# Get filename postfix from a title
def get_filename_postfix(title):
    """Build a filename postfix from a title.

    The title is lowercased, stripped of everything but letters, digits and
    spaces, spaces become underscores, and the configured `fileending` is
    appended.
    """
    cleaned = re.sub("[^A-Za-z0-9 ]+", "", title.lower())
    slug = cleaned.replace(" ", "_")
    return "{}.{}".format(slug, fileending)
# Get HTML image snippet from the first image url in a text
def get_image_snippet(text):
    """Return an <img> HTML snippet for the first image URL in `text`.

    Returns an empty string when no .png/.jpg/.jpeg URL is found.
    """
    try:
        match = re.search(
            "(?P<image>https?://\S+(\.png|\.jpg|\.jpeg))", text, re.IGNORECASE
        )
        # match is None when nothing was found; the attribute access below
        # then raises and we fall through to the empty-string fallback.
        return '<img src="{}" alt="Image">\n\n'.format(match.group("image"))
    except Exception:
        return ""
# Get HTML summary snippet from a HTML text
def get_summary_snippet(text):
    """Return the first paragraph of `text` (HTML) wrapped in <p><b>…</b></p>,
    or an empty string if conversion fails."""
    try:
        h = html2text.HTML2Text()
        h.unicode_snob = True  # keep unicode chars instead of ASCII stand-ins
        h.ignore_links = True
        h.ignore_images = True
        h.body_width = 0  # disable hard line wrapping
        # The first markdown paragraph of the converted HTML is the summary.
        summary = h.handle(text).split("\n\n")[0].strip()
        return "<p><b>{}</b></p>\n\n".format(summary)
    except Exception:
        return ""
# Get article body either from web or its content
def get_article_body(article, feed):
    """Return the article's HTML body.

    When the feed is configured with 'scrape', the linked page is fetched
    and reduced via readability; otherwise the body is assembled from the
    feed entry's own content (summary as fallback). Relative image and
    anchor URLs are then rewritten to absolute ones.
    """
    body = ""
    # If scrape, get article with readability
    if feed["scrape"]:
        # Spoof a browser user agent; some sites block generic clients.
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36"
        }
        response = requests.get(article.link, headers=headers)
        doc = Document(response.text)
        body = doc.summary()
    # Else construct from article object
    else:
        # Add all content to body
        if hasattr(article, "content"):
            for c in article.content:
                if c.type == "text/html" or c.type == "text/plain":
                    body += c.value
        # Use summary as fallback
        elif hasattr(article, "summary"):
            body += article.summary
    # Replace relative links with absolute ones, using beautifulsoup
    try:
        splitted_url = urlsplit(article.link)
    except Exception:
        # Entry carries no usable link — derive the base from the feed URL.
        splitted_url = urlsplit(feed["url"])
    soup = BeautifulSoup(body, features="lxml")
    for img in soup.find_all("img", src=True):
        src = img.get("src")
        splitted_src = urlsplit(src)
        constructed_src = [
            splitted_src.scheme,
            splitted_src.netloc,
            splitted_src.path,
            splitted_src.query,
            splitted_src.fragment,
        ]
        # Inherit scheme / host from the article URL when missing.
        if constructed_src[0] == "":
            constructed_src[0] = splitted_url.scheme
        if constructed_src[1] == "":
            constructed_src[1] = splitted_url.netloc
        new_src = urlunsplit(constructed_src)
        if new_src.startswith("http"):
            # Replace only the first occurrence to keep other URLs intact.
            body = body.replace('"{}"'.format(src), '"{}"'.format(new_src), 1)
    for a in soup.find_all("a", href=True):
        href = a.get("href")
        splitted_href = urlsplit(href)
        constructed_href = [
            splitted_href.scheme,
            splitted_href.netloc,
            splitted_href.path,
            splitted_href.query,
            splitted_href.fragment,
        ]
        if constructed_href[0] == "":
            constructed_href[0] = splitted_url.scheme
        if constructed_href[1] == "":
            constructed_href[1] = splitted_url.netloc
        new_href = urlunsplit(constructed_href)
        if new_href.startswith("http"):
            body = body.replace('"{}"'.format(href), '"{}"'.format(new_href), 1)
    return body
# Postprocess HTML
def postprocess(text):
    """Pipe `text` through the configured external `postprocessor` command
    and return its stripped stdout; exits the program on any failure."""
    try:
        processor = subprocess.Popen(
            postprocessor.split(" "),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        (output, err) = processor.communicate(input=text.encode())
        if err:
            # Any stderr output is treated as a fatal postprocessor failure.
            raise Exception(err.decode().strip())
    except Exception as e:
        error(" while postprocessing: {}".format(e))
        sys.exit(1)
    return output.decode().strip()
# Get constructed article
def get_article(article, feed):
    """Assemble the final article text: header (title, image, summary, date,
    source link), an <hr> separator, the body — then postprocess it."""
    # Get body of article
    body = get_article_body(article, feed)
    # Construct head of article
    image = get_image_snippet(str(article))
    if image == "":
        # No image in the feed entry itself — look in the body instead.
        image = get_image_snippet(body)
    summary = get_summary_snippet(article.summary)
    if summary == "":
        summary = get_summary_snippet(body)
    try:
        date = datetime.fromtimestamp(mktime(article.published_parsed)).strftime(
            datetime_format
        )
    except Exception:
        # No parseable publish date — stamp with the current time.
        date = datetime.now().strftime(datetime_format)
    try:
        link = article.link
    except Exception:
        # Entry has no link; fall back to the feed's site root.
        splitted_url = urlsplit(feed["url"])
        splitted_link = [splitted_url.scheme, splitted_url.netloc, "", "", ""]
        link = urlunsplit(splitted_link)
    head = "<h1>{}</h1>\n\n{}{}<p>{} - <a href={}>Link</a></p>".format(
        article.title, image, summary, date, link
    )
    # Postprocess article
    article_text = postprocess("{}\n\n<hr>\n\n{}".format(head, body)).strip()
    return article_text
# Update feed
def update_feed(feed):
    """Fetch one feed and write every new, unfiltered article into its
    category/name/new directory; existing and 'loved' articles are skipped."""
    log(' updating feed "{}"'.format(feed["name"]))
    # Set feedpaths
    feedpath_new = os.path.join(base_directory, feed["category"], feed["name"], "new")
    feedpath_read = os.path.join(base_directory, feed["category"], feed["name"], "read")
    if not os.path.exists(feedpath_new):
        os.makedirs(feedpath_new)
    if not os.path.exists(feedpath_read):
        os.makedirs(feedpath_read)
    # Get exisiting articles
    existing_articles = (
        os.listdir(feedpath_new) + os.listdir(feedpath_read) + os.listdir(lovedpath)
    )
    # Update articles
    articles = get_articles(feed)
    threshold_date = datetime.now() - timedelta(days=max_age)
    if len(articles) == 0:
        error('no articles received from feed "{}"'.format(feed["name"]))
    for a in articles:
        try:
            # Set fallback if no parseable date found
            fallback = False
            try:
                date = datetime.fromtimestamp(mktime(a.published_parsed))
            except Exception:
                date = datetime.now()
                fallback = True
            if date > threshold_date:
                # Check if article should be filtered
                filter = False
                for f in filters:
                    if re.search(f, a.title.lower()):
                        filter = True
                        log(' filtered article "{}"'.format(a.title))
                if not filter:
                    # Construct filename
                    filename_prefix = date.strftime("%Y%m%d%H%M")
                    filename_postfix = get_filename_postfix(a.title)
                    filename = "{}_{}".format(filename_prefix, filename_postfix)
                    # Check if article exists
                    article_exists = False
                    if fallback:
                        # Without a real publish date the prefix is 'now', so
                        # compare only the title part after the 13-char
                        # 'YYYYmmddHHMM_' prefix. (The comprehension's `a`
                        # does not leak into this scope in Python 3.)
                        existing_articles_fallback = [a[13:] for a in existing_articles]
                        if filename_postfix in existing_articles_fallback:
                            article_exists = True
                    elif filename in existing_articles:
                        article_exists = True
                    if not article_exists:
                        text = get_article(a, feed)
                        write_to_file(os.path.join(feedpath_new, filename), text)
                        log(' added article "{}"'.format(a.title))
        except Exception as e:
            error(
                'while parsing article "{}" from feed "{}": {}'.format(
                    a.title, feed["name"], e
                )
            )
# Delete articles older than max_age
def remove_old_articles():
    """Delete every stored article older than `max_age` days, except those
    kept in the 'loved' directory."""
    threshold_date = datetime.now() - timedelta(days=max_age)
    count = 0
    for subdir, dirs, files in os.walk(base_directory):
        # Skip 'loved' directory
        if not os.path.join(base_directory, "loved") in subdir:
            for file in files:
                # Filenames start with a 'YYYYmmddHHMM' timestamp prefix.
                date = datetime.strptime(file[:12], "%Y%m%d%H%M")
                if threshold_date > date:
                    os.remove(os.path.join(subdir, file))
                    count += 1
    log(" removed {} articles".format(count))
# Parse config file
def load_config(filepath):
    """Load the TOML config at `filepath` into module-level settings;
    exits the program when the file is missing or malformed."""
    global base_directory, max_age, datetime_format, postprocessor, fileending, filters, feeds
    try:
        config = toml.load(filepath)
        base_directory = config["base_directory"]
        max_age = config["max_age"]
        datetime_format = config["datetime_format"]
        postprocessor = config["postprocessor"]
        fileending = config["fileending"]
        filters = config["filters"]
        feeds = config["feed"]
    except Exception as e:
        error("while parsing config: {}".format(e))
        sys.exit(1)
# Initialize spiderss
def initialize():
    """Prepare the on-disk layout: ensure the 'loved' directory exists.

    Must run after `load_config` has set `base_directory`.
    """
    global lovedpath
    # Create 'loved' directory if not existent.
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() / os.makedirs() pair.
    lovedpath = os.path.join(base_directory, "loved")
    os.makedirs(lovedpath, exist_ok=True)
# Update all feeds and remove old articles
def crawl():
    """Update every configured feed, then prune expired articles."""
    log("crawling feeds", True)
    for feed in feeds:
        update_feed(feed)
    log("removing old articles", True)
    remove_old_articles()
"""
Main
"""
def main():
    """Parse CLI options, load the config and run one crawl cycle."""
    global verbose
    # Initialize parser
    parser = argparse.ArgumentParser(
        description="Crawl RSS feeds and store articles as Markdown files."
    )
    parser.add_argument("-v", "--verbose", action="store_true", help="verbose output")
    parser.add_argument(
        "-c",
        "--config",
        default="./config.toml",
        help="config file (default: ./config.toml)",
    )
    # Get args
    args = parser.parse_args()
    verbose = args.verbose
    config = args.config
    # Main routine
    print_logo()
    load_config(config)
    initialize()
    crawl()


if __name__ == "__main__":
    main()
| 2.53125 | 3 |
examples/transformer/transformer_main.py | lunayach/texar-pytorch | 0 | 12760156 | # Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer model.
"""
import argparse
import functools
import importlib
import os
import pickle
import random
import torch
from tqdm import tqdm
from torchtext import data
import texar as tx
from texar.modules import Transformer
from bleu_tool import bleu_wrapper
from utils import data_utils, utils
from utils.preprocess import eos_token_id
# Command-line options: config module names, run mode and output locations.
parser = argparse.ArgumentParser()
parser.add_argument("--config_model",
                    type=str,
                    default="config_model",
                    help="The model config.")
parser.add_argument("--config_data",
                    type=str,
                    default="config_iwslt15",
                    help="The dataset config.")
parser.add_argument("--run_mode",
                    type=str,
                    default="train_and_evaluate",
                    help="Either train_and_evaluate or test.")
parser.add_argument("--model_dir",
                    type=str,
                    default="./outputs/",
                    help="Path to save the trained model and logs.")
parser.add_argument("--model_fn",
                    type=str,
                    default="best-model.ckpt",
                    help="Model filename to save the trained weights")
args = parser.parse_args()

# Config files are plain Python modules resolved by name at runtime.
config_model = importlib.import_module(args.config_model)
config_data = importlib.import_module(args.config_data)

# Seed all RNGs for reproducibility.
utils.set_random_seed(config_model.random_seed)
def main():
    """Entry point.

    Builds the Transformer and trains / evaluates / tests it according to
    the ``--run_mode`` command-line option.
    """
    # Load data
    train_data, dev_data, test_data = data_utils.load_data_numpy(
        config_data.input_dir, config_data.filename_prefix)
    with open(config_data.vocab_file, 'rb') as f:
        id2w = pickle.load(f)
    beam_width = getattr(config_model, "beam_width", 1)
    # Create logging
    tx.utils.maybe_create_dir(args.model_dir)
    logging_file = os.path.join(args.model_dir, 'logging.txt')
    logger = utils.get_logger(logging_file)
    print(f"logging file is saved in: {logging_file}")
    model = Transformer(config_model, config_data)
    if torch.cuda.is_available():
        model = model.cuda()
        device = torch.cuda.current_device()
    else:
        device = None
    best_results = {'score': 0, 'epoch': -1}
    lr_config = config_model.lr_config
    if lr_config["learning_rate_schedule"] == "static":
        init_lr = lr_config["static_lr"]
        scheduler_lambda = lambda x: 1.0
    else:
        init_lr = lr_config["lr_constant"]
        scheduler_lambda = functools.partial(
            utils.get_lr_multiplier, warmup_steps=lr_config["warmup_steps"])
    optim = torch.optim.Adam(
        model.parameters(), lr=init_lr, betas=(0.9, 0.997), eps=1e-9)
    scheduler = torch.optim.lr_scheduler.LambdaLR(optim, scheduler_lambda)

    def _eval_epoch(epoch, mode):
        # Decodes dev or test data (greedy or beam search) and reports BLEU.
        torch.cuda.empty_cache()
        if mode == 'eval':
            eval_data = dev_data
        elif mode == 'test':
            eval_data = test_data
        else:
            raise ValueError("`mode` should be either \"eval\" or \"test\".")
        references, hypotheses = [], []
        bsize = config_data.test_batch_size
        for i in tqdm(range(0, len(eval_data), bsize)):
            sources, targets = zip(*eval_data[i:i + bsize])
            with torch.no_grad():
                x_block = data_utils.source_pad_concat_convert(
                    sources, device=device)
                predictions = model(
                    encoder_input=x_block,
                    is_train_mode=False,
                    beam_width=beam_width)
                if beam_width == 1:
                    decoded_ids = predictions[0].sample_id
                else:
                    decoded_ids = predictions["sample_id"][:, :, 0]
                hypotheses.extend(h.tolist() for h in decoded_ids)
                references.extend(r.tolist() for r in targets)
        hypotheses = utils.list_strip_eos(hypotheses, eos_token_id)
        references = utils.list_strip_eos(references, eos_token_id)
        if mode == 'eval':
            # Writes results to files to evaluate BLEU
            # For 'eval' mode, the BLEU is based on token ids (rather than
            # text tokens) and serves only as a surrogate metric to monitor
            # the training process
            # TODO: Use texar.evals.bleu
            fname = os.path.join(args.model_dir, 'tmp.eval')
            hwords, rwords = [], []
            for hyp, ref in zip(hypotheses, references):
                hwords.append([str(y) for y in hyp])
                rwords.append([str(y) for y in ref])
            hwords = tx.utils.str_join(hwords)
            rwords = tx.utils.str_join(rwords)
            hyp_fn, ref_fn = tx.utils.write_paired_text(
                hwords, rwords, fname, mode='s',
                src_fname_suffix='hyp', tgt_fname_suffix='ref')
            eval_bleu = bleu_wrapper(ref_fn, hyp_fn, case_sensitive=True)
            eval_bleu = 100. * eval_bleu
            logger.info("epoch: %d, eval_bleu %.4f", epoch, eval_bleu)
            print(f"epoch: {epoch:d}, eval_bleu {eval_bleu:.4f}")
            if eval_bleu > best_results['score']:
                logger.info("epoch: %d, best bleu: %.4f", epoch, eval_bleu)
                best_results['score'] = eval_bleu
                best_results['epoch'] = epoch
                model_path = os.path.join(args.model_dir, args.model_fn)
                logger.info("Saving model to %s", model_path)
                print(f"Saving model to {model_path}")
                states = {
                    'model': model.state_dict(),
                    'optimizer': optim.state_dict(),
                    'scheduler': scheduler.state_dict(),
                }
                torch.save(states, model_path)
        elif mode == 'test':
            # For 'test' mode, together with the cmds in README.md, BLEU
            # is evaluated based on text tokens, which is the standard metric.
            fname = os.path.join(args.model_dir, 'test.output')
            hwords, rwords = [], []
            for hyp, ref in zip(hypotheses, references):
                hwords.append([id2w[y] for y in hyp])
                rwords.append([id2w[y] for y in ref])
            hwords = tx.utils.str_join(hwords)
            rwords = tx.utils.str_join(rwords)
            hyp_fn, ref_fn = tx.utils.write_paired_text(
                hwords, rwords, fname, mode='s',
                src_fname_suffix='hyp', tgt_fname_suffix='ref')
            logger.info("Test output written to file: %s", hyp_fn)
            print(f"Test output written to file: {hyp_fn}")

    def _train_epoch(epoch: int):
        # One pass over the (shuffled, length-bucketed) training data.
        torch.cuda.empty_cache()
        random.shuffle(train_data)
        train_iter = data.iterator.pool(
            train_data,
            config_data.batch_size,
            key=lambda x: (len(x[0]), len(x[1])),
            # key is not used if sort_within_batch is False by default
            batch_size_fn=utils.batch_size_fn,
            random_shuffler=data.iterator.RandomShuffler())
        for _, train_batch in tqdm(enumerate(train_iter)):
            optim.zero_grad()
            in_arrays = data_utils.seq2seq_pad_concat_convert(
                train_batch, device=device)
            loss = model(
                encoder_input=in_arrays[0],
                is_train_mode=True,
                decoder_input=in_arrays[1],
                labels=in_arrays[2],
            )
            loss.backward()
            optim.step()
            scheduler.step()
            step = scheduler.last_epoch
            if step % config_data.display_steps == 0:
                logger.info('step: %d, loss: %.4f', step, loss)
                lr = optim.param_groups[0]['lr']
                print(f"lr: {lr} step: {step}, loss: {loss:.4}")
            if step and step % config_data.eval_steps == 0:
                _eval_epoch(epoch, mode='eval')

    if args.run_mode == 'train_and_evaluate':
        logger.info("Begin running with train_and_evaluate mode")
        model_path = os.path.join(args.model_dir, args.model_fn)
        if os.path.exists(model_path):
            # BUGFIX: the message had no %s placeholder, so logging raised a
            # formatting error instead of printing the path (cf. the correct
            # call in the 'eval' branch below).
            logger.info("Restore latest checkpoint in %s", model_path)
            ckpt = torch.load(model_path)
            model.load_state_dict(ckpt['model'])
            optim.load_state_dict(ckpt['optimizer'])
            scheduler.load_state_dict(ckpt['scheduler'])
            _eval_epoch(0, mode='test')
        for epoch in range(config_data.max_train_epoch):
            _train_epoch(epoch)
            _eval_epoch(epoch, mode='eval')
    elif args.run_mode == 'eval':
        logger.info("Begin running with evaluate mode")
        model_path = os.path.join(args.model_dir, args.model_fn)
        logger.info("Restore latest checkpoint in %s", model_path)
        ckpt = torch.load(model_path)
        model.load_state_dict(ckpt['model'])
        _eval_epoch(0, mode='eval')
    elif args.run_mode == 'test':
        logger.info("Begin running with test mode")
        model_path = os.path.join(args.model_dir, args.model_fn)
        # BUGFIX: added the missing %s placeholder (same defect as above).
        logger.info("Restore latest checkpoint in %s", model_path)
        ckpt = torch.load(model_path)
        model.load_state_dict(ckpt['model'])
        _eval_epoch(0, mode='test')
    else:
        raise ValueError(f"Unknown mode: {args.run_mode}")


if __name__ == '__main__':
    main()
| 1.828125 | 2 |
utils/encoders.py | rezer0dai/bnpo | 0 | 12760157 | <filename>utils/encoders.py
import abc
import numpy as np
import torch
#from utils.rbf import *
from utils.normalizer import *
class IEncoder(nn.Module):
    """Abstract base for state encoders.

    size_in: flat size of a single raw input state.
    size_out: flat size of the encoded output (may span stacked history).
    n_features: width of the per-state feature vector (e.g. RNN memory).
    """
    def __init__(self, size_in, size_out, n_features):
        super().__init__()
        self.size_in = size_in
        self.size_out = size_out
        self.n_features = n_features

    @abc.abstractmethod
    def forward(self, states, memory):
        """Encode `states`; must return (encoded_states, memory)."""
        pass

    def out_size(self):
        return self.size_out

    def in_size(self):
        return self.size_in

    def features_n(self):
        return self.n_features

    def has_features(self):
        # Subclasses carrying recurrent state override this to return True.
        return False

    def extract_features(self, states):
        # Allocate a zeroed feature row per flattened state, reshaped back to
        # one row per input row, then run the normal forward pass.
        feats = torch.zeros(  # states can come in group, but features is per state not group
            states.view(-1, self.size_in).size(0), self.n_features).reshape(
            states.size(0), -1).to(states.device)
        return self.forward(states, feats)
# better to rethink design of this ~ because of RNN ~ features, multiple ? dont over-engineer though...
class StackedEncoder(IEncoder):
    """Composes two encoders: states flow through encoder_a, then encoder_b."""
    def __init__(self, encoder_a, encoder_b):
        super().__init__(size_in=encoder_a.in_size(), size_out=encoder_b.out_size(), n_features=encoder_b.features_n())
        self.encoder_a = encoder_a
        self.encoder_b = encoder_b
        # BUGFIX: the second operand checked encoder_a twice; it must inspect
        # encoder_b so that stacking two feature-carrying (RNN) encoders is rejected.
        assert not self.encoder_a.has_features() or not self.encoder_b.has_features(), "only one RNN is allowed in encoder!"
        assert not self.encoder_a.has_features(), "Currently RNN can be only *last* layer of encoder!!"

    def features_n(self):
        return self.encoder_b.features_n()

    def has_features(self):
        # BUGFIX: originally queried encoder_a twice, ignoring encoder_b.
        return self.encoder_a.has_features() or self.encoder_b.has_features()

    def forward(self, states, memory):
        states, memory = self.encoder_a(states, memory)
        return self.encoder_b(states, memory)

    def extract_features(self, states):
        states, features_a = self.encoder_a.extract_features(states)
        states, features_b = self.encoder_b.extract_features(states)
        # The feature-carrying encoder's features win; fall back to encoder_a's.
        return states, features_b if self.encoder_b.has_features() else features_a
class IdentityEncoder(IEncoder):
    """Pass-through encoder: returns states and memory unchanged."""
    def __init__(self, size_in):
        super().__init__(size_in=size_in, size_out=size_in, n_features=1)

    def forward(self, states, memory):
        return states, memory
class RBFEncoder(IEncoder):
    """Encodes states through an RBF (radial basis function) transform."""
    # NOTE(review): RbfState comes from utils.rbf, whose import is commented
    # out at the top of this file — this class raises NameError unless that
    # import is restored. Confirm before use.
    def __init__(self, size_in, env, gamas, components, sampler = None):
        self.encoder = RbfState(env, gamas, components, sampler)
        super().__init__(size_in=size_in, size_out=self.encoder.size, n_features=1)

    def forward(self, states, memory):
        states = states.view(-1, self.size_in)
        # RbfState.transform appears to work on numpy arrays; the result is
        # converted back to a tensor on memory's device.
        states = self.encoder.transform(states).reshape(-1, self.size_out)
        return torch.from_numpy(states).to(memory.device), memory
class BatchNormalizer2D(IEncoder):
    """BatchNorm over the flattened (state * history) vector."""
    def __init__(self, size_in, n_history):
        super().__init__(size_in=size_in*n_history, size_out=size_in*n_history, n_features=1)
        self.bn = nn.BatchNorm1d(self.size_in)

    def forward(self, states, memory):
        states = states.view(-1, self.size_in)
        # A batch of one provides no batch statistics in training mode, so
        # temporarily switch to eval (running stats) for single samples.
        if states.size(0) == 1: self.eval()
        out = self.bn(states)
        if states.size(0) == 1: self.train()
        return out, memory
class BatchNormalizer3D(IEncoder):
    """BatchNorm applied across the history dimension of stacked states."""
    def __init__(self, size_in, n_history):
        super().__init__(size_in=size_in, size_out=size_in*n_history, n_features=1)
        self.n_history = n_history
        self.bn = nn.BatchNorm1d(self.n_history)

    def forward(self, states, memory):
        states = states.view(-1, self.n_history, self.size_in)
        # Single-sample batches cannot be normalized in training mode; fall
        # back to eval (running statistics) for that call only.
        if states.size(0) == 1: self.eval()
        out = self.bn(states).view(-1, self.size_out)
        if states.size(0) == 1: self.train()
        return out, memory

    def extract_features(self, states):
        # Overridden because size_out here covers the full stacked history,
        # unlike the base class which allocates per size_in chunk.
        assert states.size(1) == self.size_out
        feats = torch.zeros(
            states.view(-1, self.size_out).size(0), self.n_features).to(states.device)
        return self.forward(states, feats)
class GlobalNormalizerWGrads(IEncoder):
    """Running mean/std normalization whose parameters keep gradients."""
    def __init__(self, size_in, n_history):
        super().__init__(size_in=size_in*n_history, size_out=size_in*n_history, n_features=1)
        self.norm = Normalizer(self.size_in)
        self.add_module("norm", self.norm)

    def forward(self, states, memory):
        # Fold this batch into the running statistics before normalizing.
        self.norm.update(states)
        return self.norm.normalize(states).view(-1, self.size_out), memory
class GlobalNormalizer(GlobalNormalizerWGrads):
    """Same as GlobalNormalizerWGrads, but with the normalizer's parameters
    frozen so they are excluded from gradient-based optimization."""
    def __init__(self, size_in, n_history):
        super().__init__(size_in, n_history)
        for p in self.norm.parameters():
            p.requires_grad = False
class GoalGlobalNorm(nn.Module):
    """Standalone running normalizer for goal vectors (not an IEncoder)."""
    def __init__(self, size):
        super().__init__()
        self.size = size
        self.norm = Normalizer(self.size)
        self.add_module("norm", self.norm)
        # Statistics are updated online, not trained by gradients.
        for p in self.norm.parameters():
            p.requires_grad = False

    def forward(self, states):
        shape = states.size()
        states = states.view(-1, self.size)
        self.norm.update(states)
        # Restore the caller's original shape after normalization.
        return self.norm.normalize(states).view(shape)
| 2.8125 | 3 |
classes/factories/TrainersFactory.py | anuj-harisinghani/canary-nlp | 0 | 12760158 | from classes.trainer.Trainer import Trainer
from classes.trainer.SingleModelTrainer import SingleModelTrainer
from classes.trainer.TaskFusionTrainer import TaskFusionTrainer
from classes.trainer.ModelEnsembleTrainer import ModelEnsembleTrainer
class TrainersFactory:
    """Maps a training-mode name onto its Trainer implementation."""

    def __init__(self):
        # Registry of supported training modes.
        self.__trainers = {
            "single_tasks": SingleModelTrainer,
            "fusion": TaskFusionTrainer,
            "ensemble": ModelEnsembleTrainer
        }

    def get(self, mode: str) -> Trainer:
        """
        get -> returns a Trainer class from the given mode
        :param mode: single_tasks, fusion or ensemble. Used to choose the type of Trainer.
        :return: Trainer class
        :raises ValueError: if the mode is not a supported trainer.
        """
        # Membership test on the dict directly; `.keys()` was redundant.
        if mode not in self.__trainers:
            raise ValueError("Trainer '{}' not supported! Supported trainers are: {}"
                             .format(mode, self.__trainers.keys()))
        # Instantiate a fresh trainer for each request.
        return self.__trainers[mode]()
| 2.625 | 3 |
scripts/disabled_tests/issue_filter.py | JasonFengJ9/aqa-tests | 0 | 12760159 | <filename>scripts/disabled_tests/issue_filter.py
import argparse
import json
import logging
import os
import re
import sys
import textwrap
from typing import List, Sequence
from common import models
# Configure root-level logging once at import time.
logging.basicConfig(
    format="%(levelname)s - %(message)s",
)
LOG = logging.getLogger()
# common prefix for environment variables
ENV_ARG_PREFIX = 'AQA_ISSUE_FILTER_'
class Filter:
    """Base class for issue filters.

    A filter holds a compiled regular expression; it accepts an issue when
    the relevant field (selected by the subclass) matches that expression.
    """
    # name of command-line argument for this filter. Overridden by subclasses
    CLI_ARG_NAME: str
    # name of the environment variable for this filter. Overridden by subclasses
    ENV_ARG_NAME: str
    # name of the property under which the value of the argument will be stored. Overridden by subclasses
    CLI_METAVAR: str
    # flag to indicate raw regex expressions
    RE_PREFIX: str = 're:'

    def __init__(self, pattern_source: str, pattern: re.Pattern):
        self.pattern_source = pattern_source
        self.pattern = pattern

    def accept(self, issue: models.Scheme) -> bool:
        """
        Returns `True` if this filter "matches" the given issue; `False` otherwise
        """
        field_value = self.extract_field(issue)
        return self.pattern.match(field_value) is not None

    def extract_field(self, s: models.Scheme) -> str:
        """
        Extract the relevant field value for a filter from an issue scheme
        """
        # BUGFIX: `raise NotImplemented` raises a TypeError, because
        # NotImplemented is a sentinel value, not an exception class;
        # NotImplementedError is the correct exception for abstract methods.
        raise NotImplementedError

    # extra information regarding filter formats; appended to the command-line help text
    CLI_EXTRA_HELP_TEXT: str = textwrap.dedent(f"""
    Filter expressions can be provided through (in ascending order of priority):
    1. command-line switches
    2. environment variables
    Filter expressions can be of any 2 formats:
    1. a comma-separated list of exact matches
    info: whitespace around commas matters; case-insensitive
    e.g.
    - 11,17,18+
    - openJ9
    - aarch64_linux,Aarch64_macos
    2. a regular expression, prefixed with {RE_PREFIX!r}
    info: be cautious of escaping rules on your shell when using a backslash '\\'
    e.g.
    - {RE_PREFIX}1[4-8]
    - {RE_PREFIX}x86\\-64.*
    Note: an empty expression is equivalent to not specifying the filter at all
    e.g.
    --jdk-version=\"\" is equivalent to not specifying a filter on jdk-version""")

    @classmethod
    def from_string(cls, string: str):
        """
        Construct a `Filter` from a user-provided string
        """
        if string.startswith(cls.RE_PREFIX):
            re_pattern = string[len(cls.RE_PREFIX):]  # remove prefix
            LOG.debug(f'Using user-provided pattern {re_pattern!r} for {cls.CLI_ARG_NAME}')
        else:
            exact_matches = string.split(',')
            # escape characters that have a significance in regular expressions (like '+', '.', etc.)
            escaped_matches = [re.escape(m) for m in exact_matches]
            # Case-insensitive, anchored alternation of the exact matches.
            re_pattern = '(?i)^' + '|'.join(f'({m})' for m in escaped_matches) + '$'
            LOG.debug(f'Using generated pattern {re_pattern!r} for {cls.CLI_ARG_NAME}')
        compiled_pattern = re.compile(re_pattern)
        return cls(
            pattern_source=string,
            pattern=compiled_pattern
        )
class JdkVersionFilter(Filter):
    """Filters issues on their 'JDK_VERSION' field."""
    CLI_ARG_NAME = 'jdk-version'
    ENV_ARG_NAME = ENV_ARG_PREFIX + 'JDK_VERSION'
    CLI_METAVAR = 'jdk_version'
    def extract_field(self, s: models.Scheme) -> str:
        return s['JDK_VERSION']
class JdkImplementationFilter(Filter):
    """Filters issues on their 'JDK_IMPL' field."""
    CLI_ARG_NAME = 'jdk-implementation'
    ENV_ARG_NAME = ENV_ARG_PREFIX + 'JDK_IMPLEMENTATION'
    CLI_METAVAR = 'jdk_implementation'
    def extract_field(self, s: models.Scheme) -> str:
        return s['JDK_IMPL']
class PlatformFilter(Filter):
    """Filters issues on their 'PLATFORM' field."""
    CLI_ARG_NAME = 'platform'
    ENV_ARG_NAME = ENV_ARG_PREFIX + 'PLATFORM'
    CLI_METAVAR = 'platform'
    def extract_field(self, s: models.Scheme) -> str:
        return s['PLATFORM']
def build_filters_from_args_and_env(args):
    """
    Construct `Filter` instances corresponding to each filter specified on the command-line
    """
    filters = []
    # for all subclasses of `Filter`
    for filter_klass in Filter.__subclasses__():
        # get argument value corresponding to that subclass, either from the cli or the env
        # NOTE(review): `cli or env` makes the command line win, but the help text
        # ("in ascending order of priority: 1. command-line 2. environment")
        # reads as env taking precedence — confirm which behaviour is intended.
        filter_arg = getattr(args, filter_klass.CLI_METAVAR, None) or os.getenv(filter_klass.ENV_ARG_NAME, None)
        if filter_arg:
            # user provided a value
            filter_inst = filter_klass.from_string(filter_arg)
            filters.append(filter_inst)
        else:
            LOG.debug(f'No filter applied for {filter_klass.CLI_ARG_NAME}')
    return filters
def filter_all_issues(issues: Sequence[models.Scheme], filters: Sequence[Filter]):
    """
    Filter issues using filter instances, returning only the accepted ones.
    """
    accepted = []
    total = len(issues)
    for index, issue in enumerate(issues):
        log_prefix = f'{index + 1}/{total}: '
        rejecting = None
        for candidate in filters:
            if not candidate.accept(issue):
                rejecting = candidate
                break
        if rejecting is None:
            accepted.append(issue)
            LOG.debug(log_prefix + 'accepted')
        else:
            LOG.info(log_prefix + f'rejected: {rejecting.CLI_ARG_NAME}={rejecting.extract_field(issue)!r} '
                     f'does not match {rejecting.pattern_source!r} (re: {rejecting.pattern.pattern})')
    return accepted
def main():
    """Parse CLI args, build filters, read issues JSON, write the filtered JSON."""
    parser = argparse.ArgumentParser(description="Filter issues stored in JSON files\n\n" + Filter.CLI_EXTRA_HELP_TEXT,
                                     allow_abbrev=False, formatter_class=argparse.RawDescriptionHelpFormatter)
    # one switch per registered Filter subclass
    for filter_klass in Filter.__subclasses__():
        parser.add_argument(f'--{filter_klass.CLI_ARG_NAME}', type=str, default=None, metavar=filter_klass.CLI_METAVAR,
                            help=f"Filter for {filter_klass.CLI_ARG_NAME} [env: {filter_klass.ENV_ARG_NAME}]")
    parser.add_argument('--infile', '-i', type=argparse.FileType('r'), default=sys.stdin,
                        help='Input file, defaults to stdin')
    parser.add_argument('--outfile', '-o', type=argparse.FileType('w'), default=sys.stdout,
                        help='Output file, defaults to stdout')
    parser.add_argument('--verbose', '-v', action='count', default=0,
                        help="Enable info logging level, debug level if -vv")
    args = parser.parse_args()
    if args.verbose == 1:
        LOG.setLevel(logging.INFO)
    elif args.verbose == 2:
        LOG.setLevel(logging.DEBUG)
    LOG.debug("Building filters")
    filters = build_filters_from_args_and_env(args)
    LOG.debug(f"Loading JSON from {getattr(args.infile, 'name', '<unknown>')}")
    issues: List[models.Scheme] = json.load(args.infile)
    filtered_issues: List[models.Scheme] = filter_all_issues(issues, filters)
    LOG.debug(f"Outputting JSON to {getattr(args.outfile, 'name', '<unknown>')}")
    json.dump(
        obj=filtered_issues,
        fp=args.outfile,
        indent=2,
    )
if __name__ == '__main__':
    main()
| 2.59375 | 3 |
cargonet/models/eval/losses.py | romnnn/rail-stgcnn | 2 | 12760160 | <gh_stars>1-10
import torch
import torch.nn as nn
class RMSELoss(nn.Module):
    """Root-mean-square error: the square root of ``nn.MSELoss``."""
    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()
    def forward(self, x, y):
        return self.mse(x, y).sqrt()
class MAELoss(nn.Module):
    """Mean absolute error over all elements of the input tensors."""
    def __init__(self):
        super().__init__()
    def forward(self, x, y):
        return (x - y).abs().mean()
class ACCLoss(nn.Module):
    """'Accuracy' metric based on absolute prediction error.

    NOTE(review): this sums the *values* of the errors below `tolerance` and
    divides by the sum of all non-NaN errors — it is not a count-based
    accuracy. Confirm this weighting is intentional.
    """
    def __init__(self):
        super().__init__()
    def forward(self, x, y, tolerance=30):
        diff = torch.abs(y - x)
        # `diff == diff` is False only for NaN, so this sums all valid errors.
        valid_total = diff[diff == diff].sum()
        correct = diff[diff < tolerance].sum()
        return correct / valid_total
class CombinedLoss(nn.Module):
    """Convenience module computing MSE, MAE, ACC and RMSE in one pass.

    Returns a dict of plain Python floats (via ``.item()``), not tensors.
    """
    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()
        self.mae = MAELoss()
        self.acc = ACCLoss()
        self.rmse = RMSELoss()
    def forward(self, x, y):
        return dict(
            mse=self.mse(x, y).item(),
            mae=self.mae(x, y).item(),
            acc=self.acc(x, y).item(),
            rmse=self.rmse(x, y).item(),
        )
class LossCollector:
    """Accumulates per-batch metrics and reduces them to NaN-safe means."""
    def __init__(self):
        self.loss = CombinedLoss()
        # parallel lists of per-batch metric values
        self.mses, self.accs, self.maes, self.rmses = [], [], [], []
        # flattened predictions/targets kept for later inspection
        self._xs, self._ys = [], []
    def collect(self, x, y):
        """Record one (prediction, target) batch and return its metrics dict."""
        self._xs.append(x.reshape(-1))
        self._ys.append(y.reshape(-1))
        metrics = self.loss(x, y)
        return self.collect_metrics(metrics)
    def collect_metrics(self, metrics):
        """Append an already-computed metrics dict to the running lists."""
        self.mses.append(metrics["mse"])
        self.accs.append(metrics["acc"])
        self.maes.append(metrics["mae"])
        self.rmses.append(metrics["rmse"])
        return metrics
    def reduce(self):
        """Average the collected metrics, ignoring NaN entries, and return
        them together with the concatenated prediction/target tensors."""
        self.xs = None if len(self._xs) < 1 else torch.cat(self._xs, dim=0)
        self.ys = None if len(self._ys) < 1 else torch.cat(self._ys, dim=0)
        mse=torch.FloatTensor(self.mses)
        mae=torch.FloatTensor(self.maes)
        acc=torch.FloatTensor(self.accs)
        rmse=torch.FloatTensor(self.rmses)
        # `t[t == t]` drops NaNs (NaN != NaN) before averaging.
        return dict(
            mse=mse[mse == mse].mean().item(),
            mae=mae[mae == mae].mean().item(),
            acc=acc[acc == acc].mean().item(),
            rmse=rmse[rmse == rmse].mean().item(),
            xs=self.xs,
            ys=self.ys,
        )
    @staticmethod
    def format(metrics):
        """Render a metrics dict as a single summary line."""
        return "ACC={:.4f} MSE={:.4f} MAE={:.4f} RMSE={:.4f}".format(
            metrics["acc"], metrics["mse"], metrics["mae"], metrics["rmse"]
        )
    def summary(self):
        """Reduce and format in one step."""
        return self.format(self.reduce())
def loss(have, want):
    """Print MAE/ACC/RMSE between two tensors.

    Bug fix: the original referenced undefined module-level callables
    ``mae``, ``acc`` and ``rmse`` (NameError at call time); the metrics are
    now computed through :class:`CombinedLoss`, which already returns floats.
    """
    metrics = CombinedLoss()(have, want)
    print("MAE {:.4f}".format(metrics["mae"]))
    print("ACC {:.4f}".format(metrics["acc"]))
    print("RMSE {:.4f}".format(metrics["rmse"]))
| 2.609375 | 3 |
venv/Lib/site-packages/nbdime/utils.py | PeerHerholz/guideline_jupyter_book | 2 | 12760161 | # coding: utf-8
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import codecs
from collections import defaultdict
import errno
import io
import locale
import os
import re
from subprocess import check_output, CalledProcessError
import sys
from contextlib import contextmanager
import nbformat
from six import string_types, text_type, PY2
# Platform-dependent path of the null device, used as an explicit
# "this file is missing" marker (see read_notebook below).
if os.name == 'nt':
    EXPLICIT_MISSING_FILE = 'nul'
else:
    EXPLICIT_MISSING_FILE = '/dev/null'
def read_notebook(f, on_null, on_empty=None):
    """Read and return notebook json from filename
    Parameters:
        f: The filename to read from or null filename
           ("/dev/null" on *nix, "nul" on Windows).
           Alternatively a file-like object can be passed.
        on_null: What to return when filename null
            "empty": return empty dict
            "minimal": return miminal valid notebook
        on_empty: What to return when the file is completely empty (0 size)
            None: Raise an error
            "empty": return empty dict
            "minimal: return minimal valid notebook
    """
    if f == EXPLICIT_MISSING_FILE:
        if on_null == 'empty':
            return {}
        elif on_null == 'minimal':
            return nbformat.v4.new_notebook()
        else:
            raise ValueError(
                'Not valid value for `on_null`: %r. Valid values '
                'are "empty" or "minimal"' % (on_null,))
    else:
        try:
            return nbformat.read(f, as_version=4)
        except nbformat.reader.NotJSONError:
            if on_empty is None:
                raise
            # Reraise if file is not empty (only a truly 0-byte file may
            # fall through to the on_empty handling below)
            if isinstance(f, string_types):
                with io.open(f, encoding='utf-8') as fo:
                    if len(fo.read(10)) != 0:
                        raise
            if on_empty == 'empty':
                return {}
            elif on_empty == 'minimal':
                return nbformat.v4.new_notebook()
            else:
                raise ValueError(
                    'Not valid value for `on_empty`: %r. Valid values '
                    'are None, "empty" or "minimal"' % (on_empty,))
def as_text(text):
    """Coerce *text* — a str, bytes, or list of strings — to one unicode string."""
    if isinstance(text, list):
        text = "".join(text)
    return text.decode("utf8") if isinstance(text, bytes) else text
def as_text_lines(text):
    """Normalize *text* (a string, or a tuple/list of strings) to a list of lines."""
    if isinstance(text, string_types):
        text = text.splitlines(True)
    elif isinstance(text, tuple):
        text = list(text)
    assert isinstance(text, list), 'text argument should be string or string sequence'
    assert all(isinstance(t, string_types) for t in text), (
        'text argument should be string or string sequence')
    return text
def strings_to_lists(obj):
    """Recursively replace every string in a JSON-like structure with its list
    of lines (keeping line endings); inverse of `revert_strings_to_lists`."""
    if isinstance(obj, dict):
        return {k: strings_to_lists(v) for k, v in obj.items()}
    elif isinstance(obj, list):
        return [strings_to_lists(v) for v in obj]
    elif isinstance(obj, string_types):
        return obj.splitlines(True)
    else:
        return obj
def revert_strings_to_lists(obj):
    """Recursively join lists of strings back into single strings.

    A list whose first element is a string is assumed to be a previously
    split string (see `strings_to_lists`) and is re-joined.
    """
    if isinstance(obj, dict):
        return {k: revert_strings_to_lists(v) for k, v in obj.items()}
    elif isinstance(obj, list):
        if not obj:
            return obj
        elif isinstance(obj[0], string_types):
            return "".join(obj)
        else:
            return [revert_strings_to_lists(v) for v in obj]
    else:
        return obj
def split_path(path):
    "Split a path on the form '/foo/bar' into ['foo','bar']."
    segments = path.strip("/").split("/")
    return [segment for segment in segments if segment]
def join_path(*args):
    "Join a path on the form ['foo','bar'] into '/foo/bar'."
    # A single list/tuple/set argument is treated as the full segment sequence.
    if len(args) == 1 and isinstance(args[0], (list, tuple, set)):
        args = args[0]
    parts = [str(part) for part in args if part not in ("", "/")]
    joined = "/".join(parts)
    return joined if joined.startswith("/") else "/" + joined
# Matches optionally-signed integer strings like "-3" or "+42".
r_is_int = re.compile(r"^[-+]?\d+$")
def star_path(path):
    """Replace integers and integer-strings in a path with * """
    path = list(path)
    for i, p in enumerate(path):
        if isinstance(p, int):
            path[i] = '*'
        else:
            # bytes segments are decoded so the regex can test them
            if not isinstance(p, text_type):
                p = p.decode()
            if r_is_int.match(p):
                path[i] = '*'
    return join_path(path)
def resolve_path(obj, path):
    """Follow the keys/indices in *path* down into *obj* and return the value."""
    node = obj
    for key in path:
        node = node[key]
    return node
class Strategies(dict):
    """Simple dict wrapper for strategies to allow for wildcard matching of
    list indices + transients collection.
    """
    def __init__(self, *args, **kwargs):
        # `transients` / `fall_back` are consumed here so the remaining
        # args/kwargs behave like a normal dict initializer.
        self.transients = kwargs.pop("transients", [])
        self.fall_back = kwargs.pop("fall_back", None)
        super(Strategies, self).__init__(*args, **kwargs)
    def get(self, k, d=None):
        # Normalize the lookup path so integer segments become '*', letting a
        # single stored strategy match every index of a list.
        parts = split_path(k)
        key = star_path(parts)
        return super(Strategies, self).get(key, d)
def is_in_repo(pkg_path):
    """Get whether `pkg_path` is a repository, or is part of one

    Walks from `pkg_path` up to the filesystem root, looking for a `.git`
    entry at each level.

    Parameters
    ----------
    pkg_path : str
        directory containing package

    Returns
    -------
    is_in_repo : bool
        Whether directory is a part of a repository
    """
    previous = None
    candidate = pkg_path
    while previous != candidate:
        previous = candidate
        if os.path.exists(os.path.join(previous, '.git')):
            return True
        candidate = os.path.dirname(candidate)
    return False
def ensure_dir_exists(path):
    """Ensure a directory exists at a given path"""
    if os.path.exists(path):
        return
    try:
        os.makedirs(path)
    except OSError as err:
        # Tolerate a concurrent creation of the same directory.
        if err.errno != errno.EEXIST:
            raise
def locate_gitattributes(scope=None):
    """Locate the .gitattributes file

    scope may be None (current repository), 'global' (per-user file) or
    'system' ($(prefix)/etc/gitattributes).

    returns None if not in a git repo and scope=None
    """
    if scope == 'global':
        try:
            bpath = check_output(['git', 'config', '--global', 'core.attributesfile'])
            gitattributes = os.path.expanduser(bpath.decode('utf8', 'replace').strip())
        except CalledProcessError:
            # No explicit setting: fall back to the XDG default location.
            if os.environ.get('XDG_CONFIG_HOME'):
                gitattributes = os.path.expandvars('$XDG_CONFIG_HOME/git/attributes')
            else:
                gitattributes = os.path.expanduser('~/.config/git/attributes')
    elif scope == 'system':
        # git docs: "Attributes for all users on a system should be placed in
        # the $(prefix)/etc/gitattributes file". Our job is then to check for
        # $(prefix) value.
        try:
            env = os.environ.copy()
            env['GIT_EDITOR'] = 'echo'
            bpath = check_output(['git', 'config', '--system', '-e'], env=env)
            gitconfig = bpath.decode('utf8', 'replace').strip()
            gitattributes = os.path.join(os.path.dirname(gitconfig), 'gitattributes')
        except CalledProcessError:
            # Default to most likely case of empty $(prefix)
            # Sanity check:
            if not os.path.exists('/etc'):
                raise EnvironmentError('Could not find system gitattributes location!')
            # Bug fix: os.path.join was given a single *list* argument, which
            # raises a TypeError; join the two path components directly.
            gitattributes = os.path.join('/etc', 'gitattributes')
    else:
        # find .gitattributes in current dir
        path = os.path.abspath('.')
        if not os.path.exists(os.path.join(path, '.git')):
            return None
        gitattributes = os.path.join(path, '.gitattributes')
    return gitattributes
def is_prefix_array(parent, child):
    """Return True if *parent* is a (possibly empty) prefix of *child*."""
    if not parent:
        # Empty/None parent is a prefix of anything (incl. equal None values).
        return True
    if child is None or len(parent) > len(child):
        return False
    return all(p == c for p, c in zip(parent, child))
def find_shared_prefix(a, b):
    """Return the longest common leading slice of *a* and *b* (None if either is None)."""
    if a is None or b is None:
        return None
    if a is b:
        return a[:]
    shared = 0
    for left, right in zip(a, b):
        if left != right:
            break
        shared += 1
    # Slice so the result keeps the type of `a` (list stays list, etc.).
    return a[:shared]
def _setup_std_stream_encoding():
    """Setup encoding on stdout/err
    Ensures sys.stdout/err have error-escaping encoders,
    rather than raising errors.
    """
    if os.getenv('PYTHONIOENCODING'):
        # setting PYTHONIOENCODING overrides anything we would do here
        return
    _default_encoding = locale.getpreferredencoding() or 'UTF-8'
    for name in ('stdout', 'stderr'):
        stream = getattr(sys, name)
        raw_stream = getattr(sys, '__%s__' % name)
        if stream is not raw_stream:
            # don't wrap captured or redirected output
            continue
        enc = getattr(stream, 'encoding', None) or _default_encoding
        errors = getattr(stream, 'errors', None) or 'strict'
        # if error-handler is strict, switch to replace
        if errors == 'strict' or errors.startswith('surrogate'):
            if PY2:
                bin_stream = stream
            else:
                bin_stream = stream.buffer
            # Wrap the binary layer so unencodable chars are escaped, not fatal.
            new_stream = codecs.getwriter(enc)(bin_stream, errors='backslashreplace')
            setattr(sys, name, new_stream)
def setup_std_streams():
    """Setup sys.stdout/err
    - Ensures sys.stdout/err have error-escaping encoders,
      rather than raising errors.
    - enables colorama for ANSI escapes on Windows
    """
    _setup_std_stream_encoding()
    # must enable colorama after setting up encoding,
    # or encoding will undo colorama setup
    if sys.platform.startswith('win'):
        import colorama
        colorama.init()
def split_os_path(path):
    """Split an OS path into its components, dropping any leading separator."""
    normalized = os.path.normpath(path).lstrip(os.path.sep)
    return normalized.split(os.path.sep)
@contextmanager
def pushd(path):
    """Change current directory with context manager (changes back)"""
    # Bug fix: os.curdir is the literal string '.', so restoring with it was a
    # no-op and the previous directory was never reinstated; capture the real
    # working directory instead.
    old = os.getcwd()
    try:
        os.chdir(path)
        yield
    finally:
        os.chdir(old)
class defaultdict2(defaultdict):
    """A defaultdict variant that retains a dictionary of default values.

    Lookups of a missing key consult the ``default_values`` mapping first;
    only when the key is absent there as well does the usual
    ``default_factory`` behaviour of ``defaultdict`` apply.
    """
    def __init__(self, default_factory, default_values, *args, **kwargs):
        super(defaultdict2, self).__init__(default_factory, *args, **kwargs)
        self.default_values = default_values
    def copy(self):
        duplicate = super(defaultdict2, self).copy()
        duplicate.default_values = self.default_values.copy()
        return duplicate
    def __missing__(self, key):
        try:
            value = self.default_values[key]
        except KeyError:
            return super(defaultdict2, self).__missing__(key)
        # Cache the default value so subsequent lookups hit the dict directly.
        self[key] = value
        return value
| 2.3125 | 2 |
bpnsdata/time.py | lifewatch/bpnsdata | 0 | 12760162 | import datetime
import sys
if sys.version_info < (3, 9):
# importlib.resources either doesn't exist or lacks the files()
# function, so use the PyPI version:
import importlib_resources
else:
# importlib.resources has files(), so use that:
import importlib.resources as importlib_resources
import numpy as np
from skyfield import almanac, api
from tqdm.auto import tqdm
class TimeData:
    """
    Class to calculate moon phase and moment of the day
    """
    def __init__(self):
        self.ts = api.load.timescale()
        # Bundled JPL ephemeris; downloaded into the package data dir if absent.
        self.bsp_file = importlib_resources.files('bpnsdata') / 'data' / 'de421.bsp'
        if not self.bsp_file.exists():
            print('Downloading the de421.bsp file...')
            load = api.Loader(self.bsp_file.parent)
            load.download('de421.bsp')
        self.eph = None
    def __call__(self, df):
        # The ephemeris file handle is opened per call and closed afterwards.
        self.eph = api.load_file(self.bsp_file)
        df = self.get_time_data_df(df)
        self.eph.close()
        return df
    def get_moon_phase(self, dt, categorical=False):
        """
        Return the moon phase of a certain date in radians or in MOON_PHASES = ['New Moon','First Quarter','Full Moon',
        'Last Quarter'] if categorical is true

        Parameters
        ----------
        dt: datetime object
            Datetime on which to calculate the moon phase; treated as UTC.
        categorical : boolean
            Set to True to get the moon phase name as a string

        Returns
        -------
        Moon phase as string (categorical) or radians (float)
        """
        utc_dt = dt.replace(tzinfo=datetime.timezone.utc)
        t = self.ts.utc(utc_dt)
        if categorical:
            moon_phase_at = almanac.moon_phases(self.eph)
            moon_phase = almanac.MOON_PHASES[moon_phase_at(t)]
        else:
            moon_phase = almanac.moon_phase(self.eph, t).radians
        return moon_phase
    def get_day_moment(self, dt, location):
        """
        Return moment of the day (day, night, twilight)

        Parameters
        ----------
        dt : datetime
            Datetime to get the moment of; treated as UTC.
        location : geometry object

        Returns
        -------
        Moment of the day (string)
        """
        # NOTE(review): shapely point coords are conventionally (x, y) =
        # (lon, lat); here coords[0][0] is passed as latitude — confirm the
        # geometries really store (lat, lon) in that order.
        bluffton = api.Topos(latitude_degrees=location.coords[0][0], longitude_degrees=location.coords[0][1])
        utc_dt = dt.replace(tzinfo=datetime.timezone.utc)
        t = self.ts.utc(utc_dt)
        is_dark_twilight_at = almanac.dark_twilight_day(self.eph, bluffton)
        day_moment = is_dark_twilight_at(t).min()
        return almanac.TWILIGHTS[day_moment]
    def get_time_data_df(self, df):
        """
        Add to the dataframe the moon_phase and the day_moment to all the rows

        Parameters
        ----------
        df : DataFrame
            DataFrame with the datetime as index

        Returns
        -------
        The dataframe with the columns added
        """
        df['moon_phase'] = np.nan
        df['day_moment'] = np.nan
        # Reproject to WGS84 so coordinates are in degrees for skyfield.
        df_4326 = df.to_crs(epsg=4326)
        for row in tqdm(df_4326.loc[~df_4326.geometry.is_empty][['geometry']].itertuples(), total=len(df_4326), position=0, leave=True):
            t = row.Index
            geometry = row[1]
            if geometry is not None:
                df.loc[t, 'moon_phase'] = self.get_moon_phase(t)
                df.loc[t, 'day_moment'] = self.get_day_moment(t, geometry)
        return df
| 2.609375 | 3 |
dlearn/utils/costfuncs.py | Cysu/dlearn | 4 | 12760163 | <reponame>Cysu/dlearn
import theano.tensor as T
def binxent(output, target):
    r"""Return the mean binary cross entropy cost.

    The binary cross entropy of two :math:`n`-dimensional vectors :math:`o`
    and :math:`t` is

    .. math::
        c = -\sum_{i=1}^n \left[t_i\log(o_i) + (1-t_i)\log(1-o_i)\right]

    Parameters
    ----------
    output : theano.tensor.matrix
        The output symbol of the model. Each row is a sample vector.
    target : theano.tensor.matrix
        The target symbol of the model. Each row is a ground-truth vector.

    Returns
    -------
    out : theano.tensor.scalar
        The mean of binary cross entropies of all the samples.
    """
    # Sum per-element entropies within each sample, then average over samples.
    return T.nnet.binary_crossentropy(output, target).sum(axis=1).mean()
def mse(output, target):
    r"""Return the mean (over samples) of the per-sample squared error.

    The squared error of two :math:`n`-dimensional vectors :math:`o` and
    :math:`t` is

    .. math::
        c = \sum_{i=1}^n (o_i-t_i)^2

    (the original docstring's leading minus sign was a typo — the cost is
    non-negative).

    Parameters
    ----------
    output : theano.tensor.matrix
        The output symbol of the model. Each row is a sample vector.
    target : theano.tensor.matrix
        The target symbol of the model. Each row is a ground-truth vector.

    Returns
    -------
    out : theano.tensor.scalar
        The mean square error of all the samples.
    """
    return ((output - target) ** 2).sum(axis=1).mean()
def neglog(output, target):
    r"""Return the mean negative log-likelihood cost.

    The negative log-likelihood of an output vector :math:`o` with
    ground-truth label :math:`t` is

    .. math::
        c = -\log(o_t)

    Parameters
    ----------
    output : theano.tensor.matrix
        The output symbol of the model. Each row is a sample vector.
    target : theano.tensor.ivector
        The target symbol of the model. Each row is a ground-truth label.

    Returns
    -------
    out : theano.tensor.scalar
        The mean negative log-likelihood of all the samples.
    """
    # Fancy-index the log-probability of the true class for each sample.
    return -T.mean(T.log(output)[T.arange(target.shape[0]), target])
def neglog_2d(output, target):
    r"""Return the mean negative log-likelihood for 2-D targets.

    ``output`` is indexed as ``output[sample, position, label]`` and
    ``target`` as ``target[sample, position]``; the cost averages
    :math:`-\log` of the probability assigned to each ground-truth label over
    every (sample, position) pair.
    """
    # Build flattened (sample, position) index grids so the 3-D output can be
    # fancy-indexed with the matching target labels.
    i = T.arange(target.shape[0]).reshape((target.shape[0], 1))
    i = T.repeat(i, target.shape[1], axis=1).flatten()
    j = T.arange(target.shape[1]).reshape((1, target.shape[1]))
    j = T.repeat(j, target.shape[0], axis=0).flatten()
    k = target.flatten()
    return -T.mean(T.log(output)[i, j, k])
def miscls_rate(output, target):
    r"""Return the mean misclassification rate.

    Parameters
    ----------
    output : theano.tensor.matrix
        The output symbol of the model. Each row is a sample vector.
    target : theano.tensor.ivector
        The target symbol of the model. Each row is a ground-truth label.

    Returns
    -------
    out : theano.tensor.scalar
        The mean misclassification rate of all the samples.
    """
    # Predicted class is the argmax over the class axis.
    pred = T.argmax(output, axis=1)
    return T.neq(pred, target).mean()
def miscls_rate_2d(output, target):
    """Mean misclassification rate for 2-D targets: ``output`` is 3-D
    (sample, position, class) and ``target`` is 2-D (sample, position)."""
    pred = T.argmax(output, axis=2)
    return T.neq(pred, target).mean()
def binerr(output, target):
    r"""Return the mean number of binary prediction errors per sample.

    The output vector :math:`o\in [0,1]^n`, and target vector :math:`t\in
    \{0,1\}^n`. The per-sample error count is

    .. math::
        c = \sum_{i=1}^n \delta(round(o_i) \ne t_i)

    (the original docstring counted *matches*; the code counts mismatches).

    Parameters
    ----------
    output : theano.tensor.matrix
        The output symbol of the model. Each row is a sample vector.
    target : theano.tensor.matrix
        The target symbol of the model. Each row is a ground-truth vector.

    Returns
    -------
    out : theano.tensor.scalar
        The mean (over samples) of the per-sample binary error counts.
    """
    pred = T.round(output)
    return T.neq(pred, target).sum(axis=1).mean()
def KL(target, output):
    r"""Return the mean of summation of Kullback-Leibler divergence.

    **Note that the parameters order is different from other cost functions due
    to the conventional definition of the KL-divergence.** Denote the target
    vector and output vector by :math:`t\in [0,1]^n` and :math:`o\in [0,1]^n`
    respectively, the KL-divergence of each element is defined to be

    .. math::
        KL(t_i||o_i) = t_i\log\frac{t_i}{o_i} + (1-t_i)\log\frac{1-t_i}{1-o_i}

    And the summation over all the elements is

    .. math::
        c = \sum_{i=1}^n KL(t_i||o_i)

    Parameters
    ----------
    target : theano.tensor.matrix
        The target symbol of the model. Each row is a ground-truth vector.
    output : theano.tensor.matrix
        The output symbol of the model. Each row is a sample vector.

    Returns
    -------
    out : theano.tensor.scalar
        The mean of summation of KL-divergence over all the elements.
    """
    # Element-wise Bernoulli KL, summed per sample then averaged over samples.
    kl = target * T.log(target / output) + \
        (1.0 - target) * T.log((1.0 - target) / (1.0 - output))
    return kl.sum(axis=1).mean()
def weighted_norm2(output, target, weight):
    """Squared error where each element is scaled by ``target + weight``.

    NOTE(review): the per-element scaling ``(target + weight)`` emphasises
    elements with larger targets — confirm this matches the intended loss.
    """
    x = (output - target) ** 2
    w = (target + weight)
    return (w * x).sum(axis=1).mean()
| 3.0625 | 3 |
Server/ogarserver.py | mboerwinkle/OGAR | 5 | 12760164 | import asyncio
import json
import websockets
import queue
import threading
import time
import ssl
import sqlite3
import sys
# The only CLI argument is the domain name, used to locate the Let's Encrypt
# certificates for the TLS websocket endpoint.
if len(sys.argv) < 2:
    sys.exit("You must specify your domain name as an argument")
domainname = sys.argv[1]
certPathPrefix = "/etc/letsencrypt/live/{:s}/".format(domainname)
CLIENTS = []  # currently connected Client objects
CLIENTCOUNT = 0  # running count of open connections
RES = None  # ResourceWrapper, initialised by the DB thread in MainLoop()
class ResourceWrapper:
    """Thin helper around the server's sqlite3 connection."""
    def __init__(self, dbfilepath):
        print("Loading resource db \""+dbfilepath+"\"")
        try:
            self.dbconn = sqlite3.connect(dbfilepath)
        except sqlite3.Error:
            # Bug fix: the previous bare `except` swallowed the failure and the
            # code then crashed later with an unrelated NameError on dbconn;
            # report and surface the real error instead.
            print("Failed to connect to database")
            raise
        self.regenCursor()
        self.c.execute("PRAGMA foreign_keys = ON;")
    def regenCursor(self):
        """Create a fresh cursor on the shared connection."""
        self.c = self.dbconn.cursor()
    def tablesize(self, table):
        """Return the number of rows in *table* (trusted, internal name only)."""
        self.c.execute("select count(1) from "+table+";")
        return self.c.fetchone()[0]
    def exists(self, table, field, value):
        """Return True if any row in *table* has *field* == *value*."""
        self.c.execute("select count(1) from "+table+" where "+field+" = ?;", (value,))
        return self.c.fetchone()[0] != 0
    def commit(self):
        """Flush pending writes to disk."""
        self.dbconn.commit()
class Client:
    """A connected websocket client plus its study identifiers."""
    def __init__(self, websocket):
        self.ws = websocket
        self.id = None  # numeric participant id, assigned by the DB thread on connect
        self.qid = None  # qualtrics id, set when the client sends a "reg" message
    def send(self, msg):
        # NOTE(review): asyncio.run() starts a brand-new event loop and raises
        # RuntimeError if invoked from inside the running server loop; confirm
        # this is only ever called from non-async code.
        asyncio.run(self.ws.send(msg))
async def register(websocket):
    """Wrap a new connection in a Client and add it to the global list."""
    print("client got ", websocket.remote_address)
    c = Client(websocket)
    CLIENTS.append(c)
    return c
async def unregister(websocket):
    """Remove the Client owning *websocket* from the global list, if present."""
    for c in CLIENTS:
        if(c.ws == websocket):
            CLIENTS.remove(c)
            return
#asynchronous client commands that need to be processed later (by the DB thread)
CLIENTCOMMANDS = queue.Queue(maxsize=1000)
def pushToClientQueue(comm):
    """Enqueue a (client, task, ...) tuple for the DB thread; drops on overflow."""
    global CLIENTCOMMANDS
    try:
        CLIENTCOMMANDS.put_nowait(comm)
    except queue.Full:
        # Deliberately best-effort: the command is dropped rather than blocking
        # the websocket handler.
        print("Failed to append to clientcommands. Queue full.")
def popFromClientQueue():
    """Blocking take of the next queued command (called from the DB thread)."""
    global CLIENTCOMMANDS
    return CLIENTCOMMANDS.get()
async def clienthandler(websocket, path):
    """Per-connection coroutine: registers the client, relays its JSON
    messages onto the DB queue, and cleans up on disconnect."""
    global CLIENTS, CLIENTCOUNT
    client = await register(websocket)
    pushToClientQueue((client, "connect"))
    CLIENTCOUNT += 1
    try:
        async for message in websocket:
            data = json.loads(message)
            if data["type"] == "reg":#register
                print("Player joining: "+data["qid"]+" "+str(client.ws.remote_address))
                pushToClientQueue((client, "reg", data["qid"]))
            elif data["type"] in ("err", "evt", "pos", "perf"):
                pushToClientQueue((client, data["type"], data))
            else:
                print("Unknown type: "+data["type"])
    except Exception as err:
        # Bug fix: previously printed the Exception *class* rather than the
        # caught error instance, hiding the actual failure reason.
        print("Client handler excepted:", err)
    finally:
        pushToClientQueue((client, "disconnect"))
        await unregister(websocket)
        CLIENTCOUNT -= 1
        print("Client Disconn (",CLIENTCOUNT," left)")
def MainLoop():
    """Database thread: creates the schema, then drains CLIENTCOMMANDS forever,
    translating each queued (client, task, ...) tuple into a SQL statement."""
    global RES
    RES = ResourceWrapper('ogarserver.sqlite3')
    RES.c.executescript('''
    CREATE TABLE IF NOT EXISTS participant (
        id int,
        qid varchar(50),
        conntime int,
        disconntime int,
        ip varchar(16),
        primary key(id)
    );
    CREATE TABLE IF NOT EXISTS event (
        id int,
        timestamp int,
        eventid int,
        foreign key(id) references participant
    );
    CREATE TABLE IF NOT EXISTS perf (
        id int,
        timestamp int,
        drawfps int,
        iterfps int,
        drawtime real,
        itertime real,
        foreign key(id) references participant
    );
    CREATE TABLE IF NOT EXISTS position (
        id int,
        timestamp int,
        millisecond int,
        locX real,
        locY real,
        yaw real,
        pitch real,
        foreign key(id) references participant
    );
    CREATE TABLE IF NOT EXISTS error (
        id int,
        timestamp int,
        msg varchar(200),
        foreign key (id) references participant
    );
    PRAGMA foreign_keys=ON;
    ''')
    # Resume participant ids from the highest one already stored.
    retmaxpart = RES.c.execute("select max(id) from participant").fetchone()
    nextUID = 0
    if retmaxpart[0] != None:
        nextUID = retmaxpart[0]+1
        print("Starting from uid:",nextUID)
    else:
        print("Starting from empty database.")
    posQString = "insert into position (id, timestamp, millisecond, locX, locY, yaw, pitch) VALUES (?,?,?,?,?,?,?);"
    perfQString = "insert into perf (id, timestamp, drawfps, drawtime, iterfps, itertime) VALUES (?,?,?,?,?,?);"
    errQString = "insert into error (id, timestamp, msg) VALUES (?,?,?);"
    evtQString = "insert into event (id, timestamp, eventid) VALUES (?,?,?);"
    regQString = "update participant set qid = ? where id = ?;"
    connQString = "insert into participant (id, conntime, ip) values (?,strftime('%s','now'),?);"
    disconnQString = "update participant set disconntime = strftime('%s','now') where id = ?;"
    while True:
        command = popFromClientQueue()
        client = command[0]
        task = command[1]
        if task == "pos":
            d = command[2]
            RES.c.execute(posQString, (client.id, d['time'], d['milli'], d['x'], d['y'], d['yaw'], d['pitch']))
        elif task == "perf":
            d = command[2]
            RES.c.execute(perfQString, (client.id, d['t'], d['d'], d['dt'], d['i'], d['it']))
        elif task == "err":
            d = command[2]
            # error messages are truncated to fit the 200-char column
            RES.c.execute(errQString, (client.id, d['time'], d['err'].strip()[0:150]))
        elif task == "evt":
            d = command[2]
            RES.c.execute(evtQString, (client.id, d['time'], d['evt']))
        elif task == "reg":
            client.qid = command[2]
            RES.c.execute(regQString, (client.qid, client.id))
        elif task == "connect":
            client.id = nextUID
            ipstring = "0.0.0.0"#do not save IP for irb compliance
            #ipstring = str(client.ws.remote_address)
            RES.c.execute(connQString, (client.id, ipstring))
            nextUID+=1
        elif task == "disconnect":
            RES.c.execute(disconnQString, (client.id,))
            # NOTE(review): commit happens only on disconnect, so all of a
            # session's rows are flushed in one batch — confirm this batching
            # (vs. committing every command) is intentional.
            RES.commit()
        else:
            print("Unknown client command: ", task)
# TLS context built from the Let's Encrypt certificate for this domain.
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ssl_context.load_cert_chain(certfile=certPathPrefix+"fullchain.pem", keyfile=certPathPrefix+"privkey.pem")
# All database writes happen on this dedicated thread, fed via CLIENTCOMMANDS.
DBThread = threading.Thread(group=None, target=MainLoop, name="DBThread")
DBThread.start()
asyncio.get_event_loop().run_until_complete(websockets.serve(clienthandler, port=6411, ssl=ssl_context))#GALL
asyncio.get_event_loop().run_forever()
| 2.5 | 2 |
database.py | fazildgr8/touchlessClockin_project | 2 | 12760165 | from firebase import firebase
from datetime import date
from datetime import datetime
import numpy as np
import cv2
import imghdr
import base64
import json
'''
Stucture of Our Main Object
Note that a unique id will be created when we save it for the first time in db, its not show currently.
We will use this id for all transcations
object = {
"userName":'',
"ubid":'',
"faceEncoding":[]
"dayAndTime":[{
'day':'',
'startTime':'',
'endTime':''}]
}
'''
# NOTE(review): these are evaluated once at import time, so every clock-in /
# clock-out recorded by this module reuses the date and time of process start.
# Consider computing them per call — confirm the intended behaviour.
today = date.today()
date1= today.strftime("%d/%m/%Y")
now = datetime.now()
start_time = now.strftime("%H:%M:%S")
class Database:
    """Firebase-backed store of participants, their face encodings and their
    daily clock-in/clock-out times."""
    def __init__(self):
        self.firebase = firebase.FirebaseApplication('https://cvip-a44cd.firebaseio.com/', None)
    '''
    To save the first data object
    Pass it to a dictionary consisting of following parameters:name,ubid,feature vector
    Structure of the dictionary eg - {"name":'Gerald',"ubid":5033999,"faceEncoding":[1,2,3,4,5]}
    It will automatically insert date and start time in the main object
    '''
    def postData(self,d):
        # Seed the record with today's date (module-level `date1`, fixed at
        # import time) and empty start/end times.
        data={ "userName":d['name'],
               "dayAndTime":[{ "day": date1,
                               "startTime":"",
                               "endTime":" "
                             }],
               "ubid":d['ubid'],
               "faceEncoding":d['faceEncoding']}
        print("Posting data in DB")
        result = self.firebase.post('/timeclock/',data)
        uid = dict(result)
        # Firebase returns the generated unique key under 'name'.
        return uid['name']
    #This method will retrieve all the data from database
    '''
    Sample of a single object returned from database
    obj = {'-M5VP8cUF8UehCDc8fV4':
            {'dayAndTime': [{
                    'day': '22/04/2020',
                    'endTime': ' ',
                    'startTime': '01:42:21'}],
            'faceEncoding': [1, 2, 3, 4, 5],
            'ubid': 5033, '
            userName': 'Gerald'}}
    '''
    def getData(self):
        result = self.firebase.get('/timeclock/', '')
        print(result)
        return result
    #Pass the Unique Key to Get That Particular Data
    def getSingleData(self,key):
        result = self.firebase.get('/timeclock/', key)
        #print(result)
        return result
    #To update a single object, pass it the unique key and updated data object
    def updateSingleData(self,id,data):
        rs=self.firebase.patch('/timeclock/'+id, data)
        print('updated')
    def clockInUser(self,id):
        """Set today's startTime for record *id* (uses import-time `start_time`)."""
        data = self.getSingleData(id)
        x = data['dayAndTime']
        for dict1 in x:
            if (dict1['day'] == date1):
                dict1['startTime'] = start_time
        data['dayAndTime'] = x
        rs = self.firebase.patch('/timeclock/' + id, data)
        return data
    def clockOutUser(self,id):
        """Set today's endTime for record *id* (uses import-time `start_time`)."""
        data=self.getSingleData(id)
        x=data['dayAndTime']
        for dict1 in x:
            if(dict1['day'] == date1):
                dict1['endTime']=start_time
        data['dayAndTime']=x
        rs=self.firebase.patch('/timeclock/'+id, data)
        return data
    #check if user is clocked in or out
    def checkClockedStatus(self,id):
        # NOTE(review): when no entry exists for today's date the default is
        # True ("clocked in") — confirm that is the intended fallback.
        data=self.getSingleData(id)
        x=data['dayAndTime']
        status = True
        for dict1 in x:
            if(dict1['day'] == date1):
                if(dict1['startTime'] == ''):
                    status=False
        return status
    def getNameAndFeatureVector(self):
        """Return parallel lists (names, face encodings, record keys, ubids)
        for every stored participant; four empty lists when the DB is empty."""
        res = self.firebase.get('/timeclock/', '')
        if(res != None):
            name=[]
            featureVector=[]
            uid=[]
            ubid=[]
            for obj1 in res:
                uid.append(obj1)
                obj2=res[str(obj1)]
                name.append(obj2['userName'])
                featureVector.append(np.array(obj2['faceEncoding']))
                ubid.append(obj2['ubid'])
            return name,featureVector,uid,ubid
        else:
            return [],[],[],[]
    def dayAndDateValues(self):
        # Formats the module-level `today`/`now` values captured at import.
        day = today.strftime("%d")
        month = today.strftime("%m")
        year = today.strftime("%Y")
        hours = now.strftime("%H")
        seconds = now.strftime("%S")
        minutes = now.strftime("%M")
        return day,month,year,hours,seconds,minutes
    def getUbidStatus(self,ubid):
        """Look up *ubid* in /ubidDb and return the matching name, or None."""
        data = self.firebase.get('/ubidDb/', None)
        out = None
        ubid = str(ubid)
        ubid = ubid.replace(" ","")
        for dict1 in data:
            x=(data[dict1])
            if(str(ubid) == x['ubid']):
                out = x['name']
                break
        return out
return out
'''
a=Database()
d1={"name":'Gautam',"ubid":5033,"faceEncoding":[1,2,3,4,5]}
#a.updateSingleData(a.getSingleData())
c=a.getData()
f=list(c.keys())
print(f)
res=a.getSingleData(f[0])
#print(res)
#res['userName'] = 'gautam'
res['dayAndTime'].append({'day':'24/04/2019'})
#print(res)
#a.updateSingleData(str(f[0]),res)
img=cv2.imread('3.png')
imgType= imghdr.what('3.png')
print(imgType)
img_str = cv2.imencode('.png',img)[1].tostring()
print(type(img_str))
a=Database()
d1={"name":'Gautam111',"ubid":5033,"faceEncoding":[1,2,3,4,5]}
img=cv2.imread('3.png')
with open("3.png", "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
a.postData(d1, encoded_string)
#cv2.imwrite('F:/CV/Final/data/3.png', img)
'''
#
# a = Database()
# a.getUbidStatus("1111")
# firebase = firebase.FirebaseApplication('https://cvip-a44cd.firebaseio.com/', None)
# result = firebase.get('/ubidDb', None)
# for dict1 in result:
# x=(result[dict1])
# print(x['ubid'])
# a.clockInUser('-M5tqBEF89MM-FIi1XBl')
# print(a.checkClockedStatus('-M5tqBEF89MM-FIi1XBl'))
# # known_face_names, known_face_encodings, uniqueID, ubid = a.getNameAndFeatureVector()
# # print(known_face_names)
# known_face_names, known_face_encodings, uniqueID, ubid = a.getNameAndFeatureVector()
# print(ubid)
| 3.40625 | 3 |
graphs/make_benchmarks.py | c0ver/pyBedGraph | 18 | 12760166 | <reponame>c0ver/pyBedGraph<gh_stars>10-100
import sys
import os
import time
from pathlib import Path
from Benchmark import Benchmark
# sys.path.append("..")
from pyBedGraph import BedGraph
# Benchmark configuration: number-of-tests sweep range and defaults.
MIN_NUM_TEST = 100000
MAX_NUM_TEST = 1000000
DEFAULT_INTERVAL_SIZE = 500
DEFAULT_NUM_TESTS = 10000
# Series labels (result-dict keys) for each benchmark family.
RUN_TIME_NAMES = [
    'pyBW exact',
    'pyBW app.',
    'pyBG exact'
]
INTERVAL_ERROR_NAMES = [
    'pyBW app.'
]
INTERVAL_RUNTIME_NAMES = [
    'pyBW exact',
    'pyBW app.',
    'pyBG exact'
]
# Wall-clock reference used by the benchmark functions for progress output.
total_start_time = time.time()
# NOTE(review): the second assignment below overrides the first, so the
# long interval list is dead; delete one or make it an explicit toggle.
interval_test_list = [100, 250, 500, 750, 1000, 2500, 5000, 10000, 50000, 100000]
interval_test_list = [100, 250, 500, 1000]
bin_size_test_list = [5, 10, 20]
# Tests to make sure it works
#interval_test_list = [100]
#bin_size_test_list = [5]
#MIN_NUM_TEST = 10000
#MAX_NUM_TEST = 10000
def interval_size_error_benchmark():
    """Measure approximation error as a function of interval size.

    Benchmarks pyBigWig's approximate mean against the exact value for each
    interval size in `interval_test_list`, then pyBedGraph's approx_mean at
    several bin-size ratios, and writes all error metrics to
    graphs/<data_name>/interval_error_results.txt.
    NOTE(review): uses the module-level globals `bench`, `chrom_name` and
    `data_name` that are assigned in the script section at the bottom of
    this file, so this must run after that section.
    """
    interval_error_results = {}
    for name in INTERVAL_ERROR_NAMES:
        interval_error_results[name] = []
    # Pass 1: pyBigWig approximate-mean error per interval size.
    stats_to_bench = ['mean']
    for interval_size in interval_test_list:
        result = bench.benchmark(DEFAULT_NUM_TESTS, interval_size, chrom_name,
                                 None, stats_to_bench)
        interval_error_results['pyBW app.'].append(result['pyBigWig_mean']['error'])
        print(f"Total time taken so far (min): {(time.time() - total_start_time) / 60}")
    # Pass 2: pyBedGraph approx_mean error for bin = interval_size / divisor.
    stats_to_bench = ['approx_mean']
    for bin_size_divide in bin_size_test_list:
        interval_error_results['pyBG app. bin=int_size/' + str(bin_size_divide)] = []
        for interval_size in interval_test_list:
            bin_size = int(interval_size / bin_size_divide)
            result = bench.benchmark(DEFAULT_NUM_TESTS, interval_size,
                                     chrom_name, bin_size, stats_to_bench,
                                     bench_pyBigWig_approx=False)
            interval_error_results['pyBG app. bin=int_size/' + str(bin_size_divide)].append(result['approx_mean']['error'])
            print(f"Total time taken so far (min): {(time.time() - total_start_time) / 60}")
    print(interval_error_results)
    # Persist every error metric for each series, one metric per line.
    error_types = ['percent_error', 'ms_error', 'abs_error', 'not_included']
    with open(f'graphs/{data_name}/interval_error_results.txt', 'w') as out:
        out.write(" ".join([str(x) for x in interval_test_list]) + '\n')
        for key in interval_error_results:
            out.write(key + '\n')
            error_list = interval_error_results[key]
            for error_type in error_types:
                out.write(error_type + " ")
                for error_dict in error_list:
                    out.write(str(error_dict[error_type]) + " ")
                out.write('\n')
    # generate_images.create_error_interval(data_name, interval_test_list, interval_error_results)
def interval_size_runtime_benchmark():
    """Measure run time as a function of interval size.

    Records exact/approximate run times for pyBigWig and pyBedGraph for
    each interval size, then pyBedGraph approx_mean at several bin-size
    ratios, writing all series to
    graphs/<data_name>/interval_runtime_results.txt.
    NOTE(review): depends on the module-level globals `bench`,
    `chrom_name` and `data_name` set in the script section below.
    """
    interval_runtime_results = {}
    for name in INTERVAL_RUNTIME_NAMES:
        interval_runtime_results[name] = []
    # Pass 1: exact/approx run times per interval size.
    stats_to_bench = ['mean']
    for interval_size in interval_test_list:
        result = bench.benchmark(DEFAULT_NUM_TESTS, interval_size, chrom_name,
                                 None, stats_to_bench, True, True)
        interval_runtime_results['pyBW app.'].append(result['pyBigWig_mean']['approx_run_time'])
        interval_runtime_results['pyBW exact'].append(result['pyBigWig_mean']['exact_run_time'])
        interval_runtime_results['pyBG exact'].append(result['mean']['run_time'])
        print(f"Total time taken so far (min): {(time.time() - total_start_time) / 60}")
    # Pass 2: pyBedGraph approx_mean run time for bin = interval_size / divisor.
    stats_to_bench = ['approx_mean']
    for bin_size_divide in bin_size_test_list:
        interval_runtime_results['pyBG app. bin=int_size/' + str(bin_size_divide)] = []
        for interval_size in interval_test_list:
            bin_size = int(interval_size / bin_size_divide)
            result = bench.benchmark(DEFAULT_NUM_TESTS, interval_size,
                                     chrom_name, bin_size, stats_to_bench,
                                     True, False, False)
            interval_runtime_results['pyBG app. bin=int_size/' + str(bin_size_divide)].append(result['approx_mean']['run_time'])
            print(f"Total time taken so far (min): {(time.time() - total_start_time) / 60}")
    print(interval_runtime_results)
    with open(f'graphs/{data_name}/interval_runtime_results.txt', 'w') as out:
        out.write(" ".join([str(x) for x in interval_test_list]) + '\n')
        for key in interval_runtime_results:
            output = key + "\n" + " ".join([str(x) for x in interval_runtime_results[key]]) + '\n'
            out.write(output)
def runtime_benchmark():
    """Measure run time as a function of the number of test intervals.

    Sweeps num_tests from MIN_NUM_TEST to MAX_NUM_TEST at a fixed interval
    size, recording run times for pyBigWig (exact/approx) and pyBedGraph
    (exact, plus approx at several bin sizes), and writes the series to
    graphs/<data_name>/run_time_results.txt.
    NOTE(review): depends on the module-level globals `bench`,
    `chrom_name` and `data_name` set in the script section below.
    """
    # create list of num_tests
    num_test_list = [x for x in range(MIN_NUM_TEST, MAX_NUM_TEST + 1, MIN_NUM_TEST)]
    run_time_results = {}
    stats_to_bench = ['mean']
    for name in RUN_TIME_NAMES:
        run_time_results[name] = []
    for num_test in num_test_list:
        result = bench.benchmark(num_test, DEFAULT_INTERVAL_SIZE, chrom_name,
                                 None, stats_to_bench, True, True)
        run_time_results['pyBG exact'].append(result['mean']['run_time'])
        run_time_results['pyBW app.'].append(result['pyBigWig_mean']['approx_run_time'])
        run_time_results['pyBW exact'].append(result['pyBigWig_mean']['exact_run_time'])
        print(f"Total time taken so far (min): {(time.time() - total_start_time) / 60}")
    # pyBedGraph approx_mean at fixed bin sizes derived from the interval size.
    stats_to_bench = ['approx_mean']
    for bin_size_divide in bin_size_test_list:
        bin_size = int(DEFAULT_INTERVAL_SIZE / bin_size_divide)
        run_time_results['pyBG app. bin=' + str(bin_size)] = []
        for num_test in num_test_list:
            result = bench.benchmark(num_test, DEFAULT_INTERVAL_SIZE,
                                     chrom_name, bin_size, stats_to_bench, True,
                                     False, False)
            run_time_results['pyBG app. bin=' + str(bin_size)].append(result['approx_mean']['run_time'])
            print(f"Total time taken so far (min): {(time.time() - total_start_time) / 60}")
    print(run_time_results)
    with open(f'graphs/{data_name}/run_time_results.txt', 'w') as out:
        out.write(" ".join([str(x) for x in num_test_list]) + '\n')
        for key in run_time_results:
            output = key + "\n" + " ".join([str(x) for x in run_time_results[key]]) + '\n'
            out.write(output)
    # generate_images.create_runtime_num_test(data_name, num_test_list, run_time_results)
# Script section: validate CLI arguments, load the data, build the Benchmark
# helper and output directories, then run the selected benchmark(s).
if len(sys.argv) != 3:
    print("Needs 2 arguments:\n"
          "arg 1 - chrom_sizes_file\n"
          "arg 2 - bigWig file")
    exit(-1)
chrom_name = 'chr1'
start_time = time.time()
bedGraph = BedGraph(sys.argv[1], sys.argv[2], chrom_name)
print("Time for loading bedGraph file: ", time.time() - start_time)
start_time = time.time()
# NOTE(review): nothing happens between the timer reset above and this
# print, so the "Time for loading chr1" figure is always ~0 -- a chromosome
# loading call appears to have been removed here.
print(f"Time for loading {chrom_name}: ", time.time() - start_time, '\n')
bench = Benchmark(bedGraph, sys.argv[2])
# Results are grouped under graphs/<bigWig file stem>/.
data_name = Path(sys.argv[2]).stem
if not os.path.isdir(f'graphs'):
    os.mkdir(f'graphs')
if not os.path.isdir(f'graphs/{data_name}'):
    os.mkdir(f'graphs/{data_name}')
# runtime_benchmark()
interval_size_error_benchmark()
# interval_size_runtime_benchmark()
| 2.640625 | 3 |
src/sprites/characters/behaviours/raven/RavenFollowPlayerState.py | NEKERAFA/Soul-Tower | 0 | 12760167 | # -*- coding: utf-8 -*-
import random
from src.sprites.characters.behaviours.RavenBehaviourState import *
from src.sprites.Character import *
from src.sprites.MySprite import *
from src.sprites.EnemyRange import *
# ------------------------------------------------------------------------------
# Clase RavenFollowPlayerState
class RavenFollowPlayerState(RavenBehaviourState):
    """Behaviour state in which the raven enemy chases the player.

    The raven steers toward the player at 1.25x its base speed and reverts
    to the previous behaviour state on contact with the player, after a
    random 4-6 second delay, or when it leaves the (shrunken) map bounds.
    """
    def __init__(self, previousState):
        RavenBehaviourState.__init__(self)
        # Random chase duration between 4 and 6 seconds, in milliseconds.
        self.delayTime = random.randint(4, 6)*1000
        self.elapseTime = 0
        self.previousState = previousState
    def move_ai(self, enemy, player):
        # Get the positions of the enemy and the player
        (enemyX, enemyY) = enemy.rect.center
        (playerX, playerY) = player.rect.center
        # Get the angle between the enemy and the player
        angle = int(math.degrees(math.atan2(enemyY-playerY, playerX-enemyX)))
        # Correction when the angle is between 180-360
        if angle < 0:
            angle = 360 + angle
        # Work out which way the character has to move
        lookAt, move = EnemyRange.discretice_angle(angle)
        # Update the character's movement
        Character.move(enemy, move)
        # Check whether we are colliding with the player, to go back to
        # the previous state (facing away from the player)
        if pygame.sprite.collide_mask(player, enemy):
            enemy.change_behaviour(self.previousState)
            self.previousState.angle = int(angle+180)
            if self.previousState.angle > 360:
                self.previousState.angle -= 360
    def update(self, enemy, time, mapRect, mapMask):
        # Update the character's movement
        Character.update_movement(enemy, time)
        # Chase 25% faster than the base speed.
        enemy.speed = (enemy.speed[0]*1.25, enemy.speed[1]*1.25)
        MySprite.update(enemy, time)
        self.elapseTime += time
        # Give up the chase after the random delay, or when straying
        # outside the map rect shrunk by 48px on each side.
        if self.elapseTime > self.delayTime or not mapRect.inflate(-48, -48).contains(enemy.rect):
            enemy.change_behaviour(self.previousState)
| 3.109375 | 3 |
leetcode/unique_char.py | clnFind/DayDayAlgorithm | 0 | 12760168 | <reponame>clnFind/DayDayAlgorithm
# -*- coding: utf-8 -*-
def find_first_unique_char(s):
    """Return the first character of *s* that occurs exactly once.

    Fixed: the original called `unique_char.remove(char)` on every repeat,
    which raised ValueError for any character appearing three or more
    times (it was removed on the second occurrence already), and it raised
    IndexError when no unique character existed. This version counts in a
    single pass and returns None when there is no unique character.

    :param s: input string
    :return: the first unique character, or None if there is none
    """
    counts = {}
    for char in s:
        counts[char] = counts.get(char, 0) + 1
    for char in s:
        if counts[char] == 1:
            return char
    return None
if __name__ == '__main__':
    # Demo: first unique character of the sample string (expected: 'l').
    s = "asdfgashdgflpooh"
    r = find_first_unique_char(s)
    print(r)
| 3.375 | 3 |
tests/test_user_message_routing.py | ks91/bbc1-pub | 89 | 12760169 | <reponame>ks91/bbc1-pub
# -*- coding: utf-8 -*-
import pytest
from gevent import monkey
monkey.patch_all()
from gevent.pool import Pool
from gevent.server import StreamServer
from gevent import socket
from gevent.socket import wait_read
import threading
import shutil
import binascii
import queue
import time
import os
import sys
sys.path.extend(["../"])
from bbc1.core import bbclib
from bbc1.core.message_key_types import KeyType
from bbc1.core import bbc_network, user_message_routing, bbc_config, query_management, bbc_stats, message_key_types
# NOTE(review): the second assignment wins, so the 'debug' level is dead.
LOGLEVEL = 'debug'
LOGLEVEL = 'info'
# Shared timer ticker used by query_management.
ticker = query_management.get_ticker()
core_nodes = 10
# Per-core fixtures: dummy cores, their networking layers, two client
# sockets per core, and the user-message-routing objects under test.
dummy_cores = [None for i in range(core_nodes)]
networkings = [None for i in range(core_nodes)]
client_socks = [None for i in range(core_nodes*2)]
user_routings = [None for i in range(core_nodes)]
# Every message received by any dummy core server lands on this queue.
result_queue = queue.Queue()
domain_id = bbclib.get_new_id("test_domain")
asset_group_id = bbclib.get_new_id("asset_group_1")
nodes = [None for i in range(core_nodes)]
# Two user ids per core node.
users = [bbclib.get_new_id("test_user_%i" % i) for i in range(core_nodes*2)]
sample_resource_id = bbclib.get_new_id("sample_resource_id")
def get_random_data(length=16):
    """Return a random alphanumeric string of the given length."""
    import random
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    return "".join(random.choice(alphabet) for _ in range(length))
def dummy_server_loop(socket, address):
    """Per-connection handler for a dummy core's TCP server.

    Reads framed data from the client socket, feeds it to the bbc message
    parser, and pushes every complete message onto the module-level
    result_queue for the tests to inspect.
    NOTE(review): the parameter `socket` shadows the imported gevent
    socket module inside this function.
    """
    msg_parser = message_key_types.Message()
    try:
        while True:
            wait_read(socket.fileno())
            buf = socket.recv(8192)
            if len(buf) == 0:
                # Peer closed the connection.
                break
            msg_parser.recv(buf)
            while True:
                msg = msg_parser.parse()
                if msg is None:
                    break
                result_queue.put(msg)
    except:
        # NOTE(review): bare except also catches gevent's GreenletExit on
        # shutdown (which may be intended here), but it swallows real
        # errors too -- consider narrowing to `except Exception` plus an
        # explicit GreenletExit handler.
        print("## disconnected")
def start_dummy_server(port):
    """Start a gevent StreamServer on *port* that records received messages."""
    # Pool size matches the maximum number of client sockets in the tests.
    server = StreamServer(("0.0.0.0", port), dummy_server_loop, spawn=Pool(core_nodes*2))
    server.start()
class DummyCore:
    """Minimal stand-in for the bbc core so BBcNetwork can run in tests."""
    class DB:
        # No-op / canned-response ledger used in place of the real database.
        def add_domain(self, domain_id):
            pass
        def insert_transaction_locally(self, domain_id, resource_id, resource_type, data):
            print("insert_locally: domain_id=%s, resource_id=%s" % (binascii.b2a_hex(domain_id[:4]),
                                                                    binascii.b2a_hex(resource_id[:4])))
        def find_transaction_locally(self, domain_id, resource_id):
            # Only the module-level sample_resource_id is ever "found".
            if resource_id == sample_resource_id:
                print("find_locally: FOUND %s" % binascii.b2a_hex(resource_id[:4]))
                return b'sample_resource'
            else:
                print("find_locally: NOTFOUND!!!!!!!")
                return None
    class Storage:
        # No-op storage manager.
        def set_storage_path(self, domain_id, storage_type, storage_path):
            pass
    def __init__(self, port):
        self.ledger_manager = DummyCore.DB()
        self.storage_manager = DummyCore.Storage()
        self.stats = bbc_stats.BBcStats()
        # Each dummy core runs its own TCP server (daemon thread) that
        # records every received message into result_queue.
        th = threading.Thread(target=start_dummy_server, args=(port,))
        th.setDaemon(True)
        th.start()
class TestBBcNetwork(object):
    """Order-dependent integration tests for user message routing.

    The numbered methods build a 10-node network, register users, and
    exercise message forwarding between them; each test relies on the
    state left behind by the previous ones, so they must run in
    definition order.
    """
    def test_01_start(self):
        # Create 10 dummy cores + networking stacks with fresh .bbc1-N dirs,
        # all joined to the same test domain.
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        global dummy_cores, networkings, nodes
        user_message_routing.UserMessageRouting.REFRESH_FORWARDING_LIST_INTERVAL = 10  # for testing
        for i, nw in enumerate(networkings):
            if os.path.exists(".bbc1-%d"%i):
                shutil.rmtree(".bbc1-%d"%i)
            dummy_cores[i] = DummyCore(9000+i)
            config = bbc_config.BBcConfig(directory=".bbc1-%d"%i)
            networkings[i] = bbc_network.BBcNetwork(core=dummy_cores[i], config=config, p2p_port=6641+i, loglevel=LOGLEVEL)
            dummy_cores[i].networking = networkings[i]
            networkings[i].create_domain(domain_id=domain_id)
            user_routings[i] = networkings[i].domains[domain_id]['user']
            nodes[i] = networkings[i].domains[domain_id]['neighbor'].my_node_id
            assert nodes[i] is not None
            assert networkings[i].ip_address != ''
            print("IPv4: %s, IPv6 %s, port: %d" % (networkings[i].ip_address, networkings[i].ip6_address,
                                                   networkings[i].port))
    def test_02_set_initial_peer(self):
        # Point the first five nodes at node 0 as their initial neighbor.
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        for i in range(core_nodes-5):
            networkings[i].add_neighbor(domain_id=domain_id, node_id=nodes[0],
                                        ipv4=networkings[0].ip_address, port=networkings[0].port)
            print(networkings[i].domains[domain_id]['neighbor'].show_list())
    def test_03_wait_and_show(self):
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        print("-- wait 4 seconds --")
        time.sleep(4)
        for i in range(core_nodes):
            print(networkings[i].domains[domain_id]['neighbor'].show_list())
    def test_04_send_ping(self):
        # Domain pings from every other node to node 0 complete the mesh.
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        ipv4 = networkings[0].ip_address
        ipv6 = networkings[0].ip6_address
        port = networkings[0].port
        for i in range(1, core_nodes):
            networkings[i].send_domain_ping(domain_id=domain_id, ipv4=ipv4, ipv6=ipv6, port=port, is_static=True)
    def test_05_wait_and_show(self):
        # After propagation, every node should know all other 9 nodes.
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        print("-- wait 10 seconds --")
        time.sleep(10)
        for i in range(core_nodes):
            print(networkings[i].domains[domain_id]['neighbor'].show_list())
            assert len(list(networkings[i].domains[domain_id]['neighbor'].nodeinfo_list.keys())) == core_nodes - 1
    def test_10_register_users(self):
        # Register two users (two sockets) on each core.
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        global client_socks
        for i in range(core_nodes):
            client_socks[i*2] = socket.create_connection(("127.0.0.1", 9000+i))
            client_socks[i*2+1] = socket.create_connection(("127.0.0.1", 9000+i))
            user_routings[i].register_user(user_id=users[i*2], socket=client_socks[i*2])
            user_routings[i].register_user(user_id=users[i*2+1], socket=client_socks[i*2+1])
            assert len(user_routings[i].registered_users) == 2
    def test_11_send_message_to_another_in_the_same_node(self):
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        msg = {
            KeyType.domain_id: domain_id,
            KeyType.source_user_id: users[0],
            KeyType.destination_user_id: users[1],
            KeyType.message: 100,
        }
        user_routings[0].send_message_to_user(msg)
        time.sleep(1)
        recvmsg = result_queue.get()
        print(recvmsg)
        assert KeyType.reason not in recvmsg
        assert recvmsg[KeyType.message] == 100
    def test_12_send_message_to_another_in_the_different_node(self):
        # Cross-node delivery must also create a forwarding entry.
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        msg = {
            KeyType.domain_id: domain_id,
            KeyType.source_user_id: users[0],
            KeyType.destination_user_id: users[2],
            KeyType.message: 200,
        }
        user_routings[0].send_message_to_user(msg)
        time.sleep(1)
        recvmsg = result_queue.get()
        print(recvmsg)
        assert KeyType.reason not in recvmsg
        assert recvmsg[KeyType.message] == 200
        assert len(user_routings[0].forwarding_entries[users[2]]['nodes']) == 1
        msg = {
            KeyType.domain_id: domain_id,
            KeyType.source_user_id: users[4],
            KeyType.destination_user_id: users[19],
            KeyType.message: 300,
        }
        user_routings[2].send_message_to_user(msg)
        time.sleep(1)
        recvmsg = result_queue.get()
        print(recvmsg)
        assert KeyType.reason not in recvmsg
        assert recvmsg[KeyType.message] == 300
        assert len(user_routings[2].forwarding_entries[users[19]]['nodes']) == 1
    def test_13_send_message_to_invalid_user(self):
        # Sending to an unknown user must come back with a reason code.
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        msg = {
            KeyType.domain_id: domain_id,
            KeyType.source_user_id: users[7],
            KeyType.destination_user_id: bbclib.get_new_id("test_user_invalid"),
            KeyType.message: 200,
        }
        user_routings[3].send_message_to_user(msg)
        time.sleep(1)
        recvmsg = result_queue.get()
        print(recvmsg)
        assert KeyType.reason in recvmsg
        assert recvmsg[KeyType.message] == 200
    def test_14_unregister_user19(self):
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        assert len(user_routings[9].registered_users) == 2
        user_routings[9].unregister_user(user_id=users[19], socket=client_socks[19])
        assert len(user_routings[9].registered_users) == 1
    def test_15_send_message_to_user19(self):
        # Forwarding entry for the now-unregistered user must be dropped.
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        print("# users[19] originally connected with cores[9], but now unregistered.")
        msg = {
            KeyType.domain_id: domain_id,
            KeyType.source_user_id: users[4],
            KeyType.destination_user_id: users[19],
            KeyType.message: 400,
        }
        user_routings[2].send_message_to_user(msg)
        time.sleep(1)
        assert users[19] not in user_routings[2].forwarding_entries
    def test_16_wait_for_forward_list_all_purged(self):
        # Forwarding entries expire after REFRESH_FORWARDING_LIST_INTERVAL (10s).
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        assert len(user_routings[0].forwarding_entries) == 1
        print("*** wait 10 seconds ***")
        time.sleep(10)
        for i in range(core_nodes):
            assert len(user_routings[i].forwarding_entries) == 0
    def test_17_reset_all_connections(self):
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        for i in range(core_nodes):
            user_routings[i].unregister_user(user_id=users[i*2], socket=client_socks[i*2])
            user_routings[i].unregister_user(user_id=users[i*2+1], socket=client_socks[i*2+1])
        for i in range(core_nodes):
            assert len(user_routings[i].registered_users) == 0
    def test_18_multi_connections_on_a_core(self):
        # One user registered over two sockets on the same core receives
        # each message once per socket.
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        user_routings[0].register_user(user_id=users[0], socket=client_socks[0])
        user_routings[1].register_user(user_id=users[2], socket=client_socks[2])
        user_routings[1].register_user(user_id=users[2], socket=client_socks[3])
        assert len(user_routings[1].registered_users[users[2]]) == 2
        msg = {
            KeyType.domain_id: domain_id,
            KeyType.source_user_id: users[0],
            KeyType.destination_user_id: users[2],
            KeyType.message: 500,
        }
        user_routings[0].send_message_to_user(msg)
        time.sleep(1)
        assert result_queue.qsize() == 2
        for i in range(result_queue.qsize()):
            recv = result_queue.get()
            assert recv[KeyType.message] == 500
        user_routings[1].unregister_user(user_id=users[2], socket=client_socks[3])
        user_routings[0].send_message_to_user(msg)
        time.sleep(1)
        assert result_queue.qsize() == 1
        for i in range(result_queue.qsize()):
            recv = result_queue.get()
            assert recv[KeyType.message] == 500
    def test_19_multicast_and_multiconnection(self):
        # One user registered on two different cores (one with two sockets):
        # a single send fans out to all three connections.
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        user_routings[5].register_user(user_id=users[10], socket=client_socks[10])
        user_routings[2].register_user(user_id=users[4], socket=client_socks[4])
        user_routings[4].register_user(user_id=users[4], socket=client_socks[8])
        user_routings[4].register_user(user_id=users[4], socket=client_socks[9])
        assert len(user_routings[2].registered_users[users[4]]) == 1
        assert len(user_routings[4].registered_users[users[4]]) == 2
        msg = {
            KeyType.domain_id: domain_id,
            KeyType.source_user_id: users[10],
            KeyType.destination_user_id: users[4],
            KeyType.message: 600,
        }
        user_routings[5]._resolve_accommodating_core_node(dst_user_id=users[4], src_user_id=users[10])
        time.sleep(1)
        assert len(user_routings[5].forwarding_entries[users[4]]['nodes']) == 2
        # cores[2] and cores[4]
        user_routings[5].send_message_to_user(msg)
        time.sleep(1)
        assert result_queue.qsize() == 3
        for i in range(result_queue.qsize()):
            recv = result_queue.get()
            assert recv[KeyType.message] == 600
# Allow running this module directly without invoking pytest from the shell.
if __name__ == '__main__':
    pytest.main()
| 1.96875 | 2 |
experiments/cifar-homogeneity/parser.py | g-benton/hessian-eff-dim | 34 | 12760170 | <filename>experiments/cifar-homogeneity/parser.py
import argparse
def parser():
    """Parse the command-line arguments for random training.

    Returns:
        argparse.Namespace with `epochs` (int, default 10) and
        `seed` (int, default 1).
    """
    # Renamed the local so it no longer shadows this function's own name.
    arg_parser = argparse.ArgumentParser(description="Random Training")
    arg_parser.add_argument(
        "--epochs",
        type=int,
        default=10,
        metavar="N",
        # Fixed: the help text previously claimed the default was 200.
        help="number of epochs to train (default: 10)",
    )
    arg_parser.add_argument(
        "--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
    )
    return arg_parser.parse_args()
| 2.390625 | 2 |
arrays/topk.py | santoshmano/pybricks | 0 | 12760171 | <reponame>santoshmano/pybricks<gh_stars>0
from heapq import *
def topk(nums, k):
    """Return the k largest elements of *nums* (in min-heap order, unsorted).

    Fixed: the original did not compile -- the second loop was missing the
    closing parenthesis in `range(k, len(nums)`.
    Assumes 0 < k <= len(nums).

    Args:
        nums: sequence of comparable values.
        k: number of largest elements to keep.
    Returns:
        list containing the k largest values.
    """
    min_heap = []
    # Seed the heap with the first k values.
    for value in nums[:k]:
        heappush(min_heap, value)
    # Every later value that beats the current minimum replaces it.
    for value in nums[k:]:
        if min_heap[0] < value:
            heappop(min_heap)
            heappush(min_heap, value)
    return list(min_heap)
# Fixed: the original line was missing the closing parenthesis.
print(topk([2, 4, 1, 0, 5], 3))
| 3.34375 | 3 |
Jobs/Python_start/proj_1/hello.py | legioner9/Node_Way_source_2 | 0 | 12760172 | <gh_stars>0
# Beginner demo script: greeting, a conditional print, and two counting loops.
print('Hello World!!!')
if True:
    print('FROM if')
# Print 1..10 (formerly a while loop).
for num in range(1, 11):
    print(num)
num += 1  # leave `num` at 11, matching the original while-loop exit value
list_ = [1, 2, 3, 4, 5]
# Print 1..6.
for i in range(1, 7):
    print(i)
| 3.859375 | 4 |
core/projectMetadata.py | Bernardrouhi/HandFree | 0 | 12760173 | <reponame>Bernardrouhi/HandFree
import os
import json
from PySide2.QtCore import (Signal, QObject)
import pipeline_handler
from pipeline_handler import Pipeline
# File extension for Hand Free project files.
PROJECT_EXTENSION = ".hfp"
class ProjectKeys():
    """String keys of the project metadata dictionary (schema version 1.0)."""
    WorkDirectory = "Work_directory"
    PublishDirectory = "Publish_directory"
    Project = "Project"
    Version = "Version"
    AssetTypes = "AssetTypes"
    AssetSpace = "AssetSpace"
    WorkSpace = "Workspace"
class AssetSpaceKeys():
    """Known asset-space (DCC application) identifiers."""
    # NOTE(review): attribute name is misspelled ("Susbtance"), but other
    # modules may reference it -- renaming would break callers.
    Maya = "Maya"
    SusbtancePainter = "SubstancePainter"
    Empty = "Empty"
class ProjectMeta(QObject):
    '''Model of a Hand Free project (.hfp) file.

    Mirrors the project metadata dictionary into the environment-variable
    layer managed by `Pipeline`, and (de)serializes it as JSON.
    '''
    # Emitted whenever the work directory changes.
    onWorkDirectoryUpdate = Signal()

    def __init__(self):
        QObject.__init__(self)
        self._data = self.PROJECT_METADATA()
        # Last directory visited in file dialogs; defaults to the user home.
        self._path = str(os.path.expanduser('~'))
        self._pm = Pipeline()
        self.init()

    def PROJECT_METADATA(self):
        '''Return a fresh copy of the default project metadata structure.'''
        return {
            ProjectKeys.WorkDirectory: "",
            ProjectKeys.PublishDirectory: "",
            ProjectKeys.Project: "",
            ProjectKeys.AssetTypes: {},
            ProjectKeys.Version: "1.0"
        }.copy()

    def ASSETSPACE_METADATA(self):
        '''Return a fresh copy of the default asset-space entry.'''
        return {ProjectKeys.AssetSpace: "", ProjectKeys.WorkSpace: ""}.copy()

    def init(self):
        '''Sync with the environment: read existing values or write defaults.

        NOTE(review): the assignments to `self.ProjectName` /
        `self.WorkDirectory` only create plain attributes that nothing
        else reads; `self.set_ProjectName(...)` may have been intended.
        Kept as-is to preserve behavior.
        '''
        # Project Name
        if self._pm.get_ProjectName():
            self.ProjectName = self._pm.get_ProjectName()
        else:
            self._pm.set_ProjectName(self._data[ProjectKeys.Project])
        # Work Directory
        if self._pm.get_WorkDirectory():
            self.WorkDirectory = self._pm.get_WorkDirectory()
        else:
            self._pm.set_WorkDirectory(self._data[ProjectKeys.WorkDirectory])

    def get_LastPath(self):
        '''Get the last opened directory.

        Return:
            (str): directory path.
        '''
        return self._path

    def set_LastPath(self, directory=str):
        '''Set the last opened directory.

        Args:
            directory (str): directory path.
        '''
        self._path = directory

    def get_ProjectName(self):
        '''Get the name of the project.

        Return:
            (str): Name of the Project
        '''
        return self._pm.get_ProjectName()

    def set_ProjectName(self, project_name=str):
        '''Update the project name in both the metadata and the environment.

        Args:
            project_name (str): Name of the Project.
        Return: None
        '''
        self._data[ProjectKeys.Project] = project_name
        self._pm.set_ProjectName(project_name)

    def get_WorkDirectory(self):
        '''Get the work directory.

        Return:
            (str): Path to project work directory.
        '''
        return self._pm.get_WorkDirectory()

    def set_WorkDirectory(self, work_directory=str):
        '''Update the work directory and notify listeners.

        Args:
            work_directory (str): Path to project work directory.
        Return: None
        '''
        self._data[ProjectKeys.WorkDirectory] = work_directory
        self._pm.set_WorkDirectory(work_directory)
        self.onWorkDirectoryUpdate.emit()

    def get_PublishDirectory(self):
        '''Get the publish directory.

        Return:
            (str): Path to project publish directory.
        '''
        return self._pm.get_PublishDirectory()

    def set_PublishDirectory(self, publish_directory=str):
        '''Update the publish directory.

        Args:
            publish_directory (str): Path to project publish directory.
        Return: None
        '''
        self._data[ProjectKeys.PublishDirectory] = publish_directory
        self._pm.set_PublishDirectory(publish_directory)

    def get_AssetTypesName(self):
        '''Return a view of all the project AssetType names (dict keys).'''
        return self._data[ProjectKeys.AssetTypes].keys()

    def get_AssetTypes(self):
        '''Return the project AssetTypes mapping.'''
        return self._data[ProjectKeys.AssetTypes]

    def set_AssetTypes(self, assetTypeDict=dict):
        '''Replace the whole AssetTypes mapping.'''
        self._data[ProjectKeys.AssetTypes] = assetTypeDict

    def set_assetType(self, assetType=str, assetSpaceList=list):
        '''Set the AssetSpace entry list for a single asset type.'''
        self._data[ProjectKeys.AssetTypes][assetType] = assetSpaceList

    def get_AssetSpaces(self, assetType=str()):
        '''Return the AssetSpace names defined for *assetType* (empty if unknown).'''
        assetSpaces = list()
        assetTypes = self.get_AssetTypes()
        if assetType in assetTypes:
            for each in assetTypes[assetType]:
                assetSpaces.append(each[ProjectKeys.AssetSpace])
        return assetSpaces

    def update_settings(self, project_name=str, work_directory=str):
        '''Update the project name and work directory together.

        Args:
            project_name (str): Name of the Project.
            work_directory (str): Path to project work directory.
        Return: None
        '''
        self.set_ProjectName(project_name)
        self.set_WorkDirectory(work_directory)

    def load(self, ProjectFile=str):
        '''Load a Hand Free project file (schema version 1.0 only).

        Args:
            ProjectFile (str): Path to project file.
        Return: None
        '''
        if ProjectFile and ProjectFile.lower().endswith(PROJECT_EXTENSION):
            # remember the latest path
            self._path = os.path.dirname(ProjectFile)
            with open(ProjectFile, 'r') as outfile:
                LoadedData = json.load(outfile)
            # Only schema version 1.0 is understood; silently ignore others.
            if ProjectKeys.Version in LoadedData and LoadedData[ProjectKeys.Version] == "1.0":
                if ProjectKeys.Project in LoadedData:
                    self.set_ProjectName(project_name=LoadedData[ProjectKeys.Project])
                if ProjectKeys.WorkDirectory in LoadedData:
                    self.set_WorkDirectory(work_directory=LoadedData[ProjectKeys.WorkDirectory])
                if ProjectKeys.AssetTypes in LoadedData:
                    self.set_AssetTypes(assetTypeDict=LoadedData[ProjectKeys.AssetTypes])
                if ProjectKeys.PublishDirectory in LoadedData:
                    self.set_PublishDirectory(publish_directory=LoadedData[ProjectKeys.PublishDirectory])

    def save(self, ProjectFile=str):
        '''Save the project metadata as a Hand Free project file.

        Args:
            ProjectFile (str): Path to project file.
        Return: None
        '''
        if ProjectFile:
            # Force the project extension. Fixed: the original used
            # ProjectFile.split('.')[0], which truncated the path at the
            # FIRST dot anywhere (e.g. a dot in a directory name);
            # os.path.splitext only strips the final extension.
            file_path = os.path.splitext(ProjectFile)[0] + PROJECT_EXTENSION
            # remember the latest path
            self._path = os.path.dirname(ProjectFile)
            # save project file
            with open(file_path, 'w') as outfile:
                json.dump(self._data, outfile, ensure_ascii=False, indent=4)

    def print_settings(self):
        '''Dump the current environment-backed settings to stdout.'''
        print(self.get_WorkDirectory())
        print(self.get_ProjectName())
| 2.078125 | 2 |
serving_client.py | inureyes/backend.ai-client-py | 1 | 12760174 | <gh_stars>1-10
import requests
import argparse
import base64
import json
import os
from tabulate import tabulate
# Command-line interface.
# NOTE(review): arguments are parsed at import time, which makes this
# module unusable as a library; consider moving parse_args() into main().
parser = argparse.ArgumentParser(description='Predict using specific model')
parser.add_argument('model', metavar='MODEL', type=str,
                    help='Model name to use.')
parser.add_argument('payload', metavar='PAYLOAD', type=str,
                    help='JSON-type payload to request prediction to model.')
parser.add_argument('--info', action='store_true', default=False,
                    help='Abstract model information of given session ID')
parser.add_argument('--detail', action='store_true', default=False,
                    help='Detailed model information of given session ID')
args = parser.parse_args()
def main():
    """Query the local TensorFlow Serving REST endpoint for MODEL_NAME.

    Uses the MODEL_NAME environment variable to build the model URL, then
    either fetches model info/metadata or POSTs a predict request built
    from the user-supplied JSON payload.
    NOTE(review): if MODEL_NAME is unset, os.environ.get returns None and
    the string concatenations below raise TypeError.
    NOTE(review): --info is not exclusive with prediction: when only
    --info is passed, the `else` of the `--detail` check still sends a
    predict request; an if/elif chain may have been intended.
    """
    MODEL_NAME = os.environ.get('MODEL_NAME')
    if args.info:
        response = requests.get('http://localhost:8501/v1/models/model_'+MODEL_NAME)
        print(response.json())
    if args.detail:
        response = requests.get('http://localhost:8501/v1/models/model_'+MODEL_NAME+'/metadata')
        print(response.json())
    else:
        SERVER_URL = 'http://localhost:8501/v1/models/model_'+MODEL_NAME+':predict'
        # Wrap the user-supplied JSON payload in the TF-Serving request shape.
        predict_request = '{"instances": [%s]}' % args.payload
        #predict_request = '{"instances" : [{"b64": "%s"}]}' % base64.b64encode(
        #    dl_request.content)
        #IMAGE_URL = 'https://tensorflow.org/images/blogs/serving/cat.jpg'
        #dl_request = requests.get(IMAGE_URL, stream=True)
        #dl_request.raise_for_status()
        #SERVER_URL = 'http://localhost:8501/v1/models/model_'+MODEL_NAME+':predict'
        #predict_request = '{"instances" : [{"b64": "%s"}]}' % base64.b64encode(dl_request.content).decode()
        #predict_request = args.payload
        response = requests.post(SERVER_URL, data=predict_request)
        #response.raise_for_status()
        #print(response.content)
        prediction = response.json()
        print(prediction)
# Script entry point.
if __name__ == '__main__':
    main()
| 2.6875 | 3 |
cep_price_console/unified_upload/view_3_column_mapping.py | zanebclark/cep_price_console | 0 | 12760175 | <reponame>zanebclark/cep_price_console
from cep_price_console.utils.mypandastable import MyTable
from cep_price_console.utils.gui_utils import VerticalScrolledFrame
from cep_price_console.utils.log_utils import CustomAdapter, debug
from cep_price_console.unified_upload.step import Step
from cep_price_console.unified_upload.model import Function
from cep_price_console.unified_upload.treeview import TreeviewConstructor
import tkinter as tk
import tkinter.ttk as ttk
import logging
from pandastable import TableModel
import pandas as pd
"""
TODO(review): You shouldn't be using a paned window for the column entries. Turn the column
label into an entry that is pre-populated with the current column name. Link a change in the
column name to a change to the pandas column. Link a deletion of a column mapper to a deletion
of the pandas column. Get rid of those options on the mypandastable subclass.

SECURITY NOTE(review): the connection properties below should not live in source control;
move them to configuration and rotate the account if these details are sensitive.
DDI Warehouse Connection Properties
username: CEP\ddiadmin
servername: DDI-SERVER\SQLEXPRESS
Collation: SQL_Latin1_General_CP1_CI_AS
IPAll: 64374
"""
class ColumnSelection(ttk.Frame):
    """Step-3 wizard frame: map spreadsheet columns before upload.

    Shows the selected worksheet in a pandastable on the right and one
    ColumnMapper widget per column in a scrollable paned area on the left.
    NOTE(review): `ColumnMapper` is defined elsewhere in this package --
    confirm its constructor signature matches the call in open().
    """
    # Spreadsheet formats the uploader accepts.
    acceptable_ext = ['.xlsx', '.xls']
    logger = CustomAdapter(logging.getLogger(str(__name__)), None)
    @debug(lvl=logging.DEBUG, prefix='')
    def __init__(self, view, *args, **kwargs):
        # Deferred state: populated in open() once a worksheet is chosen.
        self.mapper_dict = None
        self.initial = False
        self.terminal = False
        self.view = view
        # noinspection PyArgumentList
        super().__init__(self.view, style="even.group.TFrame", padding=5, relief=tk.RIDGE, *args, **kwargs)
        self.rowconfigure(4, weight=1)
        self.columnconfigure(0, weight=1)
        self.header = ttk.Label(self,
                                text="Step 3: Column Mapping",
                                style="even.heading2.TLabel")
        self.header.grid(row=0, column=0, sticky=tk.W)
        self.instr_sep = ttk.Separator(self)
        self.instr_sep.grid(row=1, column=0, sticky=tk.EW)
        self.inst = ttk.Label(self, anchor=tk.NW, justify=tk.LEFT)
        # NOTE(review): these instructions describe the workbook-selection
        # step, not column mapping -- copied from an earlier step?
        instr = (
            " - a) Select the workbook in question by clicking 'Browse', or by pasting the full filepath in the"
            "textbox. \n"
            " - Browsing for the file is recommended. \n"
            " - Only .xls and .xlsx formats are permitted at this time. If the target file is in a different format, "
            "open it in Excel and save it as an .xlsx file."
        )
        self.inst.config(text=instr, style="even.notes.TLabel", wraplength=20)  # TODO: Write notes
        self.inst.grid(row=2, column=0, sticky=tk.W)
        self.instr_sep_2 = ttk.Separator(self)
        self.instr_sep_2.grid(row=3, column=0, sticky=tk.EW)
        # Outer horizontal pane: column mappers (left) | data table (right).
        self.paned_outer = tk.PanedWindow(self,
                                          orient=tk.HORIZONTAL,
                                          name="paned_outer",
                                          sashrelief=tk.RAISED,
                                          sashwidth=7)
        self.paned_outer.grid(row=4, column=0, sticky=tk.NSEW)
        self.mapping_paned_frame = VerticalScrolledFrame(self.paned_outer, padding=5, relief=tk.RIDGE)
        self.mapping_paned_frame.interior.columnconfigure(0, weight=1)
        self.mapping_paned_frame.interior.rowconfigure(0, weight=1)
        # Inner vertical pane holding one ColumnMapper per column.
        self.mapping_paned_window = tk.PanedWindow(
            self.mapping_paned_frame.interior,
            orient=tk.VERTICAL,
            name="mapping_pane",
            sashrelief=tk.FLAT,
            sashwidth=5
        )
        self.mapping_paned_window.grid(row=0, column=0, sticky=tk.NSEW)
        self.paned_outer.add(self.mapping_paned_frame,
                             sticky=tk.NSEW,
                             width=480,
                             pady=5,
                             padx=5,
                             stretch="never")
        self.table_frame = ttk.Frame(self.paned_outer,
                                     padding=5,
                                     relief=tk.RIDGE)
        self.paned_outer.add(self.table_frame,
                             sticky=tk.NSEW,
                             # width=480,
                             pady=5,
                             padx=5)
        self.table = MyTable(self.table_frame, showtoolbar=False, showstatusbar=True)
        # Release the pandastable's global key grabs while this frame is hidden;
        # open()/close() re-bind and unbind them.
        self.unbind_all("<KP_8>")
        self.unbind_all("<Return>")
        self.unbind_all("<Tab>")
        self.table.show()
        self.model = None
        self.bind("<Configure>", self.on_resize)
        self.grid(row=0, column=0, sticky=tk.NSEW)
        self.grid_remove()
    @debug(lvl=logging.DEBUG, prefix='')
    def open(self):
        """Show the step: load the worksheet (first visit) and bind table keys."""
        if self.view.column_mapping_dataframe is None:
            # First visit: read the chosen worksheet into a pandastable model.
            self.model = TableModel(
                dataframe=pd.read_excel(
                    io=self.view.wb_filename,
                    header=self.view.header_row - 1,
                    sheet_name=self.view.ws_name_selection
                )
            )
            # One mapper widget per spreadsheet column.
            self.mapper_dict = {}
            for column_name in self.model.df.columns.values.tolist():
                self.mapper_dict[column_name] = ColumnMapper(
                    master_frame=self,
                    paned_frame=self.mapping_paned_window,
                    col_name=column_name
                )
            for obj in self.mapper_dict.values():
                self.mapping_paned_window.add(
                    obj,
                    minsize=30,
                    stretch="never"
                )
        else:
            # Revisit: restore the model saved by close().
            self.model = self.view.column_mapping_dataframe
        self.table.updateModel(self.model)
        self.table.statusbar.update()
        self.bind_all("<KP_8>", self.table.handle_arrow_keys)
        self.bind_all("<Return>", self.table.handle_arrow_keys)
        self.bind_all("<Tab>", self.table.handle_arrow_keys)
        self.grid()
    @debug(lvl=logging.DEBUG, prefix='')
    def close(self):
        """Hide the step, saving the table model and releasing key bindings."""
        self.view.column_mapping_dataframe = self.table.model
        self.unbind_all("<KP_8>")
        self.unbind_all("<Return>")
        self.unbind_all("<Tab>")
        self.grid_remove()
    # noinspection PyUnusedLocal
    # @debug(lvl=logging.DEBUG, prefix='')
    def on_resize(self, event):
        """Re-wrap the instruction label to the frame's current width."""
        self.inst.configure(wraplength=self.winfo_width())
class ColumnMapping(Step):
    """Wizard step that lets the user map spreadsheet columns to functions.

    Left side: a scrollable canvas holding instructions and one
    ``ColumnMapper`` row per data column; right side: a treeview preview of
    the upload query.
    """
    logger = CustomAdapter(logging.getLogger(str(__name__)), None)
    @debug(lvl=logging.DEBUG, prefix='')
    def __init__(self, view, *args, **kwargs):
        """Build the static widget skeleton; rows are added in populate_frame()."""
        super().__init__(namely=str(ColumnMapping.__name__), order=2, view=view, *args, **kwargs)
        self.upload_query = None
        # self.__option_list = None # TODO: What's this?
        self.frame_main.columnconfigure(1, weight=1)
        self.frame_main.rowconfigure(1, weight=1)
        self.column_mapping_hdr = ttk.Label(self.frame_main)
        self.column_mapping_hdr.config(text="Column Mapping", style="heading1.TLabel")
        self.column_mapping_hdr.grid(row=0, column=0, columnspan=2, sticky=tk.W)
        self.treeview = TreeviewConstructor(self.frame_main)
        self.__class__.logger.log(logging.DEBUG, "Done!")
        self.treeview.grid(row=1,
                           column=1,
                           sticky=tk.NSEW)
        # Canvas + inner frame + scrollbar give the mapper list a scroll area.
        self.canvas_frame = ttk.Frame(self.frame_main, padding=2)
        self.canvas_frame.grid(row=1, column=0, sticky=tk.NSEW)
        self.canvas_frame.rowconfigure(0, weight=1)
        self.canvas_frame.columnconfigure(0, weight=1)
        self.canvas = tk.Canvas(self.canvas_frame)
        self.canvas.grid(row=0, column=0, sticky=tk.NSEW)
        self.canvas.columnconfigure(0, weight=1)
        self.canvas.rowconfigure(0, weight=1)
        self.column_mapping_frame = ttk.Frame(self.canvas)
        self.column_mapping_frame.update_idletasks()
        self.column_mapping_frame.configure(padding=5, style="even.group.TFrame")
        self.column_mapping_frame.columnconfigure(0, weight=1)
        self.column_mapping_frame.rowconfigure(0, weight=1)
        # The frame lives *inside* the canvas so it can be scrolled; tagged
        # so populate_frame() can resize it via itemconfigure.
        self.canvas.create_window((0, 0),
                                  window=self.column_mapping_frame,
                                  anchor=tk.NW,
                                  tags="column_mapping_frame")
        self.filler_frame = ttk.Frame(self.canvas_frame)
        self.filler_frame.grid(row=1, column=0, sticky=tk.NSEW)
        self.ysb = ttk.Scrollbar(self.canvas_frame)
        self.ysb.config(orient=tk.VERTICAL, command=self.canvas.yview)
        self.ysb.grid(row=0, column=1, sticky=tk.NS)
        self.canvas.configure(yscrollcommand=self.ysb.set, borderwidth=2, highlightthickness=0)
        self.col_mapping_instr_lbl = ttk.Label(self.column_mapping_frame)
        # Placeholder instructions -- still to be written for real.
        self.col_mapping_instr_lbl.config(text="These are the instructions on how to do this thing. \n"
                                               "1) You need to do something. \n"
                                               "2) You need to do something else. \n"
                                               "3) Finally, you need to do something else. \n"
                                               "Then you are done!")
        self.col_mapping_instr_lbl.grid(row=0, column=0)
        self.reset = ttk.Button(self.frame_cmd)
        self.reset.config(text="Reset Comboboxes")  # command=self.reset_combo_boxes) # TODO: write function
        self.reset.grid(row=0, column=0)
    @debug(lvl=logging.DEBUG, prefix='')
    def populate_frame(self):
        """Fill the treeview preview and create one mapper row per column."""
        self.upload_query = self.view.model.upload_query()
        self.treeview.populate_query(query=self.upload_query, hide_list=None, id_col="line_number", limit=30)
        for col_obj in sorted(self.treeview.col_obj_dict.values(), key=lambda x: x.order):
            if col_obj.order != 0:
                # NOTE(review): these keyword names (master/parent_frame/
                # grid_row/desc) do not match ColumnMapper.__init__ below
                # (master_frame/paned_frame/col_name) -- confirm which
                # ColumnMapper this is meant to construct.
                ColumnMapper(master=self,
                             parent_frame=self.column_mapping_frame,
                             grid_row=col_obj.order,
                             desc=col_obj.desc)
        self.update_option_list()
        self.column_mapping_frame.update_idletasks()
        self.canvas.itemconfigure("column_mapping_frame",
                                  height=self.column_mapping_frame.winfo_reqheight(),
                                  width=self.column_mapping_frame.winfo_reqwidth())
        # NOTE(review): height below uses winfo_reqwidth() -- looks like it
        # should be winfo_reqheight(); confirm before changing.
        self.canvas.config(scrollregion=self.canvas.bbox(tk.ALL),
                           width=self.column_mapping_frame.winfo_reqwidth(),
                           height=self.column_mapping_frame.winfo_reqwidth())
        # self.testing()
    @debug(lvl=logging.DEBUG, prefix='')
    def proceed_logic(self):
        """Enable Next only when the 'Vendor Part Number' function exists."""
        func_obj = Function.func_dict.get("Vendor Part Number")
        if func_obj is not None:
            self.btn_next.state(['!disabled'])
        else:
            self.btn_next.state(['disabled'])
    @debug(lvl=logging.DEBUG, prefix='')
    def update_option_list(self):
        """Refresh every mapper combobox with the still-unassigned functions."""
        opt_list = [ColumnMapper.function_default]
        for func in Function.func_dict.values():
            # Only functions not yet bound to a column are offered.
            if func.field_desc is None:
                opt_list.append(func.name)
        # NOTE(review): ColumnMapper (below) does not define col_mapper_dict;
        # presumably an older/other registry -- verify.
        for colmapper in ColumnMapper.col_mapper_dict.values():
            colmapper.function_combo.config(values=opt_list)
    @debug(lvl=logging.DEBUG, prefix='')
    def testing(self):
        """Manual smoke-test helper: pre-select a known-good column mapping."""
        ColumnMapper.col_mapper_dict["Action Indicator"].function_var.set("Action Indicator")
        ColumnMapper.col_mapper_dict["Brand Long Name"].function_var.set("Brand")
        ColumnMapper.col_mapper_dict["Cost End Column Price-KANSAS CITY-KAN-17"].function_var.set("C1 Cost")
        ColumnMapper.col_mapper_dict["Item Depth"].function_var.set("Depth")
        ColumnMapper.col_mapper_dict["Description 125 Character"].function_var.set("Description")
        ColumnMapper.col_mapper_dict["Item Height"].function_var.set("Height")
        ColumnMapper.col_mapper_dict["List Price"].function_var.set("L1 Price")
        ColumnMapper.col_mapper_dict["Manufacturer Long Name"].function_var.set("Manufacturer Name")
        ColumnMapper.col_mapper_dict["Manufacturer Part Number"].function_var.set("Manufacturer Part Number")
        ColumnMapper.col_mapper_dict["Manufacturer URL"].function_var.set("Manufacturer URL")
        ColumnMapper.col_mapper_dict["Unit of Measure"].function_var.set("Primary UOM")
        ColumnMapper.col_mapper_dict["Unit of Measure Qty"].function_var.set("Primary UOM Quantity")
        ColumnMapper.col_mapper_dict["Unit within UOM"].function_var.set("Secondary UOM")
        ColumnMapper.col_mapper_dict["UNSPSC"].function_var.set("UNSPSC Code")
        ColumnMapper.col_mapper_dict["UPC Item GTIN"].function_var.set("UPC")
        ColumnMapper.col_mapper_dict["Item Number"].function_var.set("Vendor Part Number")
        ColumnMapper.col_mapper_dict["Item Number Revised"].function_var.set("Vendor Part Number Revision")
        ColumnMapper.col_mapper_dict["Item Cubic Inches"].function_var.set("Volume")
        ColumnMapper.col_mapper_dict["Item Weight"].function_var.set("Weight")
        ColumnMapper.col_mapper_dict["Item Width"].function_var.set("Width")
    @debug(lvl=logging.DEBUG, prefix='')
    def next(self, *args):
        """Mark the step complete and hand control back to the wizard."""
        self.complete = True
        self.view.flow_manager()
class ColumnMapper(ttk.Frame):
    """One row of the mapping UI for a single spreadsheet column.

    Shows the column name, a (read-only) datatype combobox, a function
    combobox to bind the column to an upload function, and a close button
    that removes the row from its paned window.
    """
    logger = CustomAdapter(logging.getLogger(str(__name__)), None)
    # Options for the datatype combobox; empty for now, so the combobox
    # offers no choices yet.
    datatypes = []  # TODO: Fill Out
    # Candidate cell datatypes (mirrors xlrd/openpyxl cell categories).
    values = [
        'string',
        'float',
        'integer',
        'date',
        'boolean',
        'error',
        'blank'
    ]
    # Placeholder texts for the two input widgets.
    function_default = "Select Function"
    custom_default = "Enter Custom Header"
    @debug(lvl=logging.DEBUG, prefix='')
    def __init__(self,
                 master_frame,
                 paned_frame,
                 col_name,
                 *args,
                 **kwargs):
        """Build the row widgets.

        Args:
            master_frame: the owning step; must expose ``model.df`` so the
                column's dtype can be read.
            paned_frame: the tk.PanedWindow this row is packed into (kept so
                remove_frame() can detach itself).
            col_name: the spreadsheet column this row maps.
        """
        self.paned_frame = paned_frame
        # noinspection PyArgumentList
        super().__init__(self.paned_frame, *args, **kwargs)
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.master_frame = master_frame
        self.col_name = col_name
        # Pandas dtype of the mapped column, shown in the datatype combobox.
        self.col_datatype = self.master_frame.model.df[self.col_name].dtype
        self.label = ttk.Label(self, text=self.col_name, style="dflt.TLabel")
        self.label.grid(row=0, column=0, sticky=tk.E)
        # Datatype
        self.datatype_var = tk.StringVar()
        self.datatype_var.set(self.col_datatype)
        self.datatype_var.trace_add('write', self.datatype_var_on_change)
        self.datatype_combo = ttk.Combobox(self,
                                           state="readonly",
                                           values=ColumnMapper.datatypes,
                                           textvariable=self.datatype_var)
        self.datatype_combo.grid(row=0, column=1)
        self.datatype_combo.bind('<<ComboboxSelected>>', self.datatype_combo_on_change)
        # Field Function
        self.function = None
        self.function_var = tk.StringVar()
        self.function_var.set(ColumnMapper.function_default)
        self.function_var.trace_add('write', self.function_var_on_change)
        # values are populated later by ColumnMapping.update_option_list().
        self.function_combo = ttk.Combobox(self,
                                           state="readonly",
                                           textvariable=self.function_var)
        self.function_combo.grid(row=0, column=2)
        self.close_btn = ttk.Button(self,
                                    text="X",
                                    command=self.remove_frame,
                                    width=3,
                                    style="bad.TButton")
        self.close_btn.grid(row=0, column=3, sticky=tk.E)
        # TODO: Allow user to do custom stuff
        # # Field Custom
        # self.custom_chk_var = tk.IntVar()
        # self.custom_chk_var.set(0)
        #
        # self.custom_chk_box = ttk.Checkbutton(self)
        # self.custom_chk_box.grid(row=0, column=3)
        # self.custom_chk_box.config(variable=self.custom_chk_var,
        #                            command=self.custom_chk_box_toggle)
        #
        # self.custom_var = tk.StringVar()
        # self.custom_var.set(ColumnMapper.custom_default)
        # self.custom_entry = ttk.Entry(self)
        # self.custom_entry.grid(row=0, column=4)
        # self.custom_entry.state(['disabled'])
        # custom_validation = (self.register(self.validate_custom_value),
        #                      '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
        # self.custom_entry.config(textvariable=self.custom_var,
        #                          validate='focusout',
        #                          validatecommand=custom_validation)
    @debug(lvl=logging.DEBUG, prefix='')
    def remove_frame(self):
        """Detach this row from its paned window (the widget is not destroyed)."""
        self.paned_frame.remove(self)
    @debug(lvl=logging.DEBUG, prefix='')
    def datatype_combo_on_change(self, *args):
        """Placeholder handler for datatype selection events."""
        print(*args)  # TODO: Support datatype change
    @debug(lvl=logging.DEBUG, prefix='')
    def datatype_var_on_change(self, *args):
        """Placeholder handler for datatype variable writes."""
        print(*args)  # TODO: Support datatype change
    @debug(lvl=logging.DEBUG, prefix='')
    def function_var_on_change(self, *args):
        """Intended to (re)bind the selected Function to this column; currently disabled."""
        pass
        # if self.function is not None:
        #     self.function.field_desc = None
        #     self.function = None
        #
        # if self.function_var != ColumnMapper.function_default:
        #     func_obj = Function.func_dict.get(self.function_var.get())
        #     if func_obj is not None:
        #         self.function = func_obj
        #         self.function.field_desc = self.col_name
        #     else:
        #         self.__class__.logger.log(logging.ERROR, "Function Object Not Found: Function Variable = {0}".format(
        #             self.function_var.get()))
        #     self.master.update_option_list()
        #     self.master.proceed_logic()
    # @debug(lvl=logging.DEBUG, prefix='')
    # def custom_chk_box_toggle(self, *args):
    #     if self.custom_chk_var.get() == 0:  # Unchecked
    #         self.__class__.logger.log(logging.DEBUG, "Button unchecked")
    #         self.custom_entry.state(['disabled'])
    #         self.custom_var.set(ColumnMapper.custom_default)
    #         self.function_combo.state(['!disabled'])
    #
    #     elif self.custom_chk_var.get() == 1:  # Checked
    #         self.__class__.logger.log(logging.DEBUG, "Button checked")
    #         self.custom_entry.state(['!disabled'])
    #         self.function_combo.state(['disabled'])
    #         self.function_var.set(ColumnMapper.function_default)
    #     else:
    #         self.__class__.logger.log(logging.ERROR, "Button not 0 or 1!")
    # @debug(lvl=logging.DEBUG, prefix='')
    # def validate_custom_value(self,
    #                           action,
    #                           index,
    #                           value_if_allowed,
    #                           prior_value,
    #                           text,
    #                           validation_type,
    #                           trigger_type,
    #                           widget_name):
    #     pass  # TODO: Validate custom values
| 1.898438 | 2 |
manuscript/GAT_ANALYSIS/plot_main_figures.py | jakob-he/TADA | 3 | 12760176 | <reponame>jakob-he/TADA
"""Plots ther anlysis results of GAT"""
import argparse
import pathlib
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.patches as mpatches
from collections import OrderedDict
def argparser():
    """Collect command-line options for the GAT plotting script.

    Returns:
        argparse.Namespace with ``pathogenic``, ``nonpathogenic``,
        ``compare``, ``output`` and ``qvalue`` (float) attributes.
    """
    parser = argparse.ArgumentParser(
        'Plots l2fold of a GAT analysis with the qvalue.')
    # Help texts fixed: 'anlysis' -> 'analysis', 'comparision' -> 'comparison'.
    parser.add_argument('-a', '--pathogenic',
                        help='Path to the pathogenic analysis file.')
    parser.add_argument('-b', '--nonpathogenic',
                        help='Path to the non-pathogenic analysis file.')
    parser.add_argument('-c', '--compare',
                        help='Path to the count comparison of pathogenic and non-pathogenic.')
    parser.add_argument('-o', '--output', help='Figure title.')
    parser.add_argument('-q', '--qvalue', type=float, help='Q-value threshold.')
    return parser.parse_args()
def main():
    """Render the two-panel GAT figure: log2 fold-change bars for
    pathogenic vs. non-pathogenic annotations (left) and overlap-difference
    markers (right), saved to ``<output>.png``."""
    #get input arguments
    args = argparser()
    pathogenic_ana = pathlib.Path(args.pathogenic)
    nonpathogenic_ana = pathlib.Path(args.nonpathogenic)
    compare_ana = pathlib.Path(args.compare)
    #define dict for y axis ticks
    # Maps raw GAT annotation names to the human-readable labels used below.
    tick_dict = {'HMM_Active_Enhancer':'Activer Enhancer',
                 'HMM_Insulator':'Insulator',
                 'HMM_Poised_Promotor':'Poised Promotor',
                 'HMM_Strong_Enhancer':'Strong Enhancer',
                 'HMM_Transcriptional_elongation':'Transcriptional Elongation',
                 'HMM_Transcriptional_transition':'Transcriptional Transition',
                 'HMM_Weak_Enhancer':'Weak Enhancer',
                 'HMM_Weak_Promotor':'Weak Promotor',
                 'HMM_Weak_Repetitive':'Repetitiv',
                 'HMM_Weak_Transcribed':'Weak Transcribed',
                 'Crossover_Paternal_Cov_75':'Crossover Peaks Paternal 75%',
                 'Crossover_Paternal_Cov_90':'Crossover Peaks Paternal 90%',
                 'Crossover_Maternal_Cov_75':'Crossover Peaks Maternal 75%',
                 'Crossover_Maternal_Cov_90':'Crossover Peaks Maternal 90%',
                 'all_enhancer_75_conservation':r'Conserved Enhancer',
                 'all_enhancer_90_conservation':r'Highly Conserved Enhancer',
                 'fantom_5_all_enhancer':'Enhancer',
                 'fantom_5_brain_enhancer':'Brain',
                 'Phylop_Negative':'Negative PhyloP Enhancer',
                 'Phylop_Positive':'Positive PhyloP Enhancer',
                 'Phylop_Positive_75':r'Conserved Enhancer',
                 'Phylop_Positive_90':r'Enhancer with PhyloP $\geq$ 90 Percentile',
                 'TADS_high_conservation_enhancer':'TADS containing an enhancer with Phastcon > 0.9',
                 'TADs_high_mean_conservation':'TADS with mean Phastcon > 0.2',
                 'TADs_high_pLI_genes':'TADS containing a gene with pLI=1',
                 'TADs_high_mean_pLI':'TADS with mean pLI > 0.8',
                 'gnomAD_genes':'Genes',
                 'telomeres':'Telomeric Regions',
                 'TADs_with_phastcon_1_enhancer':'TADs with Phastcon = 1',
                 'TADs_with_phastcon_0.9_enhancer':'TADs with Phastcon >= 0.9',
                 'TADs_with_phastcon_0.5_enhancer':'TADs with Phastcon >= 0.5',
                 'TADs_with_phastcon_0.1_enhancer':'TADs with Phastcon >= 0.1',
                 'TADs_with_phastcon_0_enhancer':'TADs with Phastcon >= 0',
                 'TADs_with_pLI_1_genes':'TADs with pLI = 1',
                 'TADs_with_pLI_0.9_genes':'TADs with pLI >= 0.9',
                 'TADs_with_pLI_0.5_genes':'TADS with pLI >= 0.5',
                 'TADs_with_pLI_0.51_genes':'TADS with pLI >= 0.1',
                 'TADs_with_pLI_0_genes':'TADS with pLI >= 0',
                 'tads_without_functional_elements':'TADs without genes and enhancers',
                 'CTCF': 'CTCF Binding Sites',
                 'DDG2P_genes': 'DDG2P Genes',
                 'high_HI':r'HI Genes',
                 'low_HI':r'HS Genes',
                 'high_loeuf':r'pLoF Tolerant Genes',
                 'low_loeuf':r'pLoF Intolerant Genes',
                 'Segmental_duplications':'Segmental Duplications',
                 'TAD_boundaries':'TAD Boundaries',
                 'high_pLI':r'GnomAD Genes with pLI $\geq$ 0.9',
                 'low_pLI':r'GnomAD Genes with pLI $\leq$ 0.1',
                 }
    # load pathogenic data into a df
    # cols fixes the display order of the 12 plotted annotations (bottom-up).
    cols = ['Telomeric Regions','TAD Boundaries','CTCF Binding Sites','Highly Conserved Enhancer','Conserved Enhancer','Enhancer','HI Genes','HS Genes','pLoF Intolerant Genes','pLoF Tolerant Genes','DDG2P Genes','Genes']
    annotation_dict = {annotation:idx for idx,annotation in enumerate(cols)}
    patho_df = pd.read_csv(pathogenic_ana,header=0,sep='\t')
    # Two passes: raw name -> readable label -> ordering index.
    patho_df['annotation'] = [tick_dict[annotation] for annotation in patho_df['annotation']]
    patho_df['annotation'] = [annotation_dict[annotation] for annotation in patho_df['annotation']]
    patho_df.sort_values(by='annotation',inplace=True)
    patho_df.reset_index(inplace=True)
    patho_df['label'] = 'patho'
    # load non-pathogenic data into a df
    nonpatho_df = pd.read_csv(nonpathogenic_ana,header=0,sep='\t')
    nonpatho_df['annotation'] = [tick_dict[annotation] if annotation in tick_dict else annotation for annotation in nonpatho_df['annotation']]
    nonpatho_df['annotation'] = [annotation_dict[annotation] for annotation in nonpatho_df['annotation']]
    nonpatho_df.sort_values(by='annotation',inplace=True)
    nonpatho_df.reset_index(inplace=True)
    nonpatho_df['label'] = 'non_patho'
    # Stacked patho-then-nonpatho rows; index order must match the bar order
    # built below (used for greying out non-significant bars).
    merged_df = pd.concat([patho_df,nonpatho_df])
    merged_df.reset_index(inplace=True)
    # create figure
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, gridspec_kw = {'width_ratios':[5, 1]}, figsize=(15,12))
    # set barheight
    barheight = 0.40
    bar_padding_1 = 0.05
    bar_padding_2 = 0.1  # NOTE(review): unused
    bars = np.arange(patho_df.shape[0])
    # Paired bars per annotation: pathogenic bar, then the non-pathogenic
    # bar offset above it; bar_ticks marks the midpoint of each pair.
    patho_bars = [bar + barheight for bar in bars]
    nonpatho_bars = [bar + barheight + bar_padding_1 for bar in patho_bars]
    bar_ticks = [bar + barheight/2 + bar_padding_1/2 for bar in patho_bars]
    f.subplots_adjust(hspace=0.025, wspace=0.05)
    # ax1.grid(b=True,axis='both',which='major', color='lightgrey', linewidth=1.0)
    ax1.barh(patho_bars, patho_df['l2fold'], height=barheight, color = '#913a1d', linewidth=1.5,edgecolor='#781e00')
    ax1.barh(nonpatho_bars, nonpatho_df['l2fold'], height=barheight, color = '#1d9191', linewidth=1.5,edgecolor='#007878')
    #sns.barplot(x="l2fold", y="annotation", data=df, color='#007878',ax=ax1)
    ax1.set_xlim(-1.5,1.5)
    # NOTE(review): hard-coded for the 12 annotations in cols.
    ax1.set_ylim(0,13)
    ax1.set_ylabel('')
    ax1.set_xlabel('log2 FoldChange',fontsize=15)
    ax1.spines['left'].set_visible(False)
    ax1.spines['top'].set_visible(False)
    ax1.spines['right'].set_visible(False)
    ax1.yaxis.set_ticks_position('none')
    ax1.axvline(0,linewidth=1, color='black')
    ax1.set_axisbelow(True)
    #ax1.axhline(nonpatho_bars[0] + barheight/2 + 0.05, xmax = 0.8, linewidth=1,color='#b9bab7')
    # Light separator line above each annotation pair.
    for bar in nonpatho_bars:
        ax1.axhline(bar + barheight/2 + 0.05,linewidth=1,color='#b9bab7')
    # read in file with compared counts
    compare_df = pd.read_csv(compare_ana,header=0,sep='\t',comment='#')
    compare_df['annotation'] = [tick_dict[annotation] if annotation in tick_dict else annotation for annotation in compare_df['annotation']]
    compare_df['annotation'] = [annotation_dict[annotation] for annotation in compare_df['annotation']]
    compare_df.sort_values(by='annotation',inplace=True)
    compare_df.reset_index(inplace=True)
    # Square-marker area scales with |observed| fold, capped at 200;
    # non-significant markers are drawn translucent.
    sizes = [min(200,abs(300*fold)) for fold in compare_df['observed']]
    for idx,size in enumerate(sizes):
        facecolors = 'black'
        alpha = 0.5
        if compare_df['qvalue'][idx] <= args.qvalue:
            alpha = 1.0
        ax2.scatter(x=0,y=bar_ticks[idx],marker='s',s=size,color='black',linewidths=2,alpha=alpha,facecolors=facecolors)
    ax2.set_frame_on(False) #Remove both axes
    ax2.get_yaxis().set_visible(False)
    ax2.get_xaxis().set_visible(False)
    handles, labels = plt.gca().get_legend_handles_labels()
    by_label = OrderedDict(zip(labels, handles))
    ax2.set_title('Overlap Difference',fontsize=15)
    # Grey out bars whose enrichment is not significant at the q threshold.
    childrenLS=ax1.get_children()
    barlist=list(filter(lambda x: isinstance(x, matplotlib.patches.Rectangle), childrenLS))
    for idx, qvalue in enumerate(merged_df['qvalue']):
        if qvalue > args.qvalue:
            barlist[idx].set_color('#bdbebf')
    patho_patch = mpatches.Patch(color='#913a1d', label='Pathogenic')
    non_patho_patch = mpatches.Patch(color='#1d9191', label='Non-Pathogenic')
    legend = ax2.legend(by_label.values(), by_label.keys(),loc='center left',bbox_to_anchor=(1,0.5),framealpha=0.05,labelspacing=1)
    plt.setp(legend.get_title(),fontsize=15)
    ax1.set_yticks(bar_ticks)
    ax1.set_yticklabels(labels=cols)
    ax1.tick_params(labelsize=15)
    ax1.legend(handles=[patho_patch,non_patho_patch],loc='lower right',labelspacing=1,fontsize=15,bbox_to_anchor=(1,0.15),borderaxespad=0.7)
    f.savefig(f'{args.output}.png',bbox_inches='tight')
if __name__=='__main__':
    main()
| 2.59375 | 3 |
scripts/split_csv.py | n3011/deepchem | 14 | 12760177 | <reponame>n3011/deepchem
"""
Splits large CSVs into multiple shards.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import argparse
import gzip
import pandas as pd
def parse_args(input_args=None):
    """Parse command-line options for the CSV sharding script.

    Args:
        input_args: optional list of argument strings; defaults to
            ``sys.argv[1:]`` (standard argparse behaviour).

    Returns:
        argparse.Namespace with ``csv_file``, ``shard_size`` (int),
        ``out`` and ``gzip_output`` (bool) attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--csv-file", required=True,
        help="Name of input CSV file.")
    # Help text fixed: shard-size is the number of *rows per shard*, not
    # the number of shards (see pd.read_csv(chunksize=...) in shard_csv).
    parser.add_argument(
        "--shard-size", required=True, type=int,
        help="Number of rows per output shard.")
    parser.add_argument(
        "--out", required=True,
        help="Root name of output CSV shards.")
    parser.add_argument(
        "--gzip-output", action="store_true",
        help="Gzip the output.")
    return parser.parse_args(input_args)
def shard_csv(input_file, shard_size, out_name, gzip_output):
    """Split a CSV file into shards of ``shard_size`` rows each.

    Args:
        input_file: path to the input CSV; ``.gz`` inputs are decompressed
            transparently by pandas' compression inference.
        shard_size: number of rows per output shard.
        out_name: root name of the shards; shard ``i`` is written to
            ``<out_name>_<i>.csv`` (``.csv.gz`` when gzip_output is set).
        gzip_output: if True, gzip-compress each output shard.
    """
    compression = "gzip" if gzip_output else None
    # Bug fix: suffix was "czv.gz" (typo), producing files like out_0.czv.gz.
    suffix = "csv.gz" if gzip_output else "csv"
    # pandas opens (and decompresses) the file itself, so the manual
    # file_obj open/close bookkeeping of the old version was dead code;
    # chunksize keeps memory bounded for large inputs.
    for shard_num, df_shard in enumerate(
            pd.read_csv(input_file, index_col=0, chunksize=shard_size)):
        output_name = "%s_%d.%s" % (out_name, shard_num, suffix)
        print("Writing output to %s" % output_name)
        df_shard.to_csv(output_name, compression=compression)
def main():
    """Entry point: parse the CLI options and shard the input CSV."""
    args = parse_args()
    shard_csv(args.csv_file, args.shard_size, args.out, args.gzip_output)
if __name__ == '__main__':
    main()
| 3.03125 | 3 |
src/systems/base_system.py | alextamkin/dabs | 63 | 12760178 | <reponame>alextamkin/dabs
import os
from abc import abstractmethod
import pytorch_lightning as pl
import torch
from omegaconf import DictConfig
from torch.utils.data import DataLoader, Dataset, IterableDataset
from src.datasets.catalog import DATASET_DICT
from src.models import transformer
def get_model(config: DictConfig, dataset_class: Dataset):
    '''Retrieves the specified model class, given the dataset class.'''
    # Guard clause: only the domain-agnostic transformer is supported.
    encoder_name = config.model.name
    if encoder_name != 'transformer':
        raise ValueError(f'Encoder {config.model.name} doesn\'t exist.')
    model_class = transformer.DomainAgnosticTransformer
    # Instantiate with the dataset-specific input specs plus any extra
    # keyword arguments declared in the config.
    return model_class(input_specs=dataset_class.spec(), **config.model.kwargs)
class BaseSystem(pl.LightningModule):
    def __init__(self, config: DictConfig):
        '''An abstract class that implements some shared functionality for training.

        Args:
            config: a hydra config
        '''
        super().__init__()
        self.config = config
        # Dataset *class* (not instance); instantiated per split in setup().
        self.dataset = DATASET_DICT[config.dataset.name]
        self.model = get_model(config, self.dataset)
    @abstractmethod
    def objective(self, *args):
        '''Computes the loss and accuracy.'''
        pass
    @abstractmethod
    def forward(self, batch):
        '''Runs the model on a batch; implemented by concrete systems.'''
        pass
    @abstractmethod
    def training_step(self, batch, batch_idx):
        '''Lightning hook: one optimization step; implemented by subclasses.'''
        pass
    @abstractmethod
    def validation_step(self, batch, batch_idx):
        '''Lightning hook: one validation step; implemented by subclasses.'''
        pass
    def setup(self, stage):
        '''Called right after downloading data and before fitting model, initializes datasets with splits.'''
        self.train_dataset = self.dataset(base_root=self.config.data_root, download=True, train=True)
        self.val_dataset = self.dataset(base_root=self.config.data_root, download=True, train=False)
        try:
            print(f'{len(self.train_dataset)} train examples, {len(self.val_dataset)} val examples')
        except TypeError:
            # IterableDataset subclasses may not implement __len__.
            print('Iterable/streaming dataset- undetermined length.')
    def train_dataloader(self):
        '''Builds the training DataLoader from the config's batch settings.'''
        return DataLoader(
            self.train_dataset,
            batch_size=self.config.dataset.batch_size,
            num_workers=self.config.dataset.num_workers,
            # Shuffling is unsupported (and invalid) for streaming datasets.
            shuffle=not isinstance(self.train_dataset, IterableDataset),
            drop_last=True,
            pin_memory=True,
        )
    def val_dataloader(self):
        '''Builds the validation DataLoader; raises if no validation data exists.'''
        # NOTE(review): relies on dataset truthiness (len() == 0 or None);
        # an IterableDataset without __len__ may raise here -- confirm.
        if not self.val_dataset:
            raise ValueError('Cannot get validation data for this dataset')
        return DataLoader(
            self.val_dataset,
            batch_size=self.config.dataset.batch_size,
            num_workers=self.config.dataset.num_workers,
            shuffle=False,
            drop_last=False,
            pin_memory=True,
        )
    def configure_optimizers(self):
        '''Creates the optimizer (AdamW or SGD) over trainable parameters.'''
        params = [p for p in self.parameters() if p.requires_grad]
        if self.config.optim.name == 'adam':
            # 'adam' in the config actually selects AdamW (decoupled decay).
            optim = torch.optim.AdamW(params, lr=self.config.optim.lr, weight_decay=self.config.optim.weight_decay)
        elif self.config.optim.name == 'sgd':
            optim = torch.optim.SGD(
                params,
                lr=self.config.optim.lr,
                weight_decay=self.config.optim.weight_decay,
                momentum=self.config.optim.momentum,
            )
        else:
            raise ValueError(f'{self.config.optim.name} optimizer unrecognized.')
        return optim
    def on_train_end(self):
        '''Saves the whole system (module + config) next to the checkpoints.'''
        model_path = os.path.join(self.trainer.checkpoint_callback.dirpath, 'model.ckpt')
        # Pickles the entire LightningModule, not just a state_dict.
        torch.save(self, model_path)
        print(f'Pretrained model saved to {model_path}')
| 2.640625 | 3 |
test/test_dephead.py | ulf1/nlptasks | 2 | 12760179 | import nlptasks as nt
import nlptasks.dephead
def test_01():
    """spacy-de: dependency-head masks for one 8-token sentence."""
    tokens = [[
        "Der", "Helmut", "Kohl", "spe<PASSWORD>", "<PASSWORD>", "mit", "Kohl", "."]]
    expected = [
        (46, 0), (46, 1), (47, 2), (47, 3), (47, 4), (47, 5), (49, 6), (47, 7),
        (19, 0), (31, 1), (36, 2), (42, 3), (21, 4), (17, 5), (19, 6), (32, 7)]
    masks, lengths = nt.dephead.factory("spacy-de")(tokens)
    assert lengths == [8]
    assert all(pair in expected for pair in masks[0])
def test_02():  # check if calling pad_dephead causes an error
    """spacy-de: masks stay valid after maxlen truncation/padding."""
    tokens = [[
        "Der", "Helmut", "Kohl", "<PASSWORD>", "<PASSWORD>", "mit", "Kohl", "."]]
    expected = [
        (46, 0), (46, 1), (47, 2), (47, 3), (47, 4), (47, 5), (49, 6), (47, 7),
        (19, 0), (31, 1), (36, 2), (42, 3), (21, 4), (17, 5), (19, 6), (32, 7)]
    masks, lengths = nt.dephead.factory("spacy-de")(
        tokens, maxlen=6, padding='post', truncating='post')
    assert lengths == [8]
    assert all(pair in expected for pair in masks[0])
    # All token positions must fall inside the truncated window.
    assert all(pair[1] < 6 for pair in masks[0])
def test_03():  # preload model
    """spacy-de: passing a preloaded model yields the same masks."""
    tokens = [[
        "Der", "Helmut", "Kohl", "speist", "Schweinshaxe", "mit", "Kohl", "."]]
    expected = [
        (46, 0), (46, 1), (47, 2), (47, 3), (47, 4), (47, 5), (49, 6), (47, 7),
        (19, 0), (31, 1), (36, 2), (42, 3), (21, 4), (17, 5), (19, 6), (32, 7)]
    identifier = "spacy-de"
    preloaded = nt.dephead.get_model(identifier)
    dephead_fn = nt.dephead.factory(identifier)
    masks, lengths = dephead_fn(tokens, model=preloaded)
    assert lengths == [8]
    assert all(pair in expected for pair in masks[0])
def test_11():
    """stanza-de: dependency-head masks for one 8-token sentence."""
    tokens = [[
        "Der", "Helmut", "Kohl", "spe<PASSWORD>", "Sch<PASSWORD>", "mit", "Kohl", "."]]
    expected = [
        (64, 0), (66, 1), (64, 2), (62, 3), (66, 4), (69, 5), (67, 6), (66, 7),
        (24, 0), (45, 1), (35, 2), (58, 3), (49, 4), (9, 5), (42, 6), (56, 7)]
    masks, lengths = nt.dephead.factory("stanza-de")(tokens)
    assert lengths == [8]
    assert all(pair in expected for pair in masks[0])
def test_12():  # check if calling pad_dephead causes an error
    """stanza-de: masks stay valid after maxlen truncation/padding."""
    tokens = [[
        "Der", "Helmut", "Kohl", "spe<PASSWORD>", "Sch<PASSWORD>", "mit", "Kohl", "."]]
    expected = [
        (64, 0), (66, 1), (64, 2), (62, 3), (66, 4), (69, 5), (67, 6), (66, 7),
        (24, 0), (45, 1), (35, 2), (58, 3), (49, 4), (9, 5), (42, 6), (56, 7)]
    masks, lengths = nt.dephead.factory("stanza-de")(
        tokens, maxlen=6, padding='post', truncating='post')
    assert lengths == [8]
    assert all(pair in expected for pair in masks[0])
    # All token positions must fall inside the truncated window.
    assert all(pair[1] < 6 for pair in masks[0])
def test_13():  # preload model
    """stanza-de: passing a preloaded model yields the same masks."""
    tokens = [[
        "Der", "Hel<PASSWORD>", "<PASSWORD>", "spe<PASSWORD>", "<PASSWORD>", "mit", "<PASSWORD>", "."]]
    expected = [
        (64, 0), (66, 1), (64, 2), (62, 3), (66, 4), (69, 5), (67, 6), (66, 7),
        (24, 0), (45, 1), (35, 2), (58, 3), (49, 4), (9, 5), (42, 6), (56, 7)]
    identifier = "stanza-de"
    preloaded = nt.dephead.get_model(identifier)
    dephead_fn = nt.dephead.factory(identifier)
    masks, lengths = dephead_fn(tokens, model=preloaded)
    assert lengths == [8]
    assert all(pair in expected for pair in masks[0])
| 2.53125 | 3 |
openslides/utils/tornado_webserver.py | DerPate/OpenSlides | 1 | 12760180 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import os
import posixpath
from urllib import unquote
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler as Django_WSGIHandler
from django.utils.translation import ugettext as _
from sockjs.tornado import SockJSRouter, SockJSConnection
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.options import parse_command_line
from tornado.web import (
Application,
FallbackHandler,
StaticFileHandler,
HTTPError
)
from tornado.wsgi import WSGIContainer
class DjangoStaticFileHandler(StaticFileHandler):
    """Handles static data by using the django finders."""
    def initialize(self):
        """Overwrite some attributes."""
        # NOTE: root is never actually used and default_filename is not
        # supported (must always be None)
        self.root = u''
        self.default_filename = None
    @classmethod
    def get_absolute_path(cls, root, path):
        """Resolve *path* via Django's staticfiles finders (ignores *root*)."""
        # Imported lazily so Django settings are configured before use.
        from django.contrib.staticfiles import finders
        normalized_path = posixpath.normpath(unquote(path)).lstrip('/')
        absolute_path = finders.find(normalized_path)
        # May be None when no finder matches; validate_absolute_path 404s.
        return absolute_path
    def validate_absolute_path(self, root, absolute_path):
        """Return *absolute_path* if it is an existing regular file, else raise."""
        # differences from base implementation:
        # - we ignore self.root since our files do not necessarily have
        #   a shared root prefix
        # - we do not handle self.default_filename (we do not use it and it
        #   does not make much sense here anyway)
        if absolute_path is None or not os.path.exists(absolute_path):
            raise HTTPError(404)
        if not os.path.isfile(absolute_path):
            raise HTTPError(403, 'The requested resource is not a file.')
        return absolute_path
class ProjectorSocketHandler(SockJSConnection):
    """SockJS connection for the projector.

    ``waiters`` is a class-level registry of all currently open
    connections; ``send_updates`` broadcasts a payload to each of them.
    """
    waiters = set()
    def on_open(self, info):
        # Register this connection so it receives projector updates.
        type(self).waiters.add(self)
    def on_close(self):
        type(self).waiters.remove(self)
    @classmethod
    def send_updates(cls, data):
        # Fan the payload out to every open connection.
        for connection in cls.waiters:
            connection.send(data)
def run_tornado(addr, port, reload=False):
    """Start the OpenSlides tornado webserver (blocking).

    Serves the Django WSGI app, static/media files and the SockJS routes
    for projector updates and the chatbox.  Python 2 code (``print``
    statement, ``urllib.unquote``).
    """
    # Don't try to read the command line args from openslides
    parse_command_line(args=[])
    # Print listening address and port to command line
    if addr == '0.0.0.0':
        url_string = _("the machine's local ip address")
    else:
        url_string = 'http://%s:%s' % (addr, port)
    print _("Starting OpenSlides' tornado webserver listening to %(url_string)s") % {'url_string': url_string}
    # Setup WSGIContainer
    app = WSGIContainer(Django_WSGIHandler())
    # Collect urls
    # NOTE(review): 'projectpr' looks like a typo for 'projector' (local
    # name only, no functional impact).
    projectpr_socket_js_router = SockJSRouter(ProjectorSocketHandler, '/projector/socket')
    # Imported here to avoid a circular import at module load time --
    # TODO confirm.
    from openslides.core.chatbox import ChatboxSocketHandler
    chatbox_socket_js_router = SockJSRouter(ChatboxSocketHandler, '/core/chatbox')
    other_urls = [
        (r"%s(.*)" % settings.STATIC_URL, DjangoStaticFileHandler),
        (r'%s(.*)' % settings.MEDIA_URL, StaticFileHandler, {'path': settings.MEDIA_ROOT}),
        ('.*', FallbackHandler, dict(fallback=app))]
    # Start the application
    tornado_app = Application(projectpr_socket_js_router.urls + chatbox_socket_js_router.urls + other_urls, debug=reload)
    server = HTTPServer(tornado_app)
    server.listen(port=port, address=addr)
    # Blocks until the IOLoop is stopped.
    IOLoop.instance().start()
| 2.09375 | 2 |
release-version-bulk-rename.py | drivetoimprove/jira-scripts | 0 | 12760181 | <gh_stars>0
import click
from jira import JIRA
from contextManagerRequests import no_ssl_verification
@click.command()
@click.option('--source', prompt='Source Project', help='Origin of the data, jira project XXX')
@click.option('--prefix', prompt='Prefix', help='Prefix for the release names')
def rename(source, prefix):
    """Prepend ``prefix`` to every release version name of a Jira project."""
    with no_ssl_verification():
        client = JIRA('https://your.jira.server.com/jira')
        for release in client.project_versions(source):
            renamed = prefix + '_' + release.name
            client.rename_version(source, release.name, renamed)
            click.echo('%s version renamed to %s.' % (release.name, renamed))
if __name__ == '__main__':
    rename()
| 2.546875 | 3 |
hw/2018_fall/hw1/solution/hw1.py | samuelcheang0419/cme193 | 15 | 12760182 | import numpy as np
import scipy
import scipy.sparse
import matplotlib.pyplot as plt
# part 2 - read sparse matrix from csv file
def read_coo(fname):
    """Read a sparse matrix from a ``row,col,value`` CSV file in COO format."""
    data = np.loadtxt(fname, delimiter=',')
    row_idx = np.array(data[:, 0], int)
    col_idx = np.array(data[:, 1], int)
    vals = np.array(data[:, 2])
    return scipy.sparse.coo_matrix((vals, (row_idx, col_idx)))
# Load the assignment's stochastic block model adjacency matrix (COO CSV).
A = read_coo("../sbm.csv")
# part 3 - create sparse + Rank-1 class
class sparse_rank1(object):
    """Implicit representation of ``S + alpha * u v^T``.

    Keeps the sparse part and the rank-1 factors separate so that
    matrix-vector products never form the dense rank-1 term.
    """
    def __init__(self, S, alpha, u, v):
        self.S = S
        self.alpha = alpha
        self.u = u
        self.v = v
        # Mirror the sparse part's shape so callers can treat this like S.
        self.shape = S.shape
    def dot(self, x):
        """Return ``(S + alpha * u v^T) @ x`` without densifying."""
        rank1_term = self.alpha * self.u * self.v.dot(x)
        return self.S.dot(x) + rank1_term
# part 4 - power method
def power_method(A, tol=1e-8, maxiter=10000):
    """Estimate the dominant eigenpair of ``A`` by power iteration.

    Args:
        A: square symmetric operator exposing ``.dot`` and ``.shape``
            (dense array, sparse matrix, or ``sparse_rank1``).
        tol: stop once successive Rayleigh quotients differ by less than
            this (keyword argument for convergence tolerance).
        maxiter: safety cap on the number of iterations so that a slowly
            converging (or non-converging) input cannot hang the caller.
            Backward compatible: the previous version looped unboundedly.

    Returns:
        (v, lam): unit-norm eigenvector estimate and its Rayleigh quotient.
    """
    # rayleigh quotient: returns v^T * A * v (v assumed unit norm)
    def rq(v, A):
        return v.dot(A.dot(v))
    n = A.shape[1]
    # Random unit-norm start vector; almost surely not orthogonal to the
    # dominant eigenvector.
    v = np.random.normal(0, 1, n)
    v /= np.linalg.norm(v)
    rqs = [rq(v, A)]  # keep track of rayleigh quotients as we progress
    for _ in range(maxiter):
        # v <- A*v, then renormalize
        v = A.dot(v)
        v /= np.linalg.norm(v)
        rqs.append(rq(v, A))
        # check if rayleigh quotient has converged
        if np.abs(rqs[-1] - rqs[-2]) < tol:
            break
    # set eigenvalue
    lam = rqs[-1]
    return v, lam
# part 5 - spectral embedding
# First eigenpair of A, then deflate (A - lam * v v^T) so the power method
# converges to the second eigenvector; scatter of (v1, v2) gives the
# 2-D spectral embedding of the SBM graph.
v, lam = power_method(A)
B = sparse_rank1(A, -lam, v, v)
v2, lam2 = power_method(B)
fig, ax = plt.subplots(1, 1, figsize=(10, 8))
ax.scatter(v, v2)
ax.set_xlabel(r'$v_1$')
ax.set_ylabel(r'$v_2$')
ax.set_title("Spectral Embedding")
plt.savefig('sbm.png')
plt.show()
| 3.171875 | 3 |
wxPython/wxGlade-0.9.1/tests/casefiles/bug194.py | DarkShadow4/python | 0 | 12760183 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# generated by wxGlade
#
import wx
# begin wxGlade: dependencies
import gettext
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class Frame194(wx.Frame):
    """wxGlade-generated frame exercising the ListBox/CheckListBox
    selection styles (wxLB_SINGLE / wxLB_MULTIPLE / wxLB_EXTENDED).

    The ``begin/end wxGlade`` marker blocks are regenerated by wxGlade;
    do not hand-edit inside them.
    """
    def __init__(self, *args, **kwds):
        # begin wxGlade: Frame194.__init__
        kwds["style"] = kwds.get("style", 0)
        wx.Frame.__init__(self, *args, **kwds)
        self.SetSize((800, 600))
        self.list_box_single = wx.ListBox(self, wx.ID_ANY, choices=[_("Listbox wxLB_SINGLE")])
        self.list_box_multiple = wx.ListBox(self, wx.ID_ANY, choices=[_("Listbox wxLB_MULTIPLE")], style=wx.LB_MULTIPLE)
        self.list_box_extended = wx.ListBox(self, wx.ID_ANY, choices=[_("Listbox wxLB_EXTENDED")], style=wx.LB_EXTENDED)
        self.check_list_box_single = wx.CheckListBox(self, wx.ID_ANY, choices=[_("CheckListBox wxLB_SINGLE")], style=wx.LB_SINGLE)
        self.check_list_box_multiple = wx.CheckListBox(self, wx.ID_ANY, choices=[_("CheckListBox wxLB_MULTIPLE")], style=wx.LB_MULTIPLE)
        self.check_list_box_extended = wx.CheckListBox(self, wx.ID_ANY, choices=[_("CheckListBox wxLB_EXTENDED")], style=wx.LB_EXTENDED)

        self.__set_properties()
        self.__do_layout()
        # end wxGlade

    def __set_properties(self):
        # begin wxGlade: Frame194.__set_properties
        self.SetTitle(_("frame_1"))
        self.list_box_single.SetSelection(0)
        self.list_box_multiple.SetSelection(0)
        self.list_box_extended.SetSelection(0)
        self.check_list_box_single.SetSelection(0)
        self.check_list_box_multiple.SetSelection(0)
        self.check_list_box_extended.SetSelection(0)
        # end wxGlade

    def __do_layout(self):
        # begin wxGlade: Frame194.__do_layout
        sizer_1 = wx.GridSizer(2, 3, 0, 0)
        sizer_1.Add(self.list_box_single, 1, wx.ALL | wx.EXPAND, 5)
        sizer_1.Add(self.list_box_multiple, 1, wx.ALL | wx.EXPAND, 5)
        sizer_1.Add(self.list_box_extended, 1, wx.ALL | wx.EXPAND, 5)
        sizer_1.Add(self.check_list_box_single, 1, wx.ALL | wx.EXPAND, 5)
        sizer_1.Add(self.check_list_box_multiple, 1, wx.ALL | wx.EXPAND, 5)
        sizer_1.Add(self.check_list_box_extended, 1, wx.ALL | wx.EXPAND, 5)
        self.SetSizer(sizer_1)
        self.Layout()
        # end wxGlade

# end of class Frame194
class MyApp(wx.App):
    """Application bootstrap: creates and shows the demo frame."""
    def OnInit(self):
        self.Bug194_Frame = Frame194(None, wx.ID_ANY, "")
        self.SetTopWindow(self.Bug194_Frame)
        self.Bug194_Frame.Show()
        return True

# end of class MyApp
if __name__ == "__main__":
    # gettext.install puts the ``_`` translation function into builtins,
    # which the generated Frame194 code relies on.
    gettext.install("app") # replace with the appropriate catalog name

    app = MyApp(0)
    app.MainLoop()
| 2.125 | 2 |
Py exercises/My code/CSS exercises/06_modify each item in a lol using pop and insert.py | arvindkarir/python-pandas-code | 0 | 12760184 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 29 10:35:53 2017
@author: User
"""
# Sample data: a ragged list of integer lists used by the demo below.
L = [[1,2], [35,4,78], [7,8,1,10],[2,3], [9,1,45]]
#def testA(L):
# aFactor = 2
# for m in L:
# for ind, element in enumerate(m):
# newElement = element*aFactor
# m.pop(ind) # this works
# m.insert(ind, newElement) # this works
# return L
#
#print(testA(L))
# and this works as well
def testA(L):
for m in L:
for ind, element in enumerate(m):
m[ind] *= 2
return L
# Demo: doubles every element in place and prints the result.
print(testA(L))
pointnet_model.py | BowenRaymone/KaggleLyftCompetition | 2 | 12760185 | # Thanks to KKiller on Kaggle for designing this model.
from torch.utils.data import Dataset, DataLoader
from abc import ABC
from pathlib import Path
from numcodecs import blosc
import pandas as pd, numpy as np
import bisect
import itertools as it
from tqdm import tqdm
import logzero
import json
import torch
from torch import nn, optim
import torch.nn.functional as F
from torch.autograd import Variable
from pytorch_lightning import Trainer
from pytorch_lightning import LightningModule
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
import pickle, copy, re, time, datetime, random, warnings, gc
import zarr
# All tunable hyper-parameters live in parameters.json next to this file.
with open('parameters.json') as json_file:
    JSON_PARAMETERS = json.load(json_file)

DATA_ROOT = Path("/data/lyft-motion-prediction-autonomous-vehicles")
TRAIN_ZARR = JSON_PARAMETERS["TRAIN_ZARR"]
VALID_ZARR = JSON_PARAMETERS["VALID_ZARR"]

# Temporal windowing: history (backward) and horizon (forward) lengths,
# number of target frames per scene and the stride between them.
HBACKWARD = JSON_PARAMETERS["HBACKWARD"]
HFORWARD = JSON_PARAMETERS["HFORWARD"]
NFRAMES = JSON_PARAMETERS["NFRAMES"]
FRAME_STRIDE = JSON_PARAMETERS["FRAME_STRIDE"]
AGENT_FEATURE_DIM = JSON_PARAMETERS["AGENT_FEATURE_DIM"]
MAX_AGENTS = JSON_PARAMETERS["MAX_AGENTS"]

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
NUM_WORKERS = JSON_PARAMETERS["NUM_WORKERS"]
BATCH_SIZE = JSON_PARAMETERS["BATCH_SIZE"]
EPOCHS=JSON_PARAMETERS["EPOCHS"]
GRADIENT_CLIP_VAL = JSON_PARAMETERS["GRADIENT_CLIP_VAL"]
LIMIT_VAL_BATCHES = JSON_PARAMETERS["LIMIT_VAL_BATCHES"]

# Output directory for checkpoints/logs; created up front.
ROOT = JSON_PARAMETERS["ROOT"]
Path(ROOT).mkdir(exist_ok=True, parents=True)
def get_utc():
    """Return the current UTC time as an ISO-like timestamp string."""
    now = datetime.datetime.now(datetime.timezone.utc)
    return now.strftime(r"%Y-%m-%dT%H:%M:%S%Z")
# Agent class labels from the Lyft dataset, and the subset this model keeps.
PERCEPTION_LABELS = JSON_PARAMETERS["PERCEPTION_LABELS"]
KEPT_PERCEPTION_LABELS = JSON_PARAMETERS["KEPT_PERCEPTION_LABELS"]
# Map each kept label to its index in the full label list.
KEPT_PERCEPTION_LABELS_DICT = {label:PERCEPTION_LABELS.index(label) for label in KEPT_PERCEPTION_LABELS}
KEPT_PERCEPTION_KEYS = sorted(KEPT_PERCEPTION_LABELS_DICT.values())
class LabelEncoder:
    """Map hashable keys to dense integer ids, up to a fixed capacity.

    Keys seen first get the lowest ids; once ``max_size`` labels are
    assigned, further keys are ignored and map to ``default_val``.
    """

    def __init__(self, max_size=500, default_val=-1):
        self.max_size = max_size
        self.labels = {}
        self.default_val = default_val

    @property
    def nlabels(self):
        """Number of ids assigned so far."""
        return len(self.labels)

    def reset(self):
        """Drop every assigned id."""
        self.labels = {}

    def partial_fit(self, keys):
        """Assign ids to previously unseen keys, respecting capacity."""
        start = self.nlabels
        room = self.max_size - start
        if room < 1:
            return
        unseen = list(set(keys) - set(self.labels))
        if not unseen:
            return
        # zip truncates, so at most ``room`` new keys get ids.
        self.labels.update(zip(unseen, range(start, start + room)))

    def fit(self, keys):
        """Forget everything, then fit from scratch."""
        self.reset()
        self.partial_fit(keys)

    def get(self, key):
        """Id of ``key``, or ``default_val`` if it was never assigned one."""
        return self.labels.get(key, self.default_val)

    def transform(self, keys):
        """Vectorised ``get`` returning a numpy array of ids."""
        return np.array([self.get(k) for k in keys])

    def fit_transform(self, keys, partial=True):
        if partial:
            self.partial_fit(keys)
        else:
            self.fit(keys)
        return self.transform(keys)
class CustomLyftDataset(Dataset):
    # Per-feature min/max normalisation bounds (centroid x/y, extent x/y/z,
    # yaw, velocity x/y), broadcastable over (frame, agent, history) axes.
    feature_mins = np.array([-17.336, -27.137, 0. , 0., 0. , -3.142, -37.833, -65.583],
                            dtype="float32")[None,None, None]

    feature_maxs = np.array([17.114, 20.787, 42.854, 42.138, 7.079, 3.142, 29.802, 35.722],
                            dtype="float32")[None,None, None]

    def __init__(self, zdataset, scenes=None, nframes=10, frame_stride=15, hbackward=10,
                 hforward=50, max_agents=150, agent_feature_dim=8):
        """
        Custom Lyft dataset reader.

        Parameters:
        ----------
        zdataset: zarr dataset
            The root dataset, containing scenes, frames and agents

        nframes: int
            Number of frames per scene

        frame_stride: int
            The stride when reading the **nframes** frames from a scene

        hbackward: int
            Number of backward frames from current frame

        hforward: int
            Number forward frames from current frame

        max_agents: int
            Max number of agents to read for each target frame. Note that,
            this also include the backward agents but not the forward ones.
        """
        super().__init__()
        self.zdataset = zdataset
        self.scenes = scenes if scenes is not None else []
        self.nframes = nframes
        self.frame_stride = frame_stride
        self.hbackward = hbackward
        self.hforward = hforward
        self.max_agents = max_agents
        # Total contiguous frames a scene must contain to yield one sample.
        self.nread_frames = (nframes-1)*frame_stride + hbackward + hforward

        self.frame_fields = ['timestamp', 'agent_index_interval']
        self.agent_feature_dim = agent_feature_dim
        self.filter_scenes()

    def __len__(self):
        return len(self.scenes)

    def filter_scenes(self):
        # Keep only scenes long enough for a full backward+forward window.
        self.scenes = [scene for scene in self.scenes if self.get_nframes(scene) > self.nread_frames]

    def __getitem__(self, index):
        return self.read_frames(scene=self.scenes[index])

    def get_nframes(self, scene, start=None):
        """Number of frames in ``scene`` (optionally counted from ``start``)."""
        frame_start = scene["frame_index_interval"][0]
        frame_end = scene["frame_index_interval"][1]

        nframes = (frame_end - frame_start) if start is None else ( frame_end - max(frame_start, start) )
        return nframes

    def _read_frames(self, scene, start=None):
        """Read ``nread_frames`` consecutive frames, from a random offset if
        ``start`` is not given."""
        nframes = self.get_nframes(scene, start=start)

        assert nframes >= self.nread_frames

        frame_start = scene["frame_index_interval"][0]
        start = start or frame_start + np.random.choice(nframes-self.nread_frames)

        frames = self.zdataset.frames.get_basic_selection(
            selection=slice(start, start+self.nread_frames),
            fields=self.frame_fields,
        )
        return frames

    def parse_frame(self, frame):
        # Hook for subclasses; identity by default.
        return frame

    def parse_agent(self, agent):
        # Hook for subclasses; identity by default.
        return agent

    def read_frames(self, scene, start=None, white_tracks=None, encoder=False):
        """Build (X, target, target_availability) tensors for one scene.

        X      : (nframes, max_agents, hbackward, agent_feature_dim) history
        target : (nframes, max_agents, hforward, 2) future centroids,
                 relative to each agent's last observed position
        target_availability : 1 where ground truth exists

        If ``encoder`` is True the track-id LabelEncoder is returned too.
        """
        white_tracks = white_tracks or []
        frames = self._read_frames(scene=scene, start=start)

        agent_start = frames[0]["agent_index_interval"][0]
        agent_end = frames[-1]["agent_index_interval"][1]

        agents = self.zdataset.agents[agent_start:agent_end]

        X = np.zeros((self.nframes, self.max_agents, self.hbackward, self.agent_feature_dim), dtype=np.float32)
        target = np.zeros((self.nframes, self.max_agents, self.hforward, 2), dtype=np.float32)
        target_availability = np.zeros((self.nframes, self.max_agents, self.hforward), dtype=np.uint8)
        X_availability = np.zeros((self.nframes, self.max_agents, self.hbackward), dtype=np.uint8)

        for f in range(self.nframes):
            backward_frame_start = f*self.frame_stride
            forward_frame_start = f*self.frame_stride+self.hbackward

            backward_frames = frames[backward_frame_start:backward_frame_start+self.hbackward]
            forward_frames = frames[forward_frame_start:forward_frame_start+self.hforward]

            backward_agent_start = backward_frames[-1]["agent_index_interval"][0] - agent_start
            backward_agent_end = backward_frames[-1]["agent_index_interval"][1] - agent_start
            backward_agents = agents[backward_agent_start:backward_agent_end]

            # Slot ids come from agents present in the latest backward frame
            # (plus any whitelisted tracks), capped at max_agents.
            le = LabelEncoder(max_size=self.max_agents)
            le.fit(white_tracks)
            le.partial_fit(backward_agents["track_id"])

            for iframe, frame in enumerate(backward_frames):
                backward_agent_start = frame["agent_index_interval"][0] - agent_start
                backward_agent_end = frame["agent_index_interval"][1] - agent_start
                backward_agents = agents[backward_agent_start:backward_agent_end]

                track_ids = le.transform(backward_agents["track_id"])
                mask = (track_ids != le.default_val)
                mask_agents = backward_agents[mask]
                mask_ids = track_ids[mask]
                X[f, mask_ids, iframe, :2] = mask_agents["centroid"]
                X[f, mask_ids, iframe, 2:5] = mask_agents["extent"]
                X[f, mask_ids, iframe, 5] = mask_agents["yaw"]
                X[f, mask_ids, iframe, 6:8] = mask_agents["velocity"]
                X_availability[f, mask_ids, iframe] = 1

            for iframe, frame in enumerate(forward_frames):
                forward_agent_start = frame["agent_index_interval"][0] - agent_start
                forward_agent_end = frame["agent_index_interval"][1] - agent_start
                forward_agents = agents[forward_agent_start:forward_agent_end]

                track_ids = le.transform(forward_agents["track_id"])
                mask = track_ids != le.default_val

                target[f, track_ids[mask], iframe] = forward_agents[mask]["centroid"]
                target_availability[f, track_ids[mask], iframe] = 1

        # Make future positions relative to each agent's last observed
        # centroid and zero out entries with no ground truth.
        target -= X[:,:,[-1], :2]
        target *= target_availability[:,:,:,None]

        # Re-centre histories the same way, then min-max normalise features.
        X[:,:,:, :2] -= X[:,:,[-1], :2]
        X *= X_availability[:,:,:,None]
        X -= self.feature_mins
        X /= (self.feature_maxs - self.feature_mins)

        if encoder:
            return X, target, target_availability, le
        return X, target, target_availability
def collate(batch):
    """DataLoader collate_fn: merge per-scene numpy fields into tensors.

    Transposes the batch so matching fields line up, concatenates each
    field along axis 0, and lazily wraps the results as torch tensors
    (a ``map`` object, as the original returned).
    """
    merged = (np.concatenate(field) for field in zip(*batch))
    return map(torch.from_numpy, merged)
def shapefy(xy_pred, xy, xy_av):
    """Reshape flat prediction/target/availability tensors for loss math.

    Predictions become (B, HFORWARD, 3, 2) -- one trajectory per mode --
    while targets and availability gain a broadcast axis so they align
    against the 3 predicted modes.
    """
    n_modes = 3
    preds = xy_pred.view(-1, HFORWARD, n_modes, 2)
    targets = xy.view(-1, HFORWARD, 2)[:, :, None]
    avail = xy_av.view(-1, HFORWARD)[:, :, None]
    return preds, targets, avail
def LyftLoss(c, xy_pred, xy, xy_av):
    """Negative log-likelihood of a 3-mode trajectory mixture.

    c       : per-agent mode logits, softmaxed into mixture weights
    xy_pred : predicted trajectories for the 3 modes
    xy      : ground-truth positions
    xy_av   : availability mask (1 where ground truth exists)
    """
    c = c.view(-1, c.shape[-1])
    xy_pred, xy, xy_av = shapefy(xy_pred, xy, xy_av)
    c = torch.softmax(c, dim=1)
    # Squared displacement per mode, summed over available timesteps.
    l = torch.sum(torch.mean(torch.square(xy_pred-xy), dim=3)*xy_av, dim=1)
    # The LogSumExp trick for better numerical stability
    # https://en.wikipedia.org/wiki/LogSumExp
    m = l.min(dim=1).values
    l = torch.exp(m[:, None]-l)
    l = m - torch.log(torch.sum(l*c, dim=1))
    # Average only over agents that have at least one available timestep.
    denom = xy_av.max(2).values.max(1).values
    l = torch.sum(l*denom)/denom.sum()
    return 3*l # I found that my loss is usually 3 times smaller than the LB score
def MSE(xy_pred, xy, xy_av):
    """Scaled mean squared displacement over available timesteps."""
    preds, targets, avail = shapefy(xy_pred, xy, xy_av)
    per_step = torch.mean(torch.square(preds - targets), 3)
    return 9 * torch.mean(torch.sum(per_step * avail, dim=1))
def MAE(xy_pred, xy, xy_av):
    """Scaled mean absolute displacement over available timesteps."""
    preds, targets, avail = shapefy(xy_pred, xy, xy_av)
    per_step = torch.mean(torch.abs(preds - targets), 3)
    return 9 * torch.mean(torch.sum(per_step * avail, dim=1))
class BaseNet(LightningModule):
    """Shared LightningModule plumbing for the Lyft models: data loaders,
    optimiser/scheduler setup and validation-epoch aggregation."""

    def __init__(self, batch_size=32, lr=5e-4, weight_decay=1e-8, num_workers=0,
                 criterion=LyftLoss, data_root=DATA_ROOT, epochs=1):
        super().__init__()

        # Log both the constructor args and the module-level windowing
        # constants so every run's config is reproducible from checkpoints.
        self.save_hyperparameters(
            dict(
                HBACKWARD = HBACKWARD,
                HFORWARD = HFORWARD,
                NFRAMES = NFRAMES,
                FRAME_STRIDE = FRAME_STRIDE,
                AGENT_FEATURE_DIM = AGENT_FEATURE_DIM,
                MAX_AGENTS = MAX_AGENTS,
                TRAIN_ZARR = TRAIN_ZARR,
                VALID_ZARR = VALID_ZARR,
                batch_size = batch_size,
                lr=lr,
                weight_decay=weight_decay,
                num_workers=num_workers,
                criterion=criterion,
                epochs=epochs,
            )
        )

        # Loader handles are populated lazily by the *_dataloader hooks.
        self._train_data = None
        self._collate_fn = None
        self._train_loader = None

        self.batch_size = batch_size
        self.num_workers = num_workers
        self.lr = lr
        self.epochs=epochs
        self.weight_decay = weight_decay
        self.criterion = criterion
        self.data_root = data_root

    def train_dataloader(self):
        """Open the training zarr and wrap it in a shuffling DataLoader."""
        z = zarr.open(self.data_root.joinpath(TRAIN_ZARR).as_posix(), "r")
        scenes = z.scenes.get_basic_selection(slice(None), fields= ["frame_index_interval"])
        train_data = CustomLyftDataset(
            z,
            scenes = scenes,
            nframes=NFRAMES,
            frame_stride=FRAME_STRIDE,
            hbackward=HBACKWARD,
            hforward=HFORWARD,
            max_agents=MAX_AGENTS,
            agent_feature_dim=AGENT_FEATURE_DIM,
        )

        train_loader = DataLoader(train_data, batch_size = self.batch_size,collate_fn=collate,
                          pin_memory=True, num_workers = self.num_workers, shuffle=True)

        self._train_data = train_data
        self._train_loader = train_loader
        return train_loader

    def val_dataloader(self):
        """Open the validation zarr and wrap it in a DataLoader."""
        z = zarr.open(self.data_root.joinpath(VALID_ZARR).as_posix(), "r")
        scenes = z.scenes.get_basic_selection(slice(None), fields=["frame_index_interval"])
        val_data = CustomLyftDataset(
            z,
            scenes = scenes,
            nframes=NFRAMES,
            frame_stride=FRAME_STRIDE,
            hbackward=HBACKWARD,
            hforward=HFORWARD,
            max_agents=MAX_AGENTS,
            agent_feature_dim=AGENT_FEATURE_DIM,
        )

        val_loader = DataLoader(val_data, batch_size = self.batch_size, collate_fn=collate,
                          pin_memory=True, num_workers = self.num_workers, shuffle=True)

        self._val_data = val_data
        self._val_loader = val_loader
        return val_loader

    def validation_epoch_end(self, outputs):
        """Average the per-batch validation metrics and free GPU memory."""
        avg_loss = torch.mean(torch.tensor([x['val_loss'] for x in outputs]))
        avg_mse = torch.mean(torch.tensor([x['val_mse'] for x in outputs]))
        avg_mae = torch.mean(torch.tensor([x['val_mae'] for x in outputs]))

        tensorboard_logs = {'val_loss': avg_loss, "val_rmse": torch.sqrt(avg_mse), "val_mae": avg_mae}

        torch.cuda.empty_cache()
        gc.collect()
        return {
            'val_loss': avg_loss,
            'log': tensorboard_logs,
            "progress_bar": {"val_ll": tensorboard_logs["val_loss"], "val_rmse": tensorboard_logs["val_rmse"]}
        }

    def configure_optimizers(self):
        """Adam with cosine-annealed learning rate over ``epochs``."""
        optimizer = optim.Adam(self.parameters(), lr= self.lr, betas= (0.9,0.999),
                               weight_decay= self.weight_decay, amsgrad=False)

        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=self.epochs,
            eta_min=1e-5,
        )
        return [optimizer], [scheduler]
class STNkd(nn.Module):
    """Spatial Transformer Network: predicts a (k, k) feature-space
    transformation matrix from a (batch, k, npoints) point cloud.

    The head regresses an offset that is added to the identity, so an
    untrained network starts out near the identity transform.
    """
    def __init__(self, k=64):
        super(STNkd, self).__init__()
        # Per-point MLP implemented as 1x1 convolutions over the point axis.
        self.conv = nn.Sequential(
            nn.Conv1d(k, 256, kernel_size=1), nn.ReLU(),
            nn.Conv1d(256, 256, kernel_size=1), nn.ReLU(),
            nn.Conv1d(256, 512, kernel_size=1), nn.ReLU(),
        )
        self.fc = nn.Sequential(
            nn.Linear(512, k*k),nn.ReLU(),
        )
        self.k = k

    def forward(self, x):
        """Return a (batch, k, k) transform for input shaped (batch, k, npoints)."""
        batchsize = x.size()[0]
        x = self.conv(x)
        # Global max-pool over the point axis: one 512-d descriptor per cloud.
        x = torch.max(x, 2)[0]
        x = self.fc(x)
        # Add the identity so the output is a perturbation of I. Building it
        # with torch.eye on the input's device/dtype replaces the deprecated
        # numpy + Variable + .cuda() round-trip and works on any device.
        iden = torch.eye(self.k, dtype=x.dtype, device=x.device).flatten()
        x = x + iden.unsqueeze(0)
        x = x.view(-1, self.k, self.k)
        return x
class PointNetfeat(nn.Module):
    """PointNet feature extractor: input transform (STN), per-point MLP,
    optional feature transform, and a global max-pooled descriptor.

    With ``global_feat=False`` the 2048-d global descriptor is tiled and
    concatenated to the 256-d per-point features.
    """
    def __init__(self, global_feat = False, feature_transform = False, stn1_dim = 120,
                 stn2_dim = 64):
        super(PointNetfeat, self).__init__()
        self.global_feat = global_feat
        self.feature_transform = feature_transform
        self.stn1_dim = stn1_dim
        self.stn2_dim = stn2_dim

        self.stn = STNkd(k=stn1_dim)
        self.conv1 = nn.Sequential(
            nn.Conv1d(stn1_dim, 256, kernel_size=1), nn.ReLU(),
        )

        self.conv2 = nn.Sequential(
            nn.Conv1d(256, 256, kernel_size=1), nn.ReLU(),
            nn.Conv1d(256, 1024, kernel_size=1), nn.ReLU(),
            nn.Conv1d(1024, 2048, kernel_size=1), nn.ReLU(),
        )

        if self.feature_transform:
            self.fstn = STNkd(k=stn2_dim)

    def forward(self, x):
        """x: (batch, stn1_dim, npoints) -> (features, trans, trans_feat)."""
        n_pts = x.size()[2]
        # Apply the learned input transform: x^T @ trans, back to channel-first.
        trans = self.stn(x)
        x = x.transpose(2, 1)
        x = torch.bmm(x, trans)
        x = x.transpose(2, 1)

        x = self.conv1(x)

        if self.feature_transform:
            trans_feat = self.fstn(x)
            x = x.transpose(2,1)
            x = torch.bmm(x, trans_feat)
            x = x.transpose(2,1)
        else:
            trans_feat = None

        pointfeat = x
        x = self.conv2(x)
        # Global max-pool over points -> one 2048-d descriptor per cloud.
        x = torch.max(x, 2)[0]

        if self.global_feat:
            return x, trans, trans_feat
        else:
            # Tile the global descriptor onto every point and concatenate.
            x = x[:,:,None].repeat(1, 1, n_pts)
            return torch.cat([x, pointfeat], 1), trans, trans_feat
class LyftNet(BaseNet):
    """PointNet-based motion-prediction model: outputs mode logits ``c``
    and flattened trajectories ``x`` (300 = HFORWARD * 3 modes * 2 coords,
    assuming HFORWARD == 50 -- verify against parameters.json)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.pnet = PointNetfeat()

        self.fc0 = nn.Sequential(
            nn.Linear(2048+256, 1024), nn.ReLU(),
        )

        # Trajectory head (per agent) and mode-confidence head.
        self.fc = nn.Sequential(
            nn.Linear(1024, 300),
        )

        self.c_net = nn.Sequential(
            nn.Linear(1024, 3),
        )

    def forward(self, x):
        """x: (batch, npoints, hbackward, nfeatures) -> (c, trajectories)."""
        bsize, npoints, hb, nf = x.shape

        # Push points to the last dim
        x = x.transpose(1, 3)
        # Merge time with features
        x = x.reshape(bsize, hb*nf, npoints)

        x, trans, trans_feat = self.pnet(x)
        # Push featuresxtime to the last dim
        x = x.transpose(1,2)
        x = self.fc0(x)
        c = self.c_net(x)
        x = self.fc(x)
        return c,x

    def training_step(self, batch, batch_idx):
        """One optimisation step; logs mse/mae alongside the mixture loss."""
        x, y, y_av = [b.to(device) for b in batch]
        c, preds = self(x)
        loss = self.criterion(c,preds,y, y_av)

        with torch.no_grad():
            logs = {
                'loss': loss,
                "mse": MSE(preds, y, y_av),
                "mae": MAE(preds, y, y_av),
            }
        return {'loss': loss, 'log': logs, "progress_bar": {"rmse":torch.sqrt(logs["mse"]) }}

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        """Gradient-free evaluation of one validation batch."""
        x, y, y_av = [b.to(device) for b in batch]
        c,preds = self(x)
        loss = self.criterion(c, preds, y, y_av)
        val_logs = {
            'val_loss': loss,
            "val_mse": MSE(preds, y, y_av),
            "val_mae": MAE(preds, y, y_av),
        }
        return val_logs
def get_last_checkpoint(root):
    """Return the most recently created ``lyfnet*.ckpt`` under ``root``.

    Returns None when no checkpoint exists. Uses ``st_ctime`` (inode
    change time), matching the original behaviour, rather than mtime.
    """
    checkpoints = Path(root).glob("lyfnet*.ckpt")
    return max(checkpoints, key=lambda p: p.stat().st_ctime, default=None)
def get_last_version(root):
    """Return the largest N among ``version_N`` entries under ``root``.

    Returns 0 when there are no version directories (or only ones whose
    suffix is not larger than 0), matching the original scan.
    """
    versions = [int(p.name.split("_")[-1]) for p in Path(root).glob("version_*")]
    # The leading 0 preserves the original floor: never return less than 0.
    return max([0] + versions)
| 2.09375 | 2 |
gitmostwanted/web.py | JS555/YayanRuhianResearch_ | 0 | 12760186 | <reponame>JS555/YayanRuhianResearch_
from gitmostwanted.app import app
from gitmostwanted.lib.filter import number_humanize
from gitmostwanted.blueprints import\
repo_rating, repo_trending, static_content, user_attitude, user_oauth, user_profile
# Wire every blueprint module onto the Flask application.
app.register_blueprint(static_content.static_content)
app.register_blueprint(user_oauth.user_oauth)
app.register_blueprint(repo_trending.repo_trending)
app.register_blueprint(repo_rating.repo_rating)
app.register_blueprint(user_attitude.user_attitude)
app.register_blueprint(user_profile.user_profile)
# Template helpers: humanised numbers plus Jinja's ``do`` statement.
app.jinja_env.filters['number_humanize'] = number_humanize
app.jinja_env.add_extension('jinja2.ext.do')
# Development entry point; production should use a WSGI server instead.
if __name__ == '__main__':
    app.run(host='0.0.0.0')
| 1.4375 | 1 |
bkash/_constants.py | aniruddha-adhikary/bkash-api-client-python | 19 | 12760187 | <reponame>aniruddha-adhikary/bkash-api-client-python<filename>bkash/_constants.py
# bKash Checkout API version used when building endpoint URLs.
API_VERSION = '0.40.0'
# Endpoint templates; format with service name, API version and resource path.
SANDBOX_BASE = 'https://{service}.sandbox.bka.sh/v{version}/{path}'
PRODUCTION_BASE = 'https://{service}.pay.bka.sh/v{version}/{path}'
| 1.09375 | 1 |
src/hal/macros/goto_field.py | sdss/HAL | 0 | 12760188 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: <NAME> (<EMAIL>)
# @Date: 2021-10-10
# @Filename: goto_field.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
from __future__ import annotations
import asyncio
from time import time
from hal import config
from hal.exceptions import MacroError
from hal.macros import Macro
__all__ = ["GotoFieldMacro"]
class GotoFieldMacro(Macro):
    """Go to field macro.

    Sequences the telescope and instruments onto a new field: slew and
    reconfigure the fibre positioner in parallel, run BOSS calibrations
    (hartmann, arcs, flat), close the FVC loop, then acquire and guide.
    Stage order and lamp/FFS state transitions are timing-sensitive.
    """

    name = "goto_field"

    # A tuple means the stages run concurrently.
    __STAGES__ = [
        "prepare",
        ("slew", "reconfigure"),
        "boss_hartmann",
        "fvc",
        "boss_arcs",
        "boss_flat",
        "acquire",
        "guide",
    ]
    __CLEANUP__ = ["cleanup"]

    async def prepare(self):
        """Check configuration and run pre-slew checks."""

        all_stages = list(self.stage_status.keys())

        if "reconfigure" in all_stages:
            configuration_loaded = self.actor.models["jaeger"]["configuration_loaded"]
            last_seen = configuration_loaded.last_seen

            if last_seen is None:
                self.command.warning("The age of the loaded configuration is unknown.")
            elif time() - last_seen > 3600:  # One hour
                raise MacroError("Configuration is too old. Load a new configuration.")

        # Start closing the FFS if they are open but do not block.
        await self._close_ffs(wait=False)

        # If lamps are needed, turn them on now but do not wait for them to warm up.
        if "boss_hartmann" in self.stages or "boss_arcs" in self.stages:
            await self.helpers.lamps.turn_lamp(
                self.command,
                ["HgCd", "Ne"],
                True,
                turn_off_others=True,
            )
        elif "boss_flat" in self.stages:
            await self.helpers.lamps.turn_lamp(
                self.command,
                ["ff"],
                True,
                turn_off_others=True,
            )

    async def _close_ffs(self, wait: bool = True):
        """Closes the FFS (flat-field screens), optionally without blocking."""

        if not self.helpers.ffs.all_closed():
            self.command.info("Closing FFS")
            task = self.helpers.ffs.close(self.command)
            if wait:
                await task
            else:
                # Fire-and-forget; later stages re-check FFS state.
                asyncio.create_task(task)

    async def slew(self):
        """Slew to field."""

        configuration_loaded = self.actor.models["jaeger"]["configuration_loaded"]
        ra, dec, pa = configuration_loaded[3:6]

        if any([ra is None, dec is None, pa is None]):
            raise MacroError("Unknown RA/Dec/PA coordinates for field.")

        await self.helpers.tcc.goto_position(
            self.command,
            where={"ra": ra, "dec": dec, "rot": pa},
        )

    async def reconfigure(self):
        """Reconfigures the FPS."""

        self.command.info("Reconfiguring FPS array.")

        if self.actor.models["jaeger"]["folded"][0] is not True:
            # Unwind from an unknown robot layout before executing.
            self.command.warning("FPS is not folded. Unwinding.")
            await self.send_command("jaeger", "explode 5")
            await self.send_command("jaeger", "unwind")

        await self.send_command("jaeger", "configuration execute")

    async def boss_hartmann(self):
        """Takes the hartmann sequence."""

        self.command.info("Running hartmann collimate.")

        # First check that the FFS are closed and lamps on. We don't care for how long.
        await self._close_ffs()

        lamp_status = self.helpers.lamps.list_status()
        if lamp_status["Ne"][0] is not True or lamp_status["HgCd"][0] is not True:
            await self.helpers.lamps.turn_lamp(
                self.command,
                ["HgCd", "Ne"],
                True,
                turn_off_others=True,
                wait_for_warmup=False,
            )

        # Run hartmann and adjust the collimator but ignore residuals.
        await self.send_command(
            "hartmann",
            "collimate ignoreResiduals",
            time_limit=config["timeouts"]["hartmann"],
        )

        # Now check if there are residuals that require modifying the blue ring.
        sp1Residuals = self.actor.models["hartmann"]["sp1Residuals"][2]
        if sp1Residuals != "OK":
            raise MacroError(
                "Please adjust the blue ring and run goto-field again. "
                "The collimator has been adjusted."
            )

    async def boss_arcs(self):
        """Takes BOSS arcs."""

        self.command.info("Taking BOSS arc.")

        await self._close_ffs()

        # This won't wait if the lamps are already on and warmed up.
        self.command.debug("Waiting for lamps to warm up.")
        await self.helpers.lamps.turn_lamp(
            self.command,
            ["HgCd", "Ne"],
            True,
            turn_off_others=True,
            wait_for_warmup=True,
        )

        arc_time = self.config["arc_time"]

        # Read out asynchronously so the next stage can overlap with readout.
        await self.helpers.boss.expose(
            self.command,
            arc_time,
            exp_type="arc",
            readout=True,
            read_async=True,
        )

    async def boss_flat(self):
        """Takes the BOSS flat."""

        self.command.info("Taking BOSS flat.")

        await self._close_ffs()

        pretasks = [
            self.helpers.lamps.turn_lamp(
                self.command,
                ["ff"],
                True,
                turn_off_others=True,
                wait_for_warmup=True,
            )
        ]
        if self.helpers.boss.readout_pending:  # Readout from the arc.
            pretasks.append(self.helpers.boss.readout(self.command))

        self.command.debug("Preparing lamps and reading pending exposures.")
        await asyncio.gather(*pretasks)

        # Now take the flat. Do not read it yet.
        flat_time = self.config["flat_time"]

        await self.helpers.boss.expose(
            self.command,
            flat_time,
            exp_type="flat",
            readout=True,
            read_async=True,
        )

        # We are done with lamps at this point.
        await self.helpers.lamps.all_off(self.command, wait=False)

    async def fvc(self):
        """Run the FVC loop."""

        self.command.info("Halting the axes.")
        await self.helpers.tcc.axis_stop(self.command, axis="rot")

        self.command.info("Running FVC loop.")
        fvc_command = await self.send_command(
            "jaeger",
            "fvc loop",
            time_limit=config["timeouts"]["fvc"],
            raise_on_fail=False,
        )

        # fvc loop should never fail unless an uncaught exception.
        if fvc_command.status.did_fail:
            raise MacroError("FVC loop failed.")

        # Check RMS to determine whether to continue or not.
        fvc_rms = self.actor.models["jaeger"]["fvc_rms"][0]
        if fvc_rms > self.config["fvc_rms_threshold"]:
            raise MacroError(f"FVC loop failed. RMS={fvc_rms}.")

        # Tracking was stopped for the FVC exposure; slew back onto field.
        self.command.info("Re-slewing to field.")
        await self.slew()

    async def _set_guider_offset(self):
        """Sets the guider offset."""

        offset = self.config["guider_offset"]
        if offset is not None:
            offset = " ".join(map(str, offset))
            self.command.info(f"Setting guide offset to {offset}.")
            await self.send_command("cherno", f"offset {offset}")

    async def acquire(self):
        """Acquires the field."""

        if not self.helpers.tcc.check_axes_status("Tracking"):
            raise MacroError("Axes must be tracking for acquisition.")

        if not self.helpers.ffs.all_open():
            self.command.info("Opening FFS")
            await self.helpers.ffs.open(self.command)

        await self._set_guider_offset()

        guider_time = self.config["guider_time"]

        self.command.info("Acquiring field.")
        await self.send_command(
            "cherno",
            f"acquire -t {guider_time} --full",
            time_limit=guider_time + 60.0,
        )

    async def guide(self):
        """Starts the guide loop."""

        if not self.helpers.tcc.check_axes_status("Tracking"):
            raise MacroError("Axes must be tracking for guiding.")

        if not self.helpers.ffs.all_open():
            self.command.info("Opening FFS")
            await self.helpers.ffs.open(self.command)

        # The acquire stage normally sets the offset; do it here if skipped.
        if "acquire" not in self.stage_status:
            await self._set_guider_offset()

        guider_time = self.config["guider_time"]

        self.command.info("Starting guide loop.")
        # Deliberately not awaited: guiding continues after the macro ends.
        asyncio.create_task(self.send_command("cherno", f"acquire -c -t {guider_time}"))

    async def cleanup(self):
        """Turns off all lamps."""

        await self.helpers.lamps.all_off(self.command, wait=False, force=True)

        # Read any pending BOSS exposure.
        if self.helpers.boss.readout_pending:
            await self.helpers.boss.readout(self.command)
| 1.90625 | 2 |
holoviews/plotting/bokeh/tabular.py | stonebig/holoviews | 0 | 12760189 | <gh_stars>0
from bokeh.models.widgets import DataTable, TableColumn
import param
import numpy as np
from ...core import Dataset
from ...element import ItemTable
from ..plot import GenericElementPlot
from .plot import BokehPlot
class TablePlot(BokehPlot, GenericElementPlot):
    """Renders HoloViews Dataset/ItemTable elements as a Bokeh DataTable."""

    height = param.Number(default=None)

    width = param.Number(default=400)

    style_opts = ['row_headers', 'selectable', 'editable',
                  'sortable', 'fit_columns', 'width', 'height']

    finalize_hooks = param.HookList(default=[], doc="""
        Optional list of hooks called when finalizing a column.
        The hook is passed the plot object and the displayed
        object, and other plotting handles can be accessed via plot.handles.""")

    # Handles refreshed on each frame update (see current_handles).
    _update_handles = ['source', 'glyph']

    def __init__(self, element, plot=None, **params):
        super(TablePlot, self).__init__(element, **params)
        # NOTE(review): when ``plot`` is not None this reads self.handles
        # before this class assigns it -- presumably a superclass sets it;
        # verify against GenericElementPlot.
        self.handles = {} if plot is None else self.handles['plot']
        element_ids = self.hmap.traverse(lambda x: id(x), [Dataset, ItemTable])
        # Static if every key maps to the same element instance.
        self.static = len(set(element_ids)) == 1 and len(self.keys) == len(self.hmap)
        self.callbacks = [] # Callback support on tables not implemented


    def _execute_hooks(self, element):
        """
        Executes finalize hooks
        """
        for hook in self.finalize_hooks:
            try:
                hook(self, element)
            except Exception as e:
                self.warning("Plotting hook %r could not be applied:\n\n %s" % (hook, e))


    def get_data(self, element, ranges=None, empty=False):
        """Columnar data and column mapping for the Bokeh data source."""
        dims = element.dimensions()
        data = {d: np.array([]) if empty else element.dimension_values(d)
                for d in dims}
        mapping = {d.name: d.name for d in dims}
        # Non-numeric columns are pretty-printed per dimension formatting.
        data = {d.name: values if values.dtype.kind in "if" else list(map(d.pprint_value, values))
                for d, values in data.items()}
        return data, mapping


    def initialize_plot(self, ranges=None, plot=None, plots=None, source=None):
        """
        Initializes a new plot object with the last available frame.
        """
        # Get element key and ranges for frame
        element = self.hmap.last
        key = self.keys[-1]
        self.current_frame = element
        self.current_key = key

        data, _ = self.get_data(element, ranges)
        if source is None:
            source = self._init_datasource(data)
        self.handles['source'] = source

        dims = element.dimensions()
        columns = [TableColumn(field=d.name, title=d.pprint_label) for d in dims]
        properties = self.lookup_options(element, 'style')[self.cyclic_index]
        table = DataTable(source=source, columns=columns, height=self.height,
                          width=self.width, **properties)
        self.handles['plot'] = table
        self.handles['glyph_renderer'] = table
        self._execute_hooks(element)
        self.drawn = True

        return table


    @property
    def current_handles(self):
        """
        Returns a list of the plot objects to update.
        """
        handles = []
        if self.static and not self.dynamic:
            return handles


        element = self.current_frame
        previous_id = self.handles.get('previous_id', None)
        current_id = None if self.current_frame is None else element._plot_id
        for handle in self._update_handles:
            if (handle == 'source' and self.dynamic and current_id == previous_id):
                continue
            if handle in self.handles:
                handles.append(self.handles[handle])

        # Cache frame object id to skip updating if unchanged
        if self.dynamic:
            self.handles['previous_id'] = current_id

        return handles


    def update_frame(self, key, ranges=None, plot=None):
        """
        Updates an existing plot with data corresponding
        to the key.
        """
        element = self._get_frame(key)
        source = self.handles['source']
        data, _ = self.get_data(element, ranges)
        self._update_datasource(source, data)
| 2.46875 | 2 |
nadine-2.2.3/interlink/migrations/0006_auto_20191230_1550.py | alvienzo720/Dep_Nadine | 0 | 12760190 | <reponame>alvienzo720/Dep_Nadine
# Generated by Django 2.2.9 on 2019-12-30 23:50
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (relaxes blank/related_name
    # options on MailingList m2m fields); do not hand-edit -- create a new
    # migration for further schema changes.

    dependencies = [
        ('interlink', '0005_mailinglist_enabled'),
    ]

    operations = [
        migrations.AlterField(
            model_name='mailinglist',
            name='moderators',
            field=models.ManyToManyField(blank=True, limit_choices_to={'is_staff': True}, related_name='interlink_moderated', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='mailinglist',
            name='subscribers',
            field=models.ManyToManyField(blank=True, related_name='interlink_subscriptions', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='mailinglist',
            name='unsubscribed',
            field=models.ManyToManyField(blank=True, related_name='interlink_unsubscribed', to=settings.AUTH_USER_MODEL),
        ),
    ]
| 1.578125 | 2 |
test/test_ui.py | OpenMindInnovation/timeflux_ui | 7 | 12760191 | """Tests for ui.py"""
import pytest
def test_none():
    """Placeholder test.

    Uses ``assert`` instead of ``return True``: pytest ignores return
    values and warns (PytestReturnNotNoneWarning) when a test returns
    something other than None.
    """
    assert True
| 1.429688 | 1 |
test/test_wave_utils.py | nowindxdw/0AI_ears | 0 | 12760192 | <reponame>nowindxdw/0AI_ears
from __future__ import absolute_import
from __future__ import print_function
import pytest
import os
#xears_test
from xears.data_utils import wave_utils
def test_wave_test():
    """Smoke-test: read a bundled wav file and print its sample array.

    Relies on the ``xears`` package's test fixtures being on disk; the
    commented lines are alternative fixtures/visualisations kept for
    manual experimentation.
    """
    #A_path = os.path.dirname(__file__)+os.path.sep+'xears'+os.path.sep+'mp3source'+os.path.sep+'model.wav'
    A_path = os.path.dirname(__file__)+os.path.sep+'xears'+os.path.sep+'data_source'+os.path.sep+'test30.wav'
    wave_data,time = wave_utils.readWav(A_path)
    #print(wave_data)
    #wave_data = wave_utils.preprocess_wave(wave_data)
    #wave_data = wave_utils.deprocess_wave(wave_data)
    print(wave_data.shape)
    print(wave_data)
    #wave_utils.drawWave(wave_data,time)
    #noise_wave_data = wave_utils.gen_noise_wave(wave_data)
    #wave_utils.drawWave(noise_wave_data,time)
if __name__ == '__main__':
    # Run this module's tests directly, then wait for the user.
    pytest.main([__file__])
    # ``raw_input`` only exists on Python 2; ``input`` is the Python 3
    # spelling (the __future__ imports above indicate py3 is the target).
    input('Press Enter to exit...')
lib/TransferModules/GridFTPTransferCertificate.py | cedadev/mistamover | 0 | 12760193 | <filename>lib/TransferModules/GridFTPTransferCertificate.py
# BSD Licence
# Copyright (c) 2012, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
from TransferBase import TransferBase
from TransferUtils import *
import os
import tempfile
import sys
this_dir = os.path.dirname(__file__)
top_dir = os.path.abspath(os.path.dirname(this_dir + "../"))
lib_dir = os.path.join(top_dir, "lib")
sys.path.append(lib_dir)
from Response import Response, ResponseCode
from ControlFile import ControlFile
from ReceiptFile import ReceiptFile
from ThankyouFile import ThankyouFile
class GridFTPTransferCertificate(TransferBase):
    """
    GridFTP transfer type using X.509 certificate authentication.

    Builds globus-url-copy (or equivalent, from config key
    ``gridftp_certificate.cmd``) command lines for TransferBase to run:
    pushing data files, optionally with control/receipt/thank-you files
    when the target runs an arrival monitor.

    NOTE(review): this module uses Python 2 ``except Exception, e`` syntax
    in setupPushThanksCmd()/setupTransfer(), so it cannot run on Python 3
    without conversion to ``except Exception as e``.
    """
    def __init__(self, config):
        # Keep the whole config object; individual keys are read lazily.
        self.config = config
        self.cmd = self.config.get("gridftp_certificate.cmd");
        self.setConfig(config)
        self.short_name = "gridftp"
        self.initLogger(self.short_name)
        # Paths of helper files from the previous transfer; cleaned up at
        # the start of the next push.
        self.thankyou_file_path = None
        self.rcpt_file_path = None
        self.ctl_file_path = None

    # this is called by TransferModule
    def setupPushCmd(self):
        '''
        Called by TransferBase in order to create a push command.

        Removes stale control/receipt files, then builds the gsiftp push
        command for the data file.  When the target uses an arrival
        monitor, a control file (name, size, checksum, expected receipt
        name) is created and a second push of it is appended with ";".
        Returns None if the data file no longer exists.
        '''
        # Best-effort cleanup of the previous transfer's helper files.
        if self.ctl_file_path != None:
            try:
                os.remove(self.ctl_file_path)
            except:
                pass
        if self.rcpt_file_path != None:
            try:
                os.remove(self.rcpt_file_path)
            except:
                pass
        f = self.getFile()
        if not self.checkFileExists(self.config.get("data_stream.directory") + "/" + f):
            return None
        # set up a control file
        if self.config.get("outgoing.target_uses_arrival_monitor"):
            # set up a control file
            item_name = self.getFile()
            item_path = (os.path.join(self.config.get("data_stream.directory"),
                os.path.basename(item_name)))
            # Control file is hidden: ".<name>.<control extension>".
            ctl_file_name = (".%s.%s" % (item_name, self.config.get(
                "outgoing.control_file_extension")))
            ctl_file_path = (os.path.join(self.config.get(
                "data_stream.directory"), os.path.basename(ctl_file_name)))
            self.ctl_file_path = ctl_file_path
            item_size = os.path.getsize(item_path)
            item_cksum = TransferUtils.calcChecksum(item_path)
            # Timestamp makes the expected receipt file name unique.
            # NOTE(review): `time` is not imported here directly; it is
            # presumably re-exported by `from TransferUtils import *` --
            # confirm.
            ts = "%.2f" % time.time()
            rcpt_file_name = (".%s.%s.%s" % (item_name, ts, self.config.get(
                "outgoing.receipt_file_extension")))
            ctl_file = ControlFile(ctl_file_path, can_overwrite=True)
            ctl_file.create(item_name, item_size, item_cksum, rcpt_file_name)
            self.rcpt_file_name = rcpt_file_name
            self.rcpt_file_path = (TransferUtils.getPathInDir(rcpt_file_name,
                self.config.get("data_stream.directory")))
        f = self.getFile()
        gftp = self.cmd
        # Without an arrival monitor, rely on gridftp's own sync checks.
        if self.config.get("outgoing.target_uses_arrival_monitor") == False:
            gftp += " -sync -sync-level 3"
        pushcmd = (gftp + " " + self.config.get("data_stream.directory") + "/"
            + f + " gsiftp://" + self.config.get("outgoing.target_host") + ":"
            + str(self.config.get("gridftp_certificate.port")) + "//" +
            self.config.get("outgoing.target_dir") + "/" + f)
        if self.config.get("outgoing.target_uses_arrival_monitor"):
            # NOTE(review): unlike the data push above, this URL has no
            # "//" between the port and outgoing.target_dir -- likely a
            # bug; confirm against a working deployment.
            pushcmd += ("; " + gftp + " " + self.config.get(
                "data_stream.directory") + "/" + ctl_file_name + " gsiftp://"
                + self.config.get("outgoing.target_host") + ":" +
                str(self.config.get("gridftp_certificate.port")) +
                self.config.get("outgoing.target_dir") + "/" + ctl_file_name)
        self.info("setupPushCmd %s " % pushcmd)
        return pushcmd

    def setupPullRcptCmd(self):
        '''
        Called by TransferBase to setup the command that pulls receipt files
        from the target into the local data directory.
        '''
        gftp = self.cmd
        pullrcpt = (gftp + " gsiftp://" + self.config.get("outgoing.target_host")
            + ":" + str(self.config.get("gridftp_certificate.port")) + "//" + self.config.get(
            "outgoing.target_dir") + "/" + self.rcpt_file_name + " " +
            self.config.get("data_stream.directory") + "/" + self.rcpt_file_name)
        return pullrcpt

    def setupPushThanksCmd(self):
        '''
        Called by TransferBase to setup the command that pushes a ThankYou
        file to the target, acknowledging a validated receipt.  Returns ""
        if the receipt cannot be read.
        '''
        # Best-effort removal of the previous thank-you file.
        if self.thankyou_file_path != None:
            try:
                os.remove(self.thankyou_file_path)
            except:
                pass
        # there should be no issue here as this is only called after
        # a receipt file has been proven to be valid
        gftp = self.cmd
        try:
            rcpt = ReceiptFile(self.rcpt_file_path)
            rcpt_data = rcpt.read()
        except Exception, err:
            self.info("push thanks setup fail %s" % err)
            return ""
        # Field 4 of the receipt names the thank-you file to create.
        thankyou_file_name = rcpt_data[4]
        thankyou_file_path = (TransferUtils.getPathInDir(thankyou_file_name,
            self.config.get("data_stream.directory")))
        self.thankyou_file_path = thankyou_file_path
        thankyou_file = ThankyouFile(thankyou_file_path)
        thankyou_file.create(self.rcpt_file_name)
        self.thankyou_file_path = thankyou_file_path
        thankyoucmd = (gftp + " " + self.config.get("data_stream.directory") +
            "/" + thankyou_file_name + " gsiftp://" + self.config.get(
            "outgoing.target_host") + ":" + str(self.config.get(
            "gridftp_certificate.port")) + "//" + self.config.get("outgoing.target_dir")
            + "/" + thankyou_file_name)
        return thankyoucmd

    # this is called by TransferModule
    def setupStopFileCmd(self):
        '''
        Called by TransferBase in order to create the command that checks
        for a stop file on the target (pulled into a local temp file).
        '''
        # The temp file descriptor/name are kept on self so the caller can
        # inspect/clean up the pulled stop file.
        tf, name = tempfile.mkstemp()
        self.stopname = name
        self.stoptf = tf
        pullstop = (self.cmd + " gsiftp://" + self.config.get(
            "outgoing.target_host") + ":" + str(self.config.get("gridftp_certificate.port"))
            + "//" + self.config.get("outgoing.target_dir") + "/" +
            self.config.get("outgoing.stop_file") + " " + name)
        self.info("setupStopFileCmd %s " % pullstop)
        return pullstop

    def checkVars(self):
        """Raise an Exception naming the first missing required setting."""
        # GLOBUS_LOCATION must be set for the Globus toolkit binaries.
        try:
            os.environ['GLOBUS_LOCATION']
        except:
            raise Exception("GLOBUS_LOCATION environment variable is not set")
        if not self.config.checkSet("gridftp_certificate.port"):
            raise Exception("gridftp_certificate.port is not set")
        if not self.config.checkSet("gridftp_certificate.cmd"):
            # NOTE(review): message says "gridftp.cmd" but the key checked
            # is "gridftp_certificate.cmd".
            raise Exception("gridftp.cmd is not set")
        if not self.config.checkSet("gridftp_certificate.password"):
            raise Exception("gridftp_certificate.password is not set")
        # these are required for transfer
        if not self.config.checkSet("outgoing.target_dir"):
            raise Exception("outgoing.target_dir is not set")
        if not self.config.checkSet("outgoing.target_host"):
            raise Exception("outgoing.target_host is not set")
        if not self.config.checkSet("data_stream.directory"):
            raise Exception("data_stream.directory is not set")

    # this is the entry point for the module
    def setupTransfer(self, f):
        """Transfer one file: validate it, ensure a proxy credential exists
        (creating one from the configured password if needed), honour any
        stop file, then push the data.  Returns a Response object."""
        self.setFile(f)
        if not self.checkFileExists(self.config.get("data_stream.directory") + "/" + f):
            rc = ResponseCode(False)
            grv = Response(rc, "Not attempting file transfer")
            return grv
        # Reject/quarantine files that fail the plain-name/size checks.
        file_name = (TransferUtils.getPlainFileName(self.config.get(
            "data_stream.directory"), f, self.config.get("outgoing.dir_size_limit")))
        if not file_name:
            (TransferUtils.quarantine(f, self.config.get("data_stream.directory"),
                self.config.get("outgoing.quarantine_dir")))
            grv = Response.failure("Did not attempt transfer of %s" % f)
            self.info("Did not attempt transfer of %s" % f)
            return grv
        else:
            self.setFile(os.path.basename(file_name))
        fn = os.path.join(self.config.get("data_stream.directory"), file_name)
        filesize = os.path.getsize(fn)
        try:
            self.checkVars()
        except Exception, ex:
            rc = ResponseCode(False)
            r = Response(rc, "not all required variables in GridFTPTransfer are set : ", str(ex))
            self.info("not all required variables in GridFTPTransfer are set : %s " % str(ex))
            return r
        # A missing stop file makes the pull fail with this code/message,
        # which waitForStopFile() interprets as "no stop requested".
        self.setStopReturnCode(1)
        self.setStopError("failed: No such file or directory")
        # in order to use grid ftp we need a valid credential
        self.info("GridFTPTransfer checking credentials")
        checkCredential = "grid-proxy-info -exists"
        grv = self.transferData(checkCredential)
        if str(grv.code) == "Failure":
            # try and set a new credential
            self.info("GridFTPTransfer checking credentials failed %s" % grv.data)
            # Feed the configured password to grid-proxy-init via a temp
            # file redirected to stdin.
            tf, name = tempfile.mkstemp()
            os.write(tf, self.config.get("gridftp_certificate.password"))
            os.fsync(tf)
            setupCredential = ("grid-proxy-init -pwstdin " + " < " + name)
            grv = self.transferData(setupCredential)
            # Remove the password file regardless of the outcome.
            try:
                os.close(tf)
                os.remove(name)
            except:
                pass
            if str(grv.code) == "Failure":
                return grv
        grv = self.waitForStopFile()
        if str(grv.code) == "Success":
            grv = self.pushData()
        self.info(" rv = %s " % str(grv.code))
        self.info("GridFTPTransferCertificate exiting %s" % str(grv.code))
        # NOTE(review): this success message is logged even when the push
        # failed or was stopped -- confirm intent.
        self.info("Successfully sent: %s; size: %s" % (self.getFile(), filesize))
        return grv
| 2.5625 | 3 |
questionary/prompts/autocomplete.py | qualichat/questionary | 851 | 12760194 | <filename>questionary/prompts/autocomplete.py<gh_stars>100-1000
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Union,
Iterable,
)
from prompt_toolkit.completion import CompleteEvent, Completer, Completion
from prompt_toolkit.document import Document
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.shortcuts.prompt import PromptSession, CompleteStyle
from prompt_toolkit.styles import Style, merge_styles
from prompt_toolkit.lexers import SimpleLexer
from questionary.constants import DEFAULT_QUESTION_PREFIX, DEFAULT_STYLE
from questionary.prompts.common import build_validator
from questionary.question import Question
class WordCompleter(Completer):
    """Completer over a fixed or lazily produced list of string choices.

    The text before the cursor is matched against every choice, optionally
    case-insensitively and anywhere inside the choice (``match_middle``),
    and the matched span is underlined in the suggestion display.
    """

    choices_source: Union[List[str], Callable[[], List[str]]]
    ignore_case: bool
    meta_information: Dict[str, Any]
    match_middle: bool

    def __init__(
        self,
        choices: Union[List[str], Callable[[], List[str]]],
        ignore_case: bool = True,
        meta_information: Optional[Dict[str, Any]] = None,
        match_middle: bool = True,
    ) -> None:
        self.choices_source = choices
        self.ignore_case = ignore_case
        self.meta_information = meta_information or {}
        self.match_middle = match_middle

    def _choices(self) -> Iterable[str]:
        """Resolve the choice list, invoking it if it is a callable."""
        source = self.choices_source
        if callable(source):
            return source()
        return source

    def _choice_matches(self, word_before_cursor: str, choice: str) -> int:
        """Return the index where the word matches *choice*, or -1."""
        candidate = choice.lower() if self.ignore_case else choice
        if self.match_middle:
            return candidate.find(word_before_cursor)
        if candidate.startswith(word_before_cursor):
            return 0
        return -1

    @staticmethod
    def _display_for_choice(choice: str, index: int, word_before_cursor: str) -> HTML:
        """Render *choice* with the matched span bold and underlined."""
        match_end = index + len(word_before_cursor)
        return HTML("{}<b><u>{}</u></b>{}").format(
            choice[:index],
            choice[index:match_end],
            choice[match_end : len(choice)],
        )

    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        needle = document.text_before_cursor
        if self.ignore_case:
            needle = needle.lower()

        for choice in self._choices():
            match_at = self._choice_matches(needle, choice)
            if match_at == -1:
                # This choice does not match the typed prefix/fragment.
                continue

            display = self._display_for_choice(choice, match_at, needle)
            yield Completion(
                choice,
                start_position=-len(choice),
                display=display.formatted_text,
                display_meta=self.meta_information.get(choice, ""),
                style="class:answer",
                selected_style="class:selected",
            )
def autocomplete(
    message: str,
    choices: List[str],
    default: str = "",
    qmark: str = DEFAULT_QUESTION_PREFIX,
    completer: Optional[Completer] = None,
    meta_information: Optional[Dict[str, Any]] = None,
    ignore_case: bool = True,
    match_middle: bool = True,
    complete_style: CompleteStyle = CompleteStyle.COLUMN,
    validate: Any = None,
    style: Optional[Style] = None,
    **kwargs: Any,
) -> Question:
    """Prompt the user to enter a message with autocomplete help.

    Args:
        message: Question text shown to the user.
        choices: Candidate strings offered as completions.
        default: Text pre-filled into the input buffer.
        qmark: Prefix displayed in front of the question (default ``?``).
        completer: A custom :class:`prompt_toolkit.completion.Completer`;
            when ``None``, a questionary :class:`WordCompleter` built from
            ``choices`` is used.
        meta_information: Optional mapping from choice to extra display
            text shown next to it.
        ignore_case: Match choices case-insensitively.
        match_middle: Match anywhere inside a choice, not only at its
            start.
        complete_style: How the completion menu is rendered (``COLUMN``,
            ``MULTI_COLUMN`` or ``READLINE_LIKE`` from
            :class:`prompt_toolkit.shortcuts.CompleteStyle`).
        validate: Callable (or prompt_toolkit ``Validator`` subclass) that
            must accept the input before it can be submitted.
        style: Custom colors and fonts for the question parts.
        **kwargs: Passed through to ``prompt_toolkit.PromptSession``.

    Returns:
        :class:`Question`: Question instance, ready to be prompted
        (using ``.ask()``).
    """
    question_style = merge_styles([DEFAULT_STYLE, style])

    def prompt_fragments() -> List[Tuple[str, str]]:
        # Prefix + question text, styled via the merged style classes.
        return [("class:qmark", qmark), ("class:question", " {} ".format(message))]

    validator = build_validator(validate)

    if completer is None:
        if not choices:
            raise ValueError("No choices is given, you should use Text question.")
        # Wrap each meta entry so it renders with the default text style.
        if meta_information:
            for key in meta_information:
                meta_information[key] = HTML("<text>{}</text>").format(
                    meta_information[key]
                )
        # Fall back to the questionary word completer over `choices`.
        completer = WordCompleter(
            choices,
            ignore_case=ignore_case,
            meta_information=meta_information,
            match_middle=match_middle,
        )

    session = PromptSession(
        prompt_fragments,
        lexer=SimpleLexer("class:answer"),
        style=question_style,
        completer=completer,
        validator=validator,
        complete_style=complete_style,
        **kwargs,
    )
    # Pre-fill the input buffer with the default text.
    session.default_buffer.reset(Document(default))

    return Question(session.app)
| 2.640625 | 3 |
textattack/constraints/__init__.py | dheerajrav/TextAttack | 0 | 12760195 | <filename>textattack/constraints/__init__.py
"""
.. _constraint:
Constraint Package
===================
Constraints determine whether a given transformation is valid. Since transformations do not perfectly preserve semantics or grammaticality, constraints can increase the likelihood that the resulting transformation preserves these qualities. All constraints are subclasses of the ``Constraint`` abstract class, and must implement at least one of ``__call__`` or ``call_many``.
We split constraints into three main categories.
:ref:`Semantics <semantics>`: Based on the meaning of the input and perturbation.
:ref:`Grammaticality <grammaticality>`: Based on syntactic properties like part-of-speech and grammar.
:ref:`Overlap <overlap>`: Based on character-based properties, like edit distance.
A fourth type of constraint restricts the search method from exploring certain parts of the search space:
:ref:`pre_transformation <pre_transformation>`: Based on the input and index of word replacement.
"""
from .pre_transformation_constraint import PreTransformationConstraint
from .constraint import Constraint
from . import grammaticality
from . import semantics
from . import overlap
from . import pre_transformation
| 2.453125 | 2 |
conftest.py | JakeRoggenbuck/snow_script | 0 | 12760196 | import sys
import os
# Make the project's src/ directory importable during test collection.
_this_dir = os.path.dirname(os.path.abspath(__file__))
root = os.path.join(_this_dir, 'src')
sys.path.insert(0, root)
| 1.757813 | 2 |
54.py | HatsuneMikuV/PythonLearning | 0 | 12760197 | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: 54
Author : anglemiku
Eamil : <EMAIL>
date: 2020/1/8
-------------------------------------------------
Change Activity: 2020/1/8:
-------------------------------------------------
"""
'''
题目:利用递归函数调用方式,将所输入的5个字符,以相反顺序打印出来。
'''
def recursive(ss):
    """Return *ss* reversed, or '' if *ss* is not a string.

    Reverses by recursively reversing the tail and appending the head, so
    each recursion level moves one character to the end.
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # str subclasses; non-strings are still rejected with ''.
    if not isinstance(ss, str):
        return ''
    # Empty string is the recursion base case.
    if not ss:
        return ''
    return recursive(ss[1:]) + ss[:1]
if __name__ == '__main__':
    # Single-argument print(...) is valid under both Python 2 (the
    # parentheses are parsed as grouping around the expression) and
    # Python 3 (a function call), unlike the bare print statements that
    # were here before.
    print(recursive("123"))
    print(recursive("1"))
    print(recursive(1))
bitcoin/navalny/code.py | vashu1/data_snippets | 1 | 12760198 | from blockchain_parser.blockchain import Blockchain
import os
import multiprocessing
import json
from constants import *
CPU_CORES = int(multiprocessing.cpu_count() / 2 + 1) # for overnight processing set something like multiprocessing.cpu_count()
def satoshi2btc(value):
    """Convert an amount in satoshi to BTC.

    A satoshi is the smallest bitcoin unit: 1 BTC == 100,000,000 satoshi.
    """
    satoshi_per_btc = 1e8
    return value / satoshi_per_btc
def output_record(output):
    """Serialise one transaction output into a plain dict.

    Outputs with no decodable address yield an empty dict; more than one
    address per output is unexpected and trips the assertion.
    """
    addresses = output.addresses
    assert len(addresses) < 2
    if not addresses:
        return {}
    return {
        'address': addresses[0].address,
        'btc': satoshi2btc(output.value),
        'type': output.type,
    }
def transaction_record(header, transaction):
    """Summarise a transaction as a dict of timestamp, hash and outputs."""
    serialised_outputs = [output_record(out) for out in transaction.outputs]
    return {
        'timestamp': header.timestamp.strftime(DATETIME_FORMAT),
        'hash': transaction.hash,
        'outputs': serialised_outputs,
    }
def transaction_with_address(filename, address):
    """Scan one blk*.dat file for transactions paying *address*.

    Returns (filename, total number of transactions seen, list of
    serialised matching transactions).
    """
    blockchain = Blockchain(filename)
    total_transaction_count = 0
    transactions = []
    # Unordered iteration is sufficient for a per-address scan; block
    # order does not affect which transactions match.
    for block in blockchain.get_unordered_blocks():
        for transaction in block.transactions:
            total_transaction_count += 1
            pays_address = any(
                addr.address == address
                for output in transaction.outputs
                for addr in output.addresses
            )
            if pays_address:
                transactions.append(transaction_record(block.header, transaction))
    return filename, total_transaction_count, transactions
# wrap for get_transactions(), can't use lambda since pool requires pickable callable
def get_transactions(filename):
    # Binds the searched address to the module-level ADDRESS constant so
    # the function has the single-argument signature Pool.imap expects.
    return transaction_with_address(filename, ADDRESS)
if __name__ == '__main__':
    # Collect the raw block files; the last (alphabetically latest) file is
    # skipped because the node may still be appending to it.
    files = os.listdir(BITCOIN_BLOCKCHAIN_PATH)
    files = filter(lambda fn: fn.startswith('blk') and fn.endswith('.dat'), files)
    files = list(sorted(files))[:-1]  # skip last partial file
    files = [os.path.join(BITCOIN_BLOCKCHAIN_PATH, fn) for fn in files]
    files = list(reversed(files))
    print(f'=== {len(files)} files found')
    # Subtract files already handled in a previous run.  The progress file
    # stores one line per file: "<filename> <total-tx-count> <match-count>".
    if (os.path.isfile(PROCESSED_FILES_LIST)):
        with open(PROCESSED_FILES_LIST, 'rt') as f:
            processed = [os.path.join(BITCOIN_BLOCKCHAIN_PATH, line.split(' ')[0]) for line in f.readlines()]
        print(f'=== processed count {len(processed)}')
        files = list(set(files) - set(processed))
    print(f'=== {len(files)} files to process...')
    with multiprocessing.Pool(processes=CPU_CORES) as pool:
        for result in pool.imap_unordered(get_transactions, files):
            filename, total_transaction_count, transactions = result
            filename = filename.replace(BITCOIN_BLOCKCHAIN_PATH, '')
            print(f'=== {filename=} {total_transaction_count=} found={len(transactions)}')
            # Record progress.  The first token MUST be the file name: the
            # resume logic above reads it back via line.split(' ')[0].
            # (Previously a literal placeholder was written here, which
            # broke resuming.)
            with open(PROCESSED_FILES_LIST, 'at') as f:
                f.write(f'{filename} {total_transaction_count} {len(transactions)}\n')
            for transaction in transactions:
                transaction['filename'] = filename
                print(str(transaction))
                with open(TRANSACTIONS_JSON_LIST, 'at') as f:
                    f.write(f'{json.dumps(transaction)}\n')
Python-Level06-MNIST_SubClassingAPI.py | taisuke-ito/PythonMacLinux | 3 | 12760199 | <gh_stars>1-10
#【1】 半角入力になっていることを確認してください。キーボードの左上に「半角/全角」キーがあります。
#【2】 CTRLキーを押しながらF5キーを押してデバッグを開始します。
# tensorflowのライブラリを読み込んでいます。tfと名前をつけています。
# 読み込むのに少し時間がかかります。
import tensorflow as tf
#【3】 CTRLキーを押しながらF10を押してデバッグを続けます。(CTRL+F5は最初だけです。以降は、CTRL+F10です。)
#Denseクラス、Flattenクラス、Conv2Dクラスをインポートします。
from tensorflow.keras.layers import Dense, Flatten, Conv2D
#Modelクラスをインポートします。
from tensorflow.keras import Model
#MNIST(手書き数字)のモジュールを、オブジェクトとして変数に代入します。
mnist = tf.keras.datasets.mnist
#MNIST(手書き数字)のデータを読み込みます。
(x_train, y_train), (x_test, y_test) = mnist.load_data()
#画像のピクセル値は、0~255の数値なので、255で割ることで、0~1の値に正規化します。
x_train, x_test = x_train / 255.0, x_test / 255.0
#tensorflowライブラリがデータを読み込めるように、次元を修正します。(x_train)
x_train = x_train[..., tf.newaxis]
#tensorflowライブラリがデータを読み込めるように、次元を修正します。(x_test)
x_test = x_test[..., tf.newaxis]
#訓練用のデータを準備します。(x_trainは学習用の画像データ、y_trainは学習用の教師データです。)
#x_trainは、60000枚の画像データですが、プログラムを読んでいくには、枚数が多すぎるので、0番~9番の10枚の画像データに絞ります。
#.copy()メソッドで配列に入れなおします。
x_train = x_train[0:10,:,:,:].copy()
#y_trainは、60000個の教師データですが、プログラムを読んでいくには、個数が多すぎるので、0番~9番の10枚分の教師データに絞ります。
#.copy()メソッドで配列に入れなおします。
y_train = y_train[0:10].copy()
#tf.data.Dataset.from_tensor_slicesは、スライスで画像データのミニバッチ分を取得するイテレータオブジェクトを生成します。
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(1).batch(2)
#テストデータを準備します。(x_test, y_test)
x_test = x_test[0:10,:,:,:].copy()
y_test = y_test[0:10].copy()
#tf.data.Dataset.from_tensor_slicesは、スライスで画像データのミニバッチ分を取得するイテレータオブジェクトを生成します。
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(2)
class MyModel(Model):
    """Small CNN for MNIST built with the Keras subclassing API.

    Layers are declared in ``__init__`` (run once at construction);
    ``call`` defines the forward pass by chaining them.
    """

    def __init__(self):
        super(MyModel, self).__init__()
        # conv -> flatten -> dense(relu) -> dense(softmax over 10 digits)
        self.conv1 = tf.keras.layers.Conv2D(32, 3, activation='relu')
        self.flatten = tf.keras.layers.Flatten()
        self.d1 = tf.keras.layers.Dense(128, activation='relu')
        self.d2 = tf.keras.layers.Dense(10, activation='softmax')

    def call(self, x):
        # Forward pass: apply the layers in declaration order.
        hidden = self.conv1(x)
        hidden = self.flatten(hidden)
        hidden = self.d1(hidden)
        return self.d2(hidden)
# Instantiate the MyModel network.
model = MyModel()
# Loss function object.  Calling it inside a GradientTape block lets the
# tape compute the loss's gradients.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
# Adam optimiser; it applies the weight updates.
optimizer = tf.keras.optimizers.Adam()
# Training loss metric.
train_loss = tf.keras.metrics.Mean(name='train_loss')
# Training accuracy metric.
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
# Test loss metric.
test_loss = tf.keras.metrics.Mean(name='test_loss')
# Test accuracy metric.
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
# @tf.function is deliberately left off so the function can be stepped
# through in a debugger.
def train_step(images, labels):
    with tf.GradientTape() as tape:
        # Recording the forward pass and loss inside GradientTape makes
        # the loss differentiable w.r.t. the model parameters.
        predictions = model(images)
        loss = loss_object(labels, predictions)
    # Compute d(loss)/d(weight) for every trainable variable; the result
    # vector is stored in `gradients`.
    gradients = tape.gradient(loss, model.trainable_variables)
    # Apply the Adam update: zip pairs each gradient with its variable.
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    # Accumulate the training loss and accuracy metrics.
    train_loss(loss)
    train_accuracy(labels, predictions)
# @tf.function compilation is left off here as well (mirroring train_step)
# so the evaluation step stays debuggable.  NOTE: this line replaced a
# corrupted decorator token that was a syntax error in the source.
def test_step(images, labels):
    # Run inference on a batch of images; `predictions` are the softmax
    # outputs.
    predictions = model(images)
    t_loss = loss_object(labels, predictions)
    # Accumulate the test loss and accuracy metrics.
    test_loss(t_loss)
    test_accuracy(labels, predictions)
EPOCHS = 6

# Train the model (adjust its internal parameters so it predicts well).
# One epoch is one full pass over the prepared images.
for epoch in range(EPOCHS):
    # Log which epoch we are on ('エポック目' = "epoch number" label).
    print(str(epoch) + 'エポック目')
    # Train on each mini-batch of images and labels.
    for images, labels in train_ds:
        train_step(images, labels)
    # Evaluate on each mini-batch of the test set.
    for test_images, test_labels in test_ds:
        test_step(test_images, test_labels)
    # Report this epoch's metrics.
    print ('Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'.format(
        epoch+1, train_loss.result(), train_accuracy.result()*100, test_loss.result(), test_accuracy.result()*100))
    # The epoch is done, so reset the accumulated metrics.
    train_loss.reset_states()
    train_accuracy.reset_states()
    test_loss.reset_states()
    test_accuracy.reset_states()

print('End')
tests/integrational/native_sync/test_fetch_messages.py | natekspencer/pubnub-python | 146 | 12760200 | import time
from pubnub.models.consumer.history import PNFetchMessagesResult
from pubnub.models.consumer.pubsub import PNPublishResult
from pubnub.pubnub import PubNub
from tests.helper import pnconf_copy
from tests.integrational.vcr_helper import use_cassette_and_stub_time_sleep_native
COUNT = 120
class TestFetchMessages:
    """Integration tests for the fetch_messages endpoint.

    Each test replays a recorded VCR cassette; time.sleep is stubbed by
    the decorator, so the polling loops terminate quickly on playback.
    """

    @use_cassette_and_stub_time_sleep_native(
        'tests/integrational/fixtures/native_sync/fetch_messages/max_100_single.yaml',
        filter_query_parameters=['uuid', 'pnsdk', 'l_pub'])
    def test_fetch_messages_return_max_100_for_single_channel(self):
        # fetch_messages on one channel caps the result at 100 messages.
        ch = "fetch-messages-ch-1"
        pubnub = PubNub(pnconf_copy())
        pubnub.config.uuid = "fetch-messages-uuid"

        # Publish COUNT (120) messages so more than 100 are available.
        for i in range(COUNT):
            envelope = pubnub.publish().channel(ch).message("hey-%s" % i).sync()
            assert isinstance(envelope.result, PNPublishResult)
            assert envelope.result.timetoken > 0

        # Poll history until at least 100 messages are visible server-side.
        while True:
            time.sleep(1)
            if len(pubnub.history().channel(ch).count(COUNT).sync().result.messages) >= 100:
                break

        envelope = pubnub.fetch_messages().channels(ch).sync()

        assert envelope is not None
        assert isinstance(envelope.result, PNFetchMessagesResult)
        assert len(envelope.result.channels[ch]) == 100

    @use_cassette_and_stub_time_sleep_native(
        'tests/integrational/fixtures/native_sync/fetch_messages/max_25_multiple.yaml',
        filter_query_parameters=['uuid', 'pnsdk', 'l_pub'])
    def test_fetch_messages_return_max_25_for_multiple_channels(self):
        # With multiple channels, the per-channel cap drops to 25.
        ch1 = "fetch-messages-ch-1"
        ch2 = "fetch-messages-ch-2"
        pubnub = PubNub(pnconf_copy())
        pubnub.config.uuid = "fetch-messages-uuid"

        # Publish COUNT messages to each channel.
        for i in range(COUNT):
            envelope1 = pubnub.publish().channel(ch1).message("hey-%s" % i).sync()
            assert isinstance(envelope1.result, PNPublishResult)
            assert envelope1.result.timetoken > 0

            envelope2 = pubnub.publish().channel(ch2).message("hey-%s" % i).sync()
            assert isinstance(envelope2.result, PNPublishResult)
            assert envelope2.result.timetoken > 0

        # Poll until both channels report at least 100 stored messages.
        while True:
            time.sleep(1)
            if len(pubnub.history().channel(ch1).count(COUNT).sync().result.messages) >= 100 and \
                    len(pubnub.history().channel(ch2).count(COUNT).sync().result.messages) >= 100:
                break

        envelope = pubnub.fetch_messages().channels([ch1, ch2]).sync()

        assert isinstance(envelope.result, PNFetchMessagesResult)
        assert len(envelope.result.channels[ch1]) == 25
        assert len(envelope.result.channels[ch2]) == 25

    @use_cassette_and_stub_time_sleep_native(
        'tests/integrational/fixtures/native_sync/fetch_messages/max_25_with_actions.yaml',
        filter_query_parameters=['uuid', 'pnsdk', 'l_pub'])
    def test_fetch_messages_actions_return_max_25(self):
        # Including message actions also caps the result at 25 messages.
        ch = "fetch-messages-actions-ch-1"
        pubnub = PubNub(pnconf_copy())
        pubnub.config.uuid = "fetch-messages-uuid"

        # Publish COUNT messages to the channel.
        for i in range(COUNT):
            envelope = pubnub.publish().channel(ch).message("hey-%s" % i).sync()
            assert isinstance(envelope.result, PNPublishResult)
            assert envelope.result.timetoken > 0

        # Poll until at least 100 messages are visible server-side.
        while True:
            time.sleep(1)
            if len(pubnub.history().channel(ch).count(COUNT).sync().result.messages) >= 100:
                break

        envelope = pubnub.fetch_messages().channels(ch).include_message_actions(True).sync()

        assert envelope is not None
        assert isinstance(envelope.result, PNFetchMessagesResult)
        assert len(envelope.result.channels[ch]) == 25
| 2.0625 | 2 |
model/AMMI.py | J-zin/Semantic-Hashing-Models | 1 | 12760201 | <filename>model/AMMI.py
import argparse
import torch
import torch.nn as nn
from model.base_model import Base_Model
import utils.entropy as ent
from utils.pytorch_helper import FF, get_init_function
class AMMI(Base_Model):
    """Hashing model trained by maximising mutual information I(Z; Y)
    between binary codes Z and documents Y, estimated as
    H(Z) - H(Z|Y).  H(Z) is computed either exactly (--brute) or via a
    learned prior qZ that is optimised inside each forward pass."""

    def __init__(self, hparams):
        super().__init__(hparams=hparams)

    def define_parameters(self):
        # Entropy machinery (precomputed index tensors/buffers).
        self.entropy = EntropyHelper(self.hparams)
        # Posterior encoder p(Z|Y): bag-of-words -> per-feature logits.
        self.pZ_Y = Posterior(self.hparams.num_features,
                              self.hparams.order_posterior,
                              self.data.vocab_size,
                              self.hparams.num_layers_posterior,
                              self.hparams.dim_hidden)
        if not self.hparams.brute:
            # Learned prior q(Z), only needed when H(Z) is estimated.
            self.qZ = Prior(self.hparams.num_features,
                            self.hparams.order_prior,
                            self.hparams.num_layers,  # Using general num_layers
                            self.hparams.dim_hidden,  # Using general dim_hidden
                            self.hparams.raw_prior)
        self.apply(get_init_function(self.hparams.init))
        # A negative lr_prior means "reuse the main learning rate".
        self.lr_prior = self.hparams.lr if self.hparams.lr_prior < 0 else \
            self.hparams.lr_prior

    def forward(self, Y):
        # Binarise the input counts, then get posterior logits.
        P_ = self.pZ_Y(Y.sign())
        P = torch.sigmoid(P_)
        Q_ = P_
        # Conditional entropy H(Z|Y) under the posterior.
        hZ_cond = self.entropy.hZ_X(P, Q_)
        if self.hparams.brute:
            # Exact H(Z) by enumeration (only feasible for small m).
            hZ = self.entropy.hZ(P)
        else:
            # Inner loop: fit the prior qZ to the (detached) posterior for
            # a few Adam steps, then estimate H(Z) as cross entropy.
            optimizer_prior = torch.optim.Adam(self.qZ.parameters(),
                                               lr=self.lr_prior)
            for _ in range(self.hparams.num_steps_prior):
                optimizer_prior.zero_grad()
                hZ = self.entropy.hZ_X(P.detach(), self.qZ())
                hZ.backward()
                nn.utils.clip_grad_norm_(self.qZ.parameters(),
                                         self.hparams.clip)
                optimizer_prior.step()
            # Final estimate with gradients flowing into the posterior.
            hZ = self.entropy.hZ_X(P, self.qZ())
        # Minimise H(Z|Y) - w * H(Z), i.e. maximise weighted MI.
        loss = hZ_cond - self.hparams.entropy_weight * hZ
        return {'loss': loss, 'hZ_cond': hZ_cond, 'hZ': hZ}

    def configure_optimizers(self):
        # NOTE(review): `params` is computed but unused; the optimizer is
        # built from self.pZ_Y.parameters() directly.
        params = list(self.pZ_Y.parameters())
        return torch.optim.Adam(self.pZ_Y.parameters(), lr=self.hparams.lr)

    def configure_gradient_clippers(self):
        # Clip only the posterior; the prior is clipped inside forward().
        clippers = [(self.pZ_Y.parameters(), self.hparams.clip)]
        return clippers

    def encode_discrete(self, Y):
        # Most likely code sequence under the posterior (Viterbi).
        P = torch.sigmoid(self.pZ_Y(Y.sign()))
        encodings = self.entropy.viterbi(P)[0]
        return encodings  # {0,1}^{B x m}

    def get_hparams_grid(self):
        grid = Base_Model.get_general_hparams_grid()
        grid.update({
            'lr_prior': [0.1, 0.03, 0.01, 0.003, 0.001, 0.0003, 0.0001],
            'entropy_weight': [1, 1.5, 2, 2.5, 3, 3.5],
            'num_steps_prior': [1, 2, 4],
            'dim_hidden': [8, 12, 16, 20, 24, 28],
            'num_layers': [0, 1, 2],
            # Repeated False values bias random sampling toward False.
            'raw_prior': [False, False, False, True],
        })
        return grid

    @staticmethod
    def get_model_specific_argparser():
        parser = Base_Model.get_general_argparser()

        parser.add_argument('--num_features', type=int, default=32,
                            help='num discrete features [%(default)d]')
        parser.add_argument('--batch_size', type=int, default=32,
                            help='batch size [%(default)d]')
        parser.add_argument('--lr', type=float, default=0.01,
                            help='initial learning rate [%(default)g]')

        parser.add_argument('--order_posterior', type=int, default=0,
                            help='Markov order of posterior [%(default)d]')
        parser.add_argument('--order_prior', type=int, default=3,
                            help='Markov order of prior [%(default)d]')
        parser.add_argument('--num_layers_posterior', type=int, default=0,
                            help='num layers in posterior [%(default)d]')
        parser.add_argument('--num_steps_prior', type=int, default=4,
                            help='num gradient steps on prior per loss '
                                 '[%(default)d]')
        parser.add_argument('--raw_prior', action='store_true',
                            help='raw logit embeddings for prior encoder?')
        parser.add_argument('--lr_prior', type=float, default=-1,
                            help='initial learning rate for prior (same as lr '
                                 ' if -1) [%(default)g]')
        parser.add_argument('--entropy_weight', type=float, default=2,
                            help='entropy weight in MI [%(default)g]')
        parser.add_argument('--brute', action='store_true',
                            help='brute-force entropy calculation?')
        parser.add_argument('--dim_hidden', type=int, default=400,
                            help='dimension of hidden state [%(default)d]')
        parser.add_argument('--num_layers', type=int, default=0,
                            help='num layers [%(default)d]')
        parser.add_argument('--beta', type=float, default=1,
                            help='beta term (as in beta-VAE) [%(default)g]')
        # NOTE(review): argparse `type=bool` is a known pitfall --
        # bool('False') is True, so any non-empty value enables the flag;
        # the help text ("num mixture components") also looks copy-pasted.
        parser.add_argument('--median_threshold', type=bool, default=False,
                            help='num mixture components [%(default)d]')
        return parser
class EntropyHelper(nn.Module):
    """Wraps the utils.entropy routines with precomputed index tensors.

    Registered buffers move with the module across devices; `buffs` is a
    plain attribute created directly on the target device.
    """

    def __init__(self, hparams):
        super().__init__()
        self.register_buffer('quads',
                             ent.precompute_quads(hparams.order_posterior))
        # The prior's Markov order must be at least the posterior's.
        assert hparams.order_prior >= hparams.order_posterior
        device = torch.device('cuda' if hparams.cuda else 'cpu')
        self.buffs = ent.precompute_buffers(hparams.batch_size,
                                            hparams.order_posterior,
                                            hparams.order_prior,
                                            device)
        if hparams.brute:
            # Enumeration table, only needed for exact entropy.
            self.register_buffer('I', ent.precompute_I(hparams.num_features,
                                                       hparams.order_posterior))

    def hZ_X(self, P, Q_):
        """Cross entropy H(Z|X) of codes under posterior P and logits Q_."""
        # A 2-D Q_ (shared prior) is broadcast across the batch.
        if len(Q_.size()) == 2:
            Q_ = Q_.repeat(P.size(0), 1, 1)
        return ent.estimate_hZ_X(P, Q_, quads=self.quads, buffers=self.buffs)

    def hZ(self, P):
        """Exact entropy H(Z) by enumeration (requires --brute buffers)."""
        return ent.estimate_hZ(P, I=self.I.repeat(P.size(0), 1, 1))

    def viterbi(self, P):
        """Most likely binary code sequence per example under P."""
        return ent.compute_viterbi(P, quads=self.quads)
class Posterior(nn.Module):
    """Posterior encoder p(Z|Y): maps an input vector to per-feature
    logits for a Markov chain of `num_features` binary features."""

    def __init__(self, num_features, markov_order, dim_input, num_layers,
                 dim_hidden):
        super(Posterior, self).__init__()
        self.num_features = num_features
        # One logit per feature per conditioning configuration (2^order).
        total_logits = num_features * (2 ** markov_order)
        self.ff = FF(dim_input, dim_hidden, total_logits, num_layers)

    def forward(self, inputs):
        batch_size = inputs.size(0)
        logits = self.ff(inputs).view(batch_size, self.num_features, -1)
        # Stack negated and raw logits: B x m x 2^(order+1).
        return torch.cat([-logits, logits], dim=2)
class Prior(nn.Module):
    """Learned prior q(Z) over the Markov chain of binary features.

    With ``raw=True`` the per-feature logits are free embedding weights;
    otherwise an embedding is passed through a feed-forward net.
    """

    def __init__(self, num_features, markov_order, num_layers, dim_hidden,
                 raw=False):
        super(Prior, self).__init__()
        self.raw = raw
        num_configs = 2 ** markov_order
        if raw:
            self.theta = nn.Embedding(num_features, num_configs)
        else:
            self.theta = nn.Embedding(num_features, dim_hidden)
            self.ff = FF(dim_hidden, dim_hidden, num_configs, num_layers)

    def forward(self):
        if self.raw:
            logits = self.theta.weight
        else:
            logits = self.ff(self.theta.weight)
        # Stack negated and raw logits: m x 2^(order+1).
        return torch.cat([-logits, logits], dim=1)
app.py | AnishaCh/flask | 0 | 12760202 | <filename>app.py<gh_stars>0
from flask import Flask, render_template, flash

app = Flask(__name__)
# Required for flash() — messages are stored in the signed session cookie.
# NOTE(review): the value was redacted upstream (and the original literal was
# left unterminated, a syntax error); substitute a real random key, e.g.
# secrets.token_bytes(32), before deploying.
app.secret_key = b'<KEY>'


@app.route('/')
def dashboard():
    """Render the dashboard template with a few demo flash messages."""
    flash("flash test!!!!")
    flash("fladfasdfsaassh test!!!!")
    flash("asdfas asfsafs!!!!")
    return render_template("dotcomBubble.html")


if __name__ == "__main__":
    # Start the development server only when executed directly, not on import.
    app.run(port=4996)
oneflow/python/test/graph/test_graph.py | MaoXianXin/oneflow | 1 | 12760203 | <reponame>MaoXianXin/oneflow
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.experimental as flow
import oneflow
class SubModule(flow.nn.Module):
    """Conv2d + ReLU sub-network used to exercise nested modules in nn.Graph."""
    def __init__(self):
        super().__init__()
        self.conv1 = flow.nn.Conv2d(1, 1, 5)
        self.relu = flow.nn.ReLU()
    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        return x
class CustomModule(flow.nn.Module):
    """Module combining a nested sub-module, a Linear layer and a buffer.

    The registered buffer lets the tests verify that nn.Graph wraps buffers
    (not just parameters and sub-modules) as Blocks.
    """
    def __init__(self):
        super().__init__()
        self.layer = SubModule()
        self.fc1 = flow.nn.Linear(36, 4)
        # Non-trainable tensor; participates in forward() via broadcasting.
        self.register_buffer(
            "dummy_buff", flow.Tensor(1, 4),
        )
    def forward(self, x):
        x = self.layer(x)
        x = oneflow.F.flatten(x, 1)
        x = self.fc1(x) + self.dummy_buff
        return x
@flow.unittest.skip_unless_1n1d()
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestGraph(flow.unittest.TestCase):
    """Unit tests for oneflow.nn.Graph wrapping, configuration and naming.

    Note: methods take ``test_case`` instead of the conventional ``self`` —
    presumably an OneFlow test-suite convention; behavior is identical.
    """
    def test_add_nested_module(test_case):
        """Graph-wrapped modules become Blocks and produce the same output."""
        x = flow.Tensor(1, 1, 10, 10)
        flow.nn.init.uniform_(x, a=-1.0, b=1.0)
        # Module init and call (eager reference result).
        m = CustomModule()
        y = m(x)
        class CustomGraph(flow.nn.Graph):
            def __init__(self):
                super().__init__()
                self.m = m
            def build(self, x):
                return self.m(x)
        # Graph init
        g = CustomGraph()
        # check _c_nn_graph init
        test_case.assertEqual(g.name, g._c_nn_graph.name)
        # g.m is Block
        test_case.assertTrue(isinstance(g.m, flow.nn.graph.Block))
        # g.m.name is "m"
        test_case.assertEqual(g.m.name, "m")
        # g.m.dummy_buff is Tensor, Graph.build(...) need buffer to be Tensor
        test_case.assertTrue(isinstance(g.m.dummy_buff, flow.Tensor))
        # g.m._buffers["dummy_buff"] is Block
        test_case.assertTrue(
            isinstance(g.m._buffers["dummy_buff"], flow.nn.graph.Block)
        )
        # conv1 is Block
        test_case.assertTrue(isinstance(g.m.layer.conv1, flow.nn.graph.Block))
        # conv1.name is "conv1"
        test_case.assertEqual(g.m.layer.conv1.name, "conv1")
        # conv1.weight is Tensor, Graph.build(...) need weight to be Tensor
        test_case.assertTrue(isinstance(g.m.layer.conv1.weight, flow.Tensor))
        # conv1._parameters["weight"] is Block
        test_case.assertTrue(
            isinstance(g.m.layer.conv1._parameters["weight"], flow.nn.graph.Block)
        )
        # conv1.kernel_size is original data in original module
        test_case.assertEqual(g.m.layer.conv1.kernel_size, (5, 5))
        # Graph build
        z = g.build(x)
        # g got the same result as m
        test_case.assertTrue(np.array_equal(y.numpy(), z.numpy()))
    def test_graph_config(test_case):
        """Graph config flags, state enumeration and repr() work."""
        class CustomGraph(flow.nn.Graph):
            def __init__(self):
                super().__init__()
                self.m = CustomModule()
                self.config.enable_auto_mixed_precision(True)
            def build(self, x):
                x = self.m(x)
                return x
        g = CustomGraph()
        # check default training is True
        test_case.assertEqual(g.config.training, False)
        # set graph config
        g.config.enable_fuse_add_to_output(True)
        g.config.enable_fuse_add_to_output(False)
        # check _named_state get the right tensor
        for n, t in g._named_state():
            test_case.assertEqual(id(eval("g." + n)), id(t))
        # print repr of nn.Graph
        print(repr(g))
    def test_graph_name(test_case):
        """Graph names are ClassName_<counter>, per-class, resettable."""
        class ACustomGraph(flow.nn.Graph):
            def __init__(self):
                super().__init__()
            def build(self, x):
                return x
        class BCustomGraph(flow.nn.Graph):
            def __init__(self):
                super().__init__()
            def build(self, x):
                return x
        class CBCustomGraph(BCustomGraph):
            def __init__(self):
                super().__init__()
        def create_graph(cnt):
            a = ACustomGraph()
            test_case.assertEqual(a.name, "ACustomGraph_" + str(cnt))
            b = BCustomGraph()
            test_case.assertEqual(b.name, "BCustomGraph_" + str(cnt))
            cb = CBCustomGraph()
            test_case.assertEqual(cb.name, "CBCustomGraph_" + str(cnt))
        # Clearing the counter restarts numbering from zero.
        flow.nn.Graph._child_init_cnt.clear()
        for i in range(0, 3):
            create_graph(i)
        flow.nn.Graph._child_init_cnt.clear()
        for i in range(0, 3):
            create_graph(i)
if __name__ == "__main__":
    unittest.main()
| 2.28125 | 2 |
algorithms/python/leetcode/common/sort_util.py | ytjia/coding-pratice | 0 | 12760204 | # -*- coding: utf-8 -*-
# Authors: <NAME> <<EMAIL>>
def binary_search(arr, target, begin=None, end=None):
    """Recursively search a sorted (ascending) sequence for *target*.

    Args:
        arr: Sorted sequence to search.
        target: Value to look for.
        begin: Inclusive lower bound of the search window (defaults to 0).
        end: Inclusive upper bound (defaults to ``len(arr) - 1``).

    Returns:
        A tuple ``(found, index)``: ``(True, i)`` when ``arr[i] == target``,
        otherwise ``(False, None)``.
    """
    if begin is None:
        begin = 0
    if end is None:
        end = len(arr) - 1
    # Empty window — also covers an empty input, where end == -1 < begin == 0.
    if end < begin or end < 0:
        return False, None
    elif end == begin:
        if arr[end] == target:
            return True, end
        else:
            return False, None
    # Floor division keeps the midpoint an int; the original used "/", which
    # yields a float under Python 3 and makes arr[mid] raise TypeError.
    mid = begin + (end - begin) // 2
    if arr[mid] == target:
        return True, mid
    elif arr[mid] < target:
        return binary_search(arr, target, mid + 1, end)
    else:
        return binary_search(arr, target, begin, mid - 1)
| 3.6875 | 4 |
changebot/blueprints/tests/test_stale_issues.py | dr-rodriguez/astropy-bot | 1 | 12760205 | import json
import time
from unittest.mock import patch
from changebot.webapp import app
from changebot.github.github_api import RepoHandler, IssueHandler
from changebot.blueprints.stale_issues import (process_issues,
ISSUE_CLOSE_EPILOGUE,
ISSUE_CLOSE_WARNING,
is_close_warning,
is_close_epilogue)
def test_is_close_warning():
    """The warning template is recognized by its own matcher."""
    assert is_close_warning(ISSUE_CLOSE_WARNING)
def test_is_close_epilogue():
    """The closing-epilogue template is recognized by its own matcher."""
    assert is_close_epilogue(ISSUE_CLOSE_EPILOGUE)
def now():
    """Return the current Unix time in seconds (float)."""
    return time.time()
class TestHook:
    """Tests for the /close_stale_issues webhook endpoint."""
    def setup_method(self, method):
        self.client = app.test_client()
    @patch.object(app, 'cron_token', '<PASSWORD>')
    def test_valid(self):
        """A request with the correct cron token triggers issue processing."""
        data = {'repository': 'test-repo', 'cron_token': '<PASSWORD>', 'installation': '123'}
        with patch('changebot.blueprints.stale_issues.process_issues') as p:
            response = self.client.post('/close_stale_issues', data=json.dumps(data),
                                        content_type='application/json')
        assert response.data == b''
        assert p.call_count == 1
    @patch.object(app, 'cron_token', '<PASSWORD>')
    def test_invalid_cron(self):
        """A wrong cron token is rejected and nothing is processed."""
        data = {'repository': 'test-repo', 'cron_token': '<PASSWORD>', 'installation': '123'}
        with patch('changebot.blueprints.stale_issues.process_issues') as p:
            response = self.client.post('/close_stale_issues', data=json.dumps(data),
                                        content_type='application/json')
        assert response.data == b'Incorrect cron_token'
        assert p.call_count == 0
    @patch.object(app, 'cron_token', '<PASSWORD>')
    def test_missing_keyword(self):
        """A payload without 'repository' is rejected and nothing is processed."""
        data = {'cron_token': '<PASSWORD>', 'installation': '123'}
        with patch('changebot.blueprints.stale_issues.process_issues') as p:
            response = self.client.post('/close_stale_issues', data=json.dumps(data),
                                        content_type='application/json')
        # "mising" (sic) matches the typo in the server's response verbatim.
        assert response.data == b'Payload mising repository'
        assert p.call_count == 0
@patch.object(app, 'stale_issue_close', True)
@patch.object(app, 'stale_issue_close_seconds', 34442)
@patch.object(app, 'stale_issue_warn_seconds', 14122)
class TestProcessIssues:
    """Tests for process_issues() warn/close state machine.

    The class decorators fix the warn deadline (14122 s) and close deadline
    (34442 s) so each test can place the label timestamp precisely before or
    after a deadline. All GitHub interactions are patched out in setup_method.
    """
    def setup_method(self, method):
        self.patch_get_issues = patch.object(RepoHandler, 'get_issues')
        self.patch_submit_comment = patch.object(IssueHandler, 'submit_comment')
        self.patch_close = patch.object(IssueHandler, 'close')
        self.patch_get_label_added_date = patch.object(IssueHandler, 'get_label_added_date')
        self.patch_find_comments = patch.object(IssueHandler, 'find_comments')
        self.patch_set_labels = patch.object(IssueHandler, 'set_labels')
        self.get_issues = self.patch_get_issues.start()
        self.submit_comment = self.patch_submit_comment.start()
        self.close = self.patch_close.start()
        self.get_label_added_date = self.patch_get_label_added_date.start()
        self.find_comments = self.patch_find_comments.start()
        self.set_labels = self.patch_set_labels.start()
    def teardown_method(self, method):
        # Stop every patch started in setup_method (mirrors the order above).
        self.patch_get_issues.stop()
        self.patch_submit_comment.stop()
        self.patch_close.stop()
        self.patch_get_label_added_date.stop()
        self.patch_find_comments.stop()
        self.patch_set_labels.stop()
    def test_close_comment_exists(self):
        # Time is beyond close deadline, and there is already a comment. In this
        # case no new comment should be posted and the issue should be kept open
        # since this likely indicates the issue was open again manually.
        self.get_issues.return_value = ['123']
        self.get_label_added_date.return_value = now() - 34443
        self.find_comments.return_value = ['1']
        with app.app_context():
            # The list() call is to force the generator to run fully
            list(process_issues('repo', 'installation'))
        self.get_issues.assert_called_with('open', 'Close?')
        self.get_label_added_date.assert_called_with('Close?')
        assert self.submit_comment.call_count == 0
        assert self.close.call_count == 0
        assert self.set_labels.call_count == 0
    def test_close(self):
        # Time is beyond close deadline, and there is no comment yet so the
        # closing comment can be posted and the issue closed.
        self.get_issues.return_value = ['123']
        self.get_label_added_date.return_value = now() - 34443
        self.find_comments.return_value = []
        with app.app_context():
            # The list() call is to force the generator to run fully
            list(process_issues('repo', 'installation'))
        assert self.submit_comment.call_count == 1
        expected = ISSUE_CLOSE_EPILOGUE
        self.submit_comment.assert_called_with(expected)
        assert self.close.call_count == 1
        assert self.set_labels.call_count == 1
    def test_close_disabled(self):
        # Second case: time is beyond close deadline, and there is no comment yet
        # but the global option to allow closing has not been enabled. Since there
        # is no comment, the warning gets posted (rather than the 'epilogue')
        self.get_issues.return_value = ['123']
        self.get_label_added_date.return_value = now() - 34443
        self.find_comments.return_value = []
        with app.app_context():
            with patch.object(app, 'stale_issue_close', False):
                # The list() call is to force the generator to run fully
                list(process_issues('repo', 'installation'))
        assert self.submit_comment.call_count == 1
        expected = ISSUE_CLOSE_WARNING.format(pasttime='9 hours ago', futuretime='5 hours')
        self.submit_comment.assert_called_with(expected)
        assert self.close.call_count == 0
        assert self.set_labels.call_count == 0
    def test_warn_comment_exists(self):
        # Time is beyond warn deadline but within close deadline. There is
        # already a warning, so don't do anything.
        self.get_issues.return_value = ['123']
        self.get_label_added_date.return_value = now() - 34400
        self.find_comments.return_value = ['1']
        with app.app_context():
            list(process_issues('repo', 'installation'))
        assert self.submit_comment.call_count == 0
        assert self.close.call_count == 0
        assert self.set_labels.call_count == 0
    def test_warn(self):
        # Time is beyond warn deadline but within close deadline. There isn't a
        # comment yet, so a comment should be posted.
        self.get_issues.return_value = ['123']
        self.get_label_added_date.return_value = now() - 34400
        self.find_comments.return_value = []
        with app.app_context():
            list(process_issues('repo', 'installation'))
        assert self.submit_comment.call_count == 1
        expected = ISSUE_CLOSE_WARNING.format(pasttime='9 hours ago', futuretime='5 hours')
        self.submit_comment.assert_called_with(expected)
        assert self.close.call_count == 0
        assert self.set_labels.call_count == 0
    def test_keep_open(self):
        # Time is before warn deadline so don't do anything.
        self.get_issues.return_value = ['123']
        self.get_label_added_date.return_value = now() - 14000
        self.find_comments.return_value = []
        with app.app_context():
            list(process_issues('repo', 'installation'))
        assert self.find_comments.call_count == 0
        assert self.submit_comment.call_count == 0
        assert self.close.call_count == 0
        assert self.set_labels.call_count == 0
| 2.34375 | 2 |
main/forms.py | TolimanStaR/Course-Work | 1 | 12760206 | <filename>main/forms.py<gh_stars>1-10
from django import forms
from .models import Comment
class SearchForm(forms.Form):
    """Single-field form holding the user's free-text search query."""
    # Query string, capped at 300 characters.
    search_text = forms.CharField(max_length=300)
class CommentForm(forms.Form):
    """Form for submitting a comment body.

    NOTE(review): the Comment model is imported in this module but unused
    here — a ModelForm may have been intended; confirm.
    """
    body = forms.CharField()
| 2.21875 | 2 |
pddlstream/algorithms/scheduling/apply_fluents.py | syc7446/pddlstream | 2 | 12760207 | import copy
from pddlstream.algorithms.downward import fact_from_fd
from pddlstream.algorithms.reorder import get_partial_orders
from pddlstream.language.conversion import pddl_from_object
from pddlstream.language.object import OptimisticObject, UniqueOptValue
from pddlstream.utils import neighbors_from_orders, get_mapping
from pddlstream.language.function import FunctionResult
def get_steps_from_stream(stream_plan, step_from_fact, node_from_atom):
    """Map each stream result to the plan steps that rely on its facts.

    Walks ``stream_plan`` in reverse: a result inherits the steps at which one
    of its certified facts (achieved by this very result, per
    ``node_from_atom``) is used, then propagates those steps onto its domain
    facts. Mutates ``step_from_fact`` in place.

    Returns:
        Dict mapping each result to a set of plan-step indices.
    """
    steps_from_stream = {}
    for result in reversed(stream_plan):
        steps_from_stream[result] = set()
        for fact in result.get_certified():
            # Only count facts whose achieving result is this result.
            if (fact in step_from_fact) and (node_from_atom[fact].result == result):
                steps_from_stream[result].update(step_from_fact[fact])
        for fact in result.instance.get_domain():
            # Domain facts inherit the consuming steps of this result.
            step_from_fact[fact] = step_from_fact.get(fact, set()) | steps_from_stream[result]
    # TODO: apply this recursively
    return steps_from_stream
def convert_fluent_streams(stream_plan, real_states, action_plan, step_from_fact, node_from_atom):
    """Split a stream plan into static results and per-state fluent results.

    Fluent streams depend on the state in which they are evaluated, so each
    one is re-instantiated once per plan step that needs it, with the fluent
    facts of that step's state attached. May mutate ``action_plan`` entries
    (shallow-copied) to remap variable bindings onto fresh output objects.

    Returns:
        ``static_plan + fluent_plan`` — the non-fluent results followed by the
        newly instantiated fluent results.
    """
    import pddl
    # One state per action plus the initial state.
    assert len(real_states) == len(action_plan) + 1
    steps_from_stream = get_steps_from_stream(stream_plan, step_from_fact, node_from_atom)
    # TODO: ensure that derived facts aren't in fluents?
    # TODO: handle case where costs depend on the outputs
    _, outgoing_edges = neighbors_from_orders(get_partial_orders(stream_plan, init_facts=map(
        fact_from_fd, filter(lambda f: isinstance(f, pddl.Atom), real_states[0]))))
    static_plan = []
    fluent_plan = []
    for result in stream_plan:
        external = result.external
        # Functions, non-primary optimistic results and non-fluent streams
        # stay in the static part of the plan unchanged.
        if isinstance(result, FunctionResult) or (result.opt_index != 0) or (not external.is_fluent()):
            static_plan.append(result)
            continue
        if outgoing_edges[result]:
            # No way of taking into account the binding of fluent inputs when preventing cycles
            raise NotImplementedError('Fluent stream is required for another stream: {}'.format(result))
        #if (len(steps_from_stream[result]) != 1) and result.output_objects:
        #    raise NotImplementedError('Fluent stream required in multiple states: {}'.format(result))
        for state_index in steps_from_stream[result]:
            # Fresh optimistic outputs so each state's instantiation is unique.
            new_output_objects = [
                #OptimisticObject.from_opt(out.value, object())
                OptimisticObject.from_opt(out.value, UniqueOptValue(result.instance, object(), i))
                for i, out in enumerate(result.output_objects)]
            if new_output_objects and (state_index < len(action_plan)):
                # TODO: check that the objects aren't used in any effects
                # Shallow-copy the action so only this step's bindings change.
                instance = copy.copy(action_plan[state_index])
                action_plan[state_index] = instance
                output_mapping = get_mapping(list(map(pddl_from_object, result.output_objects)),
                                             list(map(pddl_from_object, new_output_objects)))
                instance.var_mapping = {p: output_mapping.get(v, v)
                                        for p, v in instance.var_mapping.items()}
            # Fluent facts of this state restricted to the stream's predicates.
            fluent_facts = list(map(fact_from_fd, filter(
                lambda f: isinstance(f, pddl.Atom) and (f.predicate in external.fluents), real_states[state_index])))
            new_instance = external.get_instance(result.instance.input_objects, fluent_facts=fluent_facts)
            # TODO: handle optimistic here
            new_result = new_instance.get_result(new_output_objects, opt_index=result.opt_index)
            fluent_plan.append(new_result)
    return static_plan + fluent_plan
signal_processing/synthetic/generators/tests/test_sine_wave_generator.py | dtemir/labgraph | 1 | 12760208 | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import numpy as np
from ..sine_wave_generator import SineWaveChannelConfig, SineWaveGenerator
def test_generate_sinusoid() -> None:
    """
    Tests that the samples generated from each channel matches the parameters
    specified in their configuration.

    Expected signal per channel i: a_i * sin(2*pi*f_i*t + phi_i) + m_i.
    """
    test_clock_frequency = 100  # hz
    test_duration = 300  # sec
    # Per-channel test configuration (two channels).
    shape = (2,)
    amplitudes = np.array([5.0, 3.0])
    frequencies = np.array([5, 10])
    phase_shifts = np.array([1.0, 5.0])
    midlines = np.array([3.0, -2.5])
    sample_rate = test_clock_frequency
    config = SineWaveChannelConfig(
        shape, amplitudes, frequencies, phase_shifts, midlines, sample_rate
    )
    # The generator under test.
    generator = SineWaveGenerator(config)
    # Generate expected values analytically, vectorized over channels x time.
    t_s = np.arange(0, test_duration, 1 / test_clock_frequency)
    angles = np.expand_dims(frequencies, 1) * np.expand_dims(2 * np.pi * t_s, 0)
    angles = angles + np.expand_dims(phase_shifts, 1)
    expected = np.expand_dims(amplitudes, 1) * np.sin(angles) + np.expand_dims(
        midlines, 1
    )
    # Pull one sample per tick; transpose to channels x time for comparison.
    values = [generator.next_sample().data for t in t_s]
    values = np.array(values).T
    np.testing.assert_almost_equal(values, expected)
if __name__ == "__main__":
    test_generate_sinusoid()
| 2.78125 | 3 |
tests/test_views.py | cocodelabs/api.palaverapp.com | 3 | 12760209 | import json
import unittest
from rivr.test import Client
from palaverapi import app
from palaverapi.models import Device, Token
class ViewTests(unittest.TestCase):
    """End-to-end tests for the device registration endpoints.

    Tests that create database rows clean them up explicitly at the end
    (no transactional fixture is used).
    """
    def setUp(self) -> None:
        self.client = Client(app)
    def test_status(self) -> None:
        """The root endpoint answers 204 No Content."""
        assert self.client.get('/').status_code == 204
    def test_register(self) -> None:
        """POST /1/devices creates a Device with both APNS and push tokens."""
        response = self.client.post(
            '/1/devices',
            headers={'Content-Type': 'application/json'},
            body=json.dumps({'device_token': 'test_token'}).encode('utf-8'),
        )
        self.assertEqual(response.status_code, 201)
        self.assertEqual(response.headers['Content-Type'], 'application/json')
        self.assertEqual(
            response.content,
            '{"device_token": "test_token", "push_token": "<PASSWORD>"}',
        )
        device = Device.get(apns_token='test_token')
        assert device
        push_token = (
            Token.select()
            .where(
                Token.device == device,
                Token.token == 'ec<PASSWORD>',
            )
            .get()
        )
        token = (
            Token.select()
            .where(Token.device == device, Token.token == 'test_token')
            .get()
        )
        # Clean up created rows (tokens first, then the device).
        token.delete_instance()
        push_token.delete_instance()
        device.delete_instance()
    def test_returns_200_when_re_registering(self) -> None:
        """Registering the same device twice answers 200, not 201."""
        response = self.client.post(
            '/1/devices',
            headers={'Content-Type': 'application/json'},
            body=json.dumps({'device_token': 'test_token'}).encode('utf-8'),
        )
        response = self.client.post(
            '/1/devices',
            headers={'Content-Type': 'application/json'},
            body=json.dumps({'device_token': '<PASSWORD>'}).encode('utf-8'),
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers['Content-Type'], 'application/json')
        self.assertEqual(
            response.content,
            '{"device_token": "<PASSWORD>", "push_token": "<PASSWORD>"}',
        )
        device = Device.get(apns_token='test_token')
        assert device
        push_token = (
            Token.select()
            .where(
                Token.device == device,
                Token.token == '<PASSWORD>',
            )
            .get()
        )
        token = (
            Token.select()
            .where(Token.device == device, Token.token == '<PASSWORD>_<PASSWORD>')
            .get()
        )
        # Clean up created rows.
        push_token.delete_instance()
        token.delete_instance()
        device.delete_instance()
| 2.375 | 2 |
RTB_branding/setup.py | BioversityCostaRica/rtb-climmob-extensions | 0 | 12760210 | import os
from setuptools import setup, find_packages
# Absolute path of the directory containing this setup.py.
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "README.md")) as f:
    README = f.read()
with open(os.path.join(here, "CHANGES.txt")) as f:
    CHANGES = f.read()
# Runtime dependency: this package is a plugin for climmob.
requires = [
    "climmob",
]
# Extra dependencies pulled in only for the "testing" extra below.
tests_require = [
    "WebTest >= 1.3.1",  # py3 compat
    "pytest",
    "pytest-cov",
]
setup(
    name="RTB_branding",
    version="1.0",
    description="Branding",
    # Long description shown on package indexes: README then changelog.
    long_description=README + "\n\n" + CHANGES,
    classifiers=[
        "Programming Language :: Python",
        "Framework :: Pyramid",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
    ],
    author="Alliance Bioversity-CIAT",
    author_email="<EMAIL>",
    url="https://rtb.climmob.net",
    keywords="climmob plugin",
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    extras_require={"testing": tests_require,},
    install_requires=requires,
    # Registers the plugin so climmob discovers it via entry points.
    entry_points={
        "climmob.plugins": ["RTB_branding = RTB_branding.plugin:RTBBranding",],
    },
)
| 1.34375 | 1 |
qinling/tests/unit/api/controllers/v1/test_execution.py | lingxiankong/qinling | 2 | 12760211 | # Copyright 2017 Catalyst IT Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from qinling.db import api as db_api
from qinling import exceptions as exc
from qinling import status
from qinling.tests.unit.api import base
class TestExecutionController(base.APITest):
    """API tests for the /v1/executions endpoints.

    The engine RPC is mocked in every test, so only API/database behavior
    (status codes, counters, filtering) is exercised.
    """
    def setUp(self):
        super(TestExecutionController, self).setUp()
        db_func = self.create_function()
        self.func_id = db_func.id
    @mock.patch('qinling.rpc.EngineClient.create_execution')
    def test_post(self, mock_create_execution):
        """Creating an execution bumps the function's execution count."""
        body = {
            'function_id': self.func_id,
        }
        resp = self.app.post_json('/v1/executions', body)
        self.assertEqual(201, resp.status_int)
        resp = self.app.get('/v1/functions/%s' % self.func_id)
        self.assertEqual(1, resp.json.get('count'))
    @mock.patch('qinling.rpc.EngineClient.create_execution')
    def test_post_with_version(self, mock_rpc):
        """A versioned execution counts against the version, not the function."""
        db_api.increase_function_version(self.func_id, 0,
                                         description="version 1")
        body = {
            'function_id': self.func_id,
            'function_version': 1
        }
        resp = self.app.post_json('/v1/executions', body)
        self.assertEqual(201, resp.status_int)
        resp = self.app.get('/v1/functions/%s' % self.func_id)
        self.assertEqual(0, resp.json.get('count'))
        resp = self.app.get('/v1/functions/%s/versions/1' % self.func_id)
        self.assertEqual(1, resp.json.get('count'))
    @mock.patch('qinling.rpc.EngineClient.create_execution')
    def test_post_with_alias(self, mock_rpc):
        """An execution via alias resolves to the aliased function version."""
        db_api.increase_function_version(self.func_id, 0,
                                         description="version 1")
        name = self.rand_name(name="alias", prefix=self.prefix)
        body = {
            'function_id': self.func_id,
            'function_version': 1,
            'name': name
        }
        db_api.create_function_alias(**body)
        execution_body = {
            'function_alias': name
        }
        resp = self.app.post_json('/v1/executions', execution_body)
        self.assertEqual(201, resp.status_int)
        resp = self.app.get('/v1/functions/%s' % self.func_id)
        self.assertEqual(0, resp.json.get('count'))
        resp = self.app.get('/v1/functions/%s/versions/1' % self.func_id)
        self.assertEqual(1, resp.json.get('count'))
    def test_post_without_required_params(self):
        """Posting an empty payload is rejected with 400."""
        resp = self.app.post(
            '/v1/executions',
            params={},
            expect_errors=True
        )
        self.assertEqual(400, resp.status_int)
    @mock.patch('qinling.rpc.EngineClient.create_execution')
    def test_post_rpc_error(self, mock_create_execution):
        """An engine RPC failure still creates the execution, in ERROR state."""
        mock_create_execution.side_effect = exc.QinlingException
        body = {
            'function_id': self.func_id,
        }
        resp = self.app.post_json('/v1/executions', body)
        self.assertEqual(201, resp.status_int)
        self.assertEqual(status.ERROR, resp.json.get('status'))
    @mock.patch('qinling.rpc.EngineClient.create_execution')
    def test_get(self, mock_create_execution):
        """A created execution can be fetched by id."""
        body = {
            'function_id': self.func_id,
        }
        resp = self.app.post_json('/v1/executions', body)
        self.assertEqual(201, resp.status_int)
        resp = self.app.get('/v1/executions/%s' % resp.json.get('id'))
        self.assertEqual(self.func_id, resp.json.get('function_id'))
    @mock.patch('qinling.rpc.EngineClient.create_execution')
    def test_get_all(self, mock_create_execution):
        """Listing executions includes the created one."""
        body = {
            'function_id': self.func_id,
        }
        resp = self.app.post_json('/v1/executions', body)
        exec_id = resp.json.get('id')
        self.assertEqual(201, resp.status_int)
        resp = self.app.get('/v1/executions')
        self.assertEqual(200, resp.status_int)
        actual = self._assert_single_item(
            resp.json['executions'], id=exec_id
        )
        self._assertDictContainsSubset(actual, body)
    @mock.patch('qinling.rpc.EngineClient.create_execution')
    def test_get_all_filter(self, mock_create_execution):
        """Listing supports filtering by function_id and status."""
        body = {
            'function_id': self.func_id,
        }
        resp = self.app.post_json('/v1/executions', body)
        exec_id = resp.json.get('id')
        self.assertEqual(201, resp.status_int)
        # Test filtering by 'function_id'
        resp = self.app.get('/v1/executions?function_id=%s' % self.func_id)
        self.assertEqual(200, resp.status_int)
        actual = self._assert_single_item(
            resp.json['executions'], id=exec_id
        )
        self._assertDictContainsSubset(actual, body)
        # Test filtering by 'status'
        resp = self.app.get(
            '/v1/executions?function_id=%s&status=running' % self.func_id
        )
        self.assertEqual(200, resp.status_int)
        self._assert_single_item(resp.json['executions'], id=exec_id)
    @mock.patch('qinling.rpc.EngineClient.create_execution')
    def test_delete(self, mock_create_execution):
        """Deleting an execution answers 204."""
        body = {
            'function_id': self.func_id,
        }
        resp = self.app.post_json('/v1/executions', body)
        exec_id = resp.json.get('id')
        resp = self.app.delete('/v1/executions/%s' % exec_id)
        self.assertEqual(204, resp.status_int)
| 1.960938 | 2 |
tests/test_cli.py | art049/pytkdocs | 1 | 12760212 | """Tests for [the `cli` module][pytkdocs.cli]."""
import io
import json

import pytest

from pytkdocs import cli
def test_show_help(capsys):
    """
    Shows help.

    `-h` makes argparse print usage and exit, hence the SystemExit.

    Arguments:
        capsys: Pytest fixture to capture output.
    """
    with pytest.raises(SystemExit):
        cli.main(["-h"])
    captured = capsys.readouterr()
    assert "pytkdocs" in captured.out
def test_read_whole_stdin(monkeypatch):
    """Read whole standard input: a single JSON document with two objects."""
    monkeypatch.setattr(
        "sys.stdin",
        io.StringIO(
            """
            {
                "objects": [
                    {
                        "path": "pytkdocs.cli.main"
                    },
                    {
                        "path": "pytkdocs.cli.get_parser"
                    }
                ]
            }
            """
        ),
    )
    cli.main()
def test_read_stdin_line_by_line(monkeypatch):
    """Read standard input line by line: one JSON document per line."""
    monkeypatch.setattr(
        "sys.stdin",
        io.StringIO(
            '{"objects": [{"path": "pytkdocs.cli.main"}]}\n{"objects": [{"path": "pytkdocs.cli.get_parser"}]}\n'
        ),
    )
    cli.main(["--line-by-line"])
def test_load_complete_tree(monkeypatch):
    """Load `pytkdocs` own documentation (smoke test over the whole package)."""
    monkeypatch.setattr("sys.stdin", io.StringIO('{"objects": [{"path": "pytkdocs"}]}'))
    cli.main(["--line-by-line"])
def test_discard_stdout(monkeypatch, capsys):
    """Discard standard output produced at import time of inspected modules."""
    monkeypatch.setattr("sys.stdin", io.StringIO('{"objects": [{"path": "tests.fixtures.corrupt_output"}]}'))
    cli.main(["--line-by-line"])
    captured = capsys.readouterr()
    # The fixture prints this string on import; it must not leak into output.
    assert not captured.out.startswith("*corruption intensifies*")
    # assert no JSON parsing error
    json.loads(captured.out)
def test_exception_raised_while_discard_stdout(monkeypatch, capsys):
    """Check that an error is still printed when an exception is raised and stdout is discarded."""
    monkeypatch.setattr("sys.stdin", io.StringIO('{"objects": [{"path": "pytkdocs.cli"}]}'))
    # raise an exception during the process
    monkeypatch.setattr("pytkdocs.cli.process_json", lambda _: 1 / 0)
    # assert no exception propagates out of main
    cli.main(["--line-by-line"])
    # assert json error was written to stdout
    captured = capsys.readouterr()
    assert captured.out
    # assert no JSON parsing error
    json.loads(captured.out)
def test_load_complete_tests_tree(monkeypatch):
    """Load `pytkdocs` own tests' documentation (smoke test)."""
    monkeypatch.setattr("sys.stdin", io.StringIO('{"objects": [{"path": "tests"}]}'))
    cli.main(["--line-by-line"])
| 2.5625 | 3 |
game/authentication/websocket_authentication.py | dimadk24/english-fight-api | 0 | 12760213 | from django.conf import settings
from django.utils.module_loading import import_string
from rest_framework.exceptions import AuthenticationFailed
from game.authentication.base_websocket_authentication import (
AbstractWebsocketAuthentication,
)
from game.models import AppUser
def authenticate_websocket(auth_header: str) -> AppUser:
    """Acts like authentication backends in Django.

    Takes auth string from websocket authentication event.
    Returns AppUser instance or raises AuthenticationFailed.
    """
    # Try each configured websocket authentication class in order; the first
    # one that recognizes the header wins.
    for AuthClassString in settings.WEBSOCKET_AUTHENTICATION_CLASSES:
        AuthClass = import_string(AuthClassString)
        auth_instance: AbstractWebsocketAuthentication = AuthClass()
        response = auth_instance.authenticate_auth_header(
            auth_header=auth_header
        )
        if response:
            # response is a sequence whose first element is the AppUser.
            return response[0]
    raise AuthenticationFailed(
        'No suitable AUTHENTICATION_CLASS to authenticate '
        f'auth header "{auth_header}"'
    )
| 2.296875 | 2 |
examples/ocaml_rockstar.py | hoojaoh/rockstar | 4,603 | 12760214 | from rockstar import RockStar
# OCaml hello-world source that will be committed repeatedly.
ocaml_code = 'print_string "Hello world!\n";;'
# Back-date 400 days of commits touching hello.ml to fake contribution history.
rock_it_bro = RockStar(days=400, file_name='hello.ml', code=ocaml_code)
rock_it_bro.make_me_a_rockstar()
| 1.703125 | 2 |
patter/util/layer_utils.py | arnav1993k/Denosing | 74 | 12760215 | def split_targets(targets, target_sizes):
results = []
offset = 0
for size in target_sizes:
results.append(targets[offset:offset + size])
offset += size
return results
| 2.75 | 3 |
hard-gists/4125009/snippet.py | jjhenkel/dockerizeme | 21 | 12760216 | <reponame>jjhenkel/dockerizeme<gh_stars>10-100
#!/usr/bin/env python
import sys
import appscript
def main():
    """Toggle iTerm's current-session transparency from the first CLI arg.

    Passing ``-`` makes the window mostly transparent (0.9); any other
    argument makes it nearly opaque (0.1). Requires macOS + appscript.
    """
    iterm_transparency = appscript.app('iTerm').current_terminal.current_session.transparency
    iterm_transparency.set("0.9" if sys.argv[1] == '-' else "0.1")
if __name__ == '__main__':
    main()
# vim: fileencoding=utf-8 | 1.859375 | 2 |
scripts/operations/node_management/Add_Remove_Replace_NCNs/remove_management_ncn.py | rambabubolla/docs-csm | 14 | 12760217 | <reponame>rambabubolla/docs-csm
#!/usr/bin/env python3
# MIT License
#
# (C) Copyright [2022] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import argparse
import http
import json
import logging
import os
import pathlib
import re
import requests
import shutil
import subprocess
import sys
import urllib3
BASE_URL = ''
BSS_URL = ''
HSM_URL = ''
SLS_URL = ''
KEA_URL = ''
# This is the list of NCNs for which the IPs should not be removed from /etc/hosts
NCN_DO_NOT_REMOVE_IPS = [
'ncn-m001', 'ncn-m002', 'ncn-m003',
'ncn-w001', 'ncn-w002', 'ncn-w003',
'ncn-s001', 'ncn-s002', 'ncn-s003',
]
class Logger:
    """Console logger that optionally mirrors messages to a log file.

    Messages are always printed to stdout; once ``init_logger`` has been
    called with a file path, they are also recorded through the ``logging``
    module at the matching level.
    """

    def __init__(self):
        # Path of the log file; None until init_logger() is called.
        self.log_file = None

    def init_logger(self, log_file, verbose=False):
        """Configure file logging.

        Args:
            log_file: Path of the file to write records to (falsy disables
                file logging entirely).
            verbose: When True, log at DEBUG level instead of INFO.
        """
        self.log_file = log_file
        if log_file:
            # Single basicConfig call — only the level depends on verbosity
            # (the original duplicated the whole call per branch).
            # The ``encoding`` argument is unavailable before Python 3.9, so
            # it is deliberately not passed here.
            level = logging.DEBUG if verbose else logging.INFO
            logging.basicConfig(filename=log_file, filemode='w', level=level,
                                format='%(levelname)s: %(message)s')

    def info(self, message):
        """Print *message* and record it at INFO level."""
        print(message)
        if self.log_file:
            logging.info(message)

    def warning(self, message):
        """Print *message* with a 'Warning:' prefix and record it at WARNING level."""
        print(f'Warning: {message}')
        if self.log_file:
            logging.warning(message)

    def error(self, message):
        """Print *message* with an 'Error:' prefix and record it at ERROR level."""
        print(f'Error: {message}')
        if self.log_file:
            logging.error(message)
log = Logger()
class State:
    """Holds per-NCN removal state and manages the on-disk save directory.

    When both ``directory`` and ``xname`` are given, artifacts are saved under
    ``<directory>/<xname>``. A ``dry-run`` marker file records that a previous
    run was a dry run, so its directory is wiped and recreated on the next run.
    """
    def __init__(self, xname=None, directory=None, dry_run=False, verbose=False):
        self.xname = xname
        self.parent = None
        self.ncn_name = ""
        self.aliases = set()
        self.ip_reservation_aliases = set()
        self.ip_reservation_ips = set()
        self.hsm_macs = set()
        self.workers = set()
        self.remove_ips = True
        self.ifnames = []
        self.bmc_mac = None
        self.verbose = verbose
        self.ipmi_username = None
        self.ipmi_password = None
        self.run_ipmitool = False
        # Per-xname subdirectory keeps artifacts of different NCNs apart.
        if directory and xname:
            self.directory = os.path.join(directory, xname)
        else:
            self.directory = directory
        if self.directory:
            # todo possibly add check that prevents saved files from being overwritten
            # file_list = os.listdir(self.directory)
            # if 'dry-run' in file_list:
            #     # continue because the previous run was a dry-run
            #     pass
            # elif len(os.listdir(self.directory)) != 0:
            #     print(f'Error: Save directory is not empty: {self.directory}. Use --force option to over write it.')
            #     sys.exit(1)
            dry_run_flag_file = os.path.join(self.directory, 'dry-run')
            # remove directory if previous run was a dry run
            if os.path.exists(dry_run_flag_file):
                shutil.rmtree(self.directory)
            # create the directory
            if not os.path.exists(self.directory):
                os.makedirs(self.directory)
            # Marker file flags this run as a dry run for the next invocation.
            if dry_run:
                pathlib.Path(dry_run_flag_file).touch()
            else:
                if os.path.exists(dry_run_flag_file):
                    os.remove(dry_run_flag_file)
    def save(self, name, data):
        """Write *data* as pretty-printed JSON to ``<directory>/<name>.json``.

        No-op when no save directory was configured.
        """
        if self.directory:
            file = os.path.join(self.directory, f'{name}.json')
            with open(file, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
class CommandAction:
    """A shell command (argv list) together with the result of running it."""

    def __init__(self, command, verbose=False):
        self.command = command  # argv list handed to subprocess
        self.verbose = verbose  # echo stdout even when the command succeeds
        # Execution results; populated by run_command_action().
        self.has_run = False
        self.return_code = -1
        self.stdout = None
        self.stderr = None
class HttpAction:
    """A planned or executed HTTP call plus its outcome and free-form notes."""

    def __init__(self, method, url, logs=None, request_body="", response_body="", completed=False, success=False):
        self.method = method
        self.url = url
        self.logs = [] if logs is None else logs
        self.request_body = request_body
        self.response_body = response_body
        self.completed = completed
        self.success = success
        # Raw response object; filled in by http_get()/run_action().
        self.response = None

    def log(self, message):
        """Attach a human-readable note to this action."""
        self.logs.append(message)

    def get_response_body(self, default_value=None):
        """Return the response body, or default_value when the body is empty.

        With no default requested the body is returned unconditionally.
        """
        if default_value is None or self.response_body:
            return self.response_body
        return default_value
def setup_urls(args):
    """Initialize the module-level service URLs from parsed CLI arguments.

    Precedence per service: explicit per-service override, then the
    --test-urls localhost endpoint, then a default derived from BASE_URL.
    """
    global BASE_URL
    global BSS_URL
    global HSM_URL
    global SLS_URL
    global KEA_URL

    BASE_URL = args.base_url if args.base_url else 'https://api-gw-service-nmn.local/apis'

    def _select(override, test_value, default):
        # explicit override wins, then test URL, then derived default
        if override:
            return override
        if args.test_urls:
            return test_value
        return default

    BSS_URL = _select(args.bss_url, 'http://localhost:27778/boot/v1', f'{BASE_URL}/bss/boot/v1')
    HSM_URL = _select(args.hsm_url, 'http://localhost:27779/hsm/v2', f'{BASE_URL}/smd/hsm/v2')
    SLS_URL = _select(args.sls_url, 'http://localhost:8376/v1', f'{BASE_URL}/sls/v1')
    KEA_URL = f'{BASE_URL}/dhcp-kea'
def print_urls():
    """Log the service endpoint URLs currently in effect."""
    for label, value in (('BSS_URL', BSS_URL), ('HSM_URL', HSM_URL),
                         ('SLS_URL', SLS_URL), ('KEA_URL', KEA_URL)):
        log.info(f'{label}: {value}')
def print_summary(state):
    """Log a short summary of the node that was (or would be) removed."""
    ifnames = ", ".join(state.ifnames)
    bmc_mac = state.bmc_mac if state.bmc_mac else "Unknown"
    log.info('Summary:')
    log.info(f' Logs: {state.directory}')
    log.info(f' xname: {state.xname}')
    log.info(f' ncn_name: {state.ncn_name}')
    log.info(' ncn_macs:')
    log.info(f' ifnames: {ifnames}')
    log.info(f' bmc_mac: {bmc_mac}')
def print_action(action):
    """Log one HTTP action: planned, succeeded, or failed (with body dump).

    Any notes attached to the action are logged afterwards in all cases.
    """
    verb = action.method.upper()
    if not action.completed:
        log.info(f"Planned: {verb} {action.url}")
    elif action.success:
        log.info(f"Called: {verb} {action.url}")
    else:
        log.error(f"Failed: {verb} {action.url}")
        log.info(json.dumps(action.response_body, indent=2))
    for a_log in action.logs:
        log.info(' ' + a_log)
def print_actions(actions):
    """Log every HTTP action in the list via print_action."""
    for current in actions:
        print_action(current)
def print_command_action(action):
    """Log one shell command action; failures always dump stdout/stderr,
    successes do so only when the action was created verbose."""
    cmd_text = ' '.join(action.command)
    if not action.has_run:
        log.info(f'Planned: {cmd_text}')
        return
    log.info(f'Ran: {cmd_text}')
    if action.return_code != 0:
        log.error(f' Failed: {action.return_code}')
        log.info(f' stdout:\n{action.stdout}')
        log.info(f' stderr:\n{action.stderr}')
    elif action.verbose:
        log.info(f' stdout:\n{action.stdout}')
        if action.stderr:
            log.info(f' stderr:\n{action.stderr}')
def print_command_actions(actions):
    """Log every command action in the list via print_command_action."""
    for current in actions:
        print_command_action(current)
def http_get(session, actions, url, exit_on_error=True):
    """Issue a GET, wrap the result in an HttpAction, and append it to actions.

    A non-200 status either aborts the program (default) or leaves the action
    marked unsuccessful for the caller to inspect.
    """
    response = session.get(url)
    action = HttpAction('get', url, response_body=response.text, completed=True)
    actions.append(action)
    action.response = response
    if response.status_code == http.HTTPStatus.OK:
        action.success = True
    elif exit_on_error:
        log_error_and_exit(actions, str(action))
    return action
def log_error_and_exit(actions, message):
    """Dump all actions gathered so far, log the error, and abort with status 1."""
    print_actions(actions)
    log.error(str(message))
    sys.exit(1)
def node_bmc_to_enclosure(xname_for_bmc):
    """Map a node BMC xname (e.g. x3000c0s36b0) to its enclosure xname
    (x3000c0s36e0); return None when the input is not a node BMC xname."""
    pattern = re.compile(r'^(x[0-9]{1,4}c0s[0-9]+)(b)([0-9]+)$')
    match = pattern.match(xname_for_bmc)
    if match is None:
        return None
    # swap the 'b' separator for 'e', keeping the slot prefix and ordinal
    return f'{match.group(1)}e{match.group(3)}'
def add_delete_action_if_component_present(actions, state, session, url, save_file):
    """GET url; when present, save the payload and queue a DELETE for it.

    A 404 is treated as success — there is nothing to delete.
    """
    get_action = http_get(session, actions, url, exit_on_error=False)
    if get_action.success:
        state.save(save_file, json.loads(get_action.response_body))
        actions.append(HttpAction('delete', url))
    elif get_action.response.status_code == http.HTTPStatus.NOT_FOUND:
        get_action.success = True
        get_action.log('The item does not need to be deleted, because it does not exist.')
def validate_ipmi_config(state):
    """Abort (exit 1) when ipmitool will run but the BMC credentials are missing.

    No-op when state.run_ipmitool is False.
    """
    if not state.run_ipmitool:
        return
    for env_name, value, noun in (
            ('IPMI_PASSWORD', state.ipmi_password, 'password'),
            ('IPMI_USERNAME', state.ipmi_username, 'username')):
        if not value:
            log.error(f'{env_name} not set')
            log.error(f'The environment variable {env_name} is required')
            log.error(f'It should be set to the {noun} of the BMC that is being removed')
            sys.exit(1)
def create_sls_actions(session, state):
    """Plan the SLS changes for removing the NCN.

    Queues DELETEs for the node's hardware entry and any connector hardware
    wired to its BMC, and PUTs for networks whose IP reservations mention the
    node. As a side effect, populates state.parent, state.aliases,
    state.ncn_name, state.workers and the ip_reservation_* sets.
    Aborts if the xname is not a Management node or is missing from SLS.
    """
    actions = []
    hardware_action = http_get(session, actions, f'{SLS_URL}/hardware')
    networks_action = http_get(session, actions, f'{SLS_URL}/networks')
    # Find xname in hardware and get aliases
    found_hardware_for_xname = False
    hardware_list = json.loads(hardware_action.response_body)
    for hardware in hardware_list:
        extra_properties = hardware.get('ExtraProperties', {})
        if state.xname == hardware['Xname']:
            type_string = hardware.get('TypeString')
            role = extra_properties.get('Role')
            sub_role = extra_properties.get('SubRole')
            # Refuse to remove anything that is not a management NCN.
            if type_string != 'Node' or role != 'Management' or sub_role not in ['Worker', 'Storage', 'Master']:
                log_error_and_exit(
                    actions,
                    f'{state.xname} is Type: {type_string}, Role: {role}, SubRole: {sub_role}. ' +
                    'The node must be Type: Node, Role: Management, SubRole: one of Worker, Storage, or Master.')
            found_hardware_for_xname = True
            state.save(f'sls-hardware-{state.xname}', hardware)
            state.parent = hardware.get('Parent')
            hardware_action.log(
                f'Found Hardware: Xname: {state.xname}, ' +
                f'Parent: {state.parent}, ' +
                f'TypeString: {hardware["TypeString"]}, ' +
                f'Role: {hardware.get("ExtraProperties").get("Role")}')
            state.aliases.update(extra_properties.get('Aliases', []))
            hardware_action.log(f'Aliases: {state.aliases}')
            alias_count = len(state.aliases)
            if alias_count != 1:
                log.warning(f'Expected to find only one alias. Instead found {state.aliases}')
            if alias_count > 0:
                # Use the first alias as the canonical NCN name.
                state.ncn_name = list(state.aliases)[0]
                log.info(f'xname: {state.xname}')
                log.info(f'ncn name: {state.ncn_name}')
            if state.ncn_name in NCN_DO_NOT_REMOVE_IPS:
                state.remove_ips = False
    # Requires that the parent is known.
    # The loop through the hardware_list above finds the given node and parent
    # That is why this must loop through the hardware list again after the loop above.
    hardware_connectors = []
    for hardware in hardware_list:
        extra_properties = hardware.get('ExtraProperties', {})
        # Check for nic connections
        for nic in extra_properties.get("NodeNics", []):
            if nic == state.parent:
                hardware_connectors.append(hardware.get('Xname'))
                state.save(f'sls-hardware-{hardware.get("Xname")}', hardware)
                hardware_action.log(f'Found Connector Hardware: Xname: {hardware.get("Xname")}, NodeNic: {nic}')
        type_string = hardware.get('TypeString')
        role = extra_properties.get('Role')
        sub_role = extra_properties.get('SubRole')
        # Collect the aliases of every *other* management NCN; used later to
        # update /etc/hosts across the cluster.
        if type_string == 'Node' and role == 'Management' and sub_role in ['Worker', 'Storage', 'Master']:
            aliases = extra_properties.get('Aliases', [])
            for alias in aliases:
                if alias not in state.aliases:
                    state.workers.add(alias)
    for connector in hardware_connectors:
        actions.append(HttpAction('delete', f'{SLS_URL}/hardware/{connector}'))
    if found_hardware_for_xname:
        actions.append(HttpAction('delete', f'{SLS_URL}/hardware/{state.xname}'))
        state.save('sls-hardware', hardware_list)
    else:
        log_error_and_exit(actions, f'Failed to find sls hardware entry for xname: {state.xname}')
    # Find network references for the aliases and the parent
    networks = json.loads(networks_action.response_body)
    state.save('sls-networks', networks)
    for network in networks:
        network_name = network.get("Name")
        if network_name == 'HSN':
            # Skip the HSN network. This network is owned by slingshot.
            continue
        logs = []
        network_has_changes = False
        extra_properties = network.get('ExtraProperties')
        subnets = extra_properties['Subnets']
        if subnets is None:
            continue
        for subnet in subnets:
            ip_reservations = subnet.get('IPReservations')
            if ip_reservations is None:
                continue
            # Rebuild the reservation list without entries belonging to the node.
            new_ip_reservations = []
            subnet_has_changes = False
            for ip_reservation in ip_reservations:
                rname = ip_reservation['Name']
                if rname not in state.aliases and rname != state.parent and rname != state.xname:
                    new_ip_reservations.append(ip_reservation)
                else:
                    subnet_has_changes = True
                    a = ip_reservation.get('Aliases')
                    if a:
                        state.ip_reservation_aliases.update(ip_reservation.get('Aliases'))
                    state.ip_reservation_aliases.add(ip_reservation.get('Name'))
                    state.ip_reservation_ips.add(ip_reservation.get('IPAddress'))
                    if state.remove_ips:
                        logs.append(
                            f'Removed IP Reservation in {network["Name"]} ' +
                            f'in subnet {subnet["Name"]} for {ip_reservation["Name"]}')
                    logs.append(
                        'IP Reservation Details: ' +
                        f'Name: {ip_reservation.get("Name")}, ' +
                        f'IPAddress: {ip_reservation.get("IPAddress")}, ' +
                        f'Aliases: {ip_reservation.get("Aliases")}')
                    state.save(f'sls-ip-reservation-{network["Name"]}-{subnet["Name"]}-{ip_reservation["Name"]}',
                               ip_reservation)
            if state.remove_ips and subnet_has_changes:
                network_has_changes = True
                subnet['IPReservations'] = new_ip_reservations
        # Only PUT the network back when reservations were actually removed.
        if state.remove_ips and network_has_changes:
            request_body = json.dumps(network)
            action = HttpAction(
                'put', f'{SLS_URL}/networks/{network["Name"]}', logs=logs, request_body=request_body)
            actions.append(action)
    return actions
def _add_ethernet_interface_deletes(session, actions, state, component_id):
    """Queue DELETEs for every HSM ethernet interface of component_id.

    Saves each interface payload and records its MAC in state.hsm_macs so the
    kea leases can be released later.
    """
    list_action = http_get(session, actions,
                           f'{HSM_URL}/Inventory/EthernetInterfaces?ComponentId={component_id}')
    ethernet_list = json.loads(list_action.response_body)
    for ethernet in ethernet_list:
        ethernet_id = ethernet.get('ID')
        actions.append(HttpAction('delete', f'{HSM_URL}/Inventory/EthernetInterfaces/{ethernet_id}'))
        state.save(f'hsm-ethernet-interface-{ethernet_id}', ethernet)
        mac = ethernet.get('MACAddress')
        if mac:
            state.hsm_macs.add(mac)


def create_hsm_actions(session, state):
    """Plan the HSM deletions for the node: its ethernet interfaces, its BMC's
    ethernet interfaces, the BMC redfish endpoint, and the component-state
    entries for the node, its BMC, and the node enclosure.

    The previously duplicated ethernet-interface loop (once for the node, once
    for its BMC) is factored into _add_ethernet_interface_deletes.
    """
    actions = []
    # xname ethernet interfaces, then bmc (parent) ethernet interfaces
    _add_ethernet_interface_deletes(session, actions, state, state.xname)
    _add_ethernet_interface_deletes(session, actions, state, state.parent)
    # delete parent redfish endpoints
    add_delete_action_if_component_present(
        actions, state, session, f'{HSM_URL}/Inventory/RedfishEndpoints/{state.parent}', f'hsm-redfish-endpoints-{state.parent}')
    # delete xname from component state
    add_delete_action_if_component_present(
        actions, state, session, f'{HSM_URL}/State/Components/{state.xname}', f'hsm-component-{state.xname}')
    # delete parent from component state
    add_delete_action_if_component_present(
        actions, state, session, f'{HSM_URL}/State/Components/{state.parent}', f'hsm-component-{state.parent}')
    # delete node enclosure for parent from component state
    node_enclosure_xname = node_bmc_to_enclosure(state.parent)
    if node_enclosure_xname:
        add_delete_action_if_component_present(
            actions, state, session, f'{HSM_URL}/State/Components/{node_enclosure_xname}', f'hsm-component-{node_enclosure_xname}')
    else:
        log.error(f'failed to create enclosure xname for parent {state.parent}')
    return actions
def bss_params_to_ifnames(params):
    """Extract the values of every 'ifname=...' argument from a kernel-parameter string.

    Fix: the old code used split('=', 2), which silently truncated any value
    that itself contained an '=' character; splitting on the first '=' only
    (maxsplit=1) preserves the full value.

    Args:
        params: space-separated kernel boot parameters string.

    Returns:
        List of ifname values in the order they appear.
    """
    result = []
    for arg in params.split(' '):
        if arg.startswith('ifname='):
            result.append(arg.split('=', 1)[1])
    return result
def create_bss_actions(session, state):
    """Plan the BSS changes: strip the node's host records from the Global
    bootparameters (queued as a PUT) and queue a DELETE of the node's own
    bootparameters entry. Also records the node's ifname= interfaces on state.

    Aborts if the node is the cluster's first master, which must not be removed.
    """
    actions = []
    global_bp_action = http_get(session, actions, f'{BSS_URL}/bootparameters?name=Global')
    global_bp = json.loads(global_bp_action.response_body)
    if len(global_bp) == 0:
        log_error_and_exit(actions, "Failed to find Global bootparameters")
    elif len(global_bp) > 1:
        log.error("unexpectedly found more than one Global bootparameters. Continuing with the only the first entry")
    boot_parameter = global_bp[0]
    state.save('bss-bootparameters-global', boot_parameter)
    # check that ncn being removed is not first master
    first_master = boot_parameter.get('cloud-init', {}).get('meta-data', {}).get('first-master-hostname')
    if first_master == state.ncn_name:
        log_error_and_exit(actions,
                           'Cannot remove the first master. ' +
                           f'xname: {state.xname}, ncn-name: {state.ncn_name}, first-master-hostname: {first_master}')
    else:
        global_bp_action.log(f'first-master-hostname: {first_master}')
    # remove host records from Global boot parameters
    if state.remove_ips:
        host_records = boot_parameter.get('cloud-init').get('meta-data').get('host_records')
        new_host_records = []
        has_changes = False
        for host_record in host_records:
            # A record is dropped when any of its aliases starts with one of
            # the node's aliases (covers derived names like <alias>-mgmt).
            found_alias_match = False
            for alias in host_record.get('aliases'):
                for alias_prefix in state.aliases:
                    if alias.startswith(alias_prefix):
                        found_alias_match = True
            if found_alias_match:
                has_changes = True
            else:
                new_host_records.append(host_record)
        if has_changes:
            boot_parameter.get('cloud-init').get('meta-data')['host_records'] = new_host_records
            global_request_body = json.dumps(boot_parameter)
            state.save('new-bss-bootparameters-global', boot_parameter)
            actions.append(HttpAction('put', f'{BSS_URL}/bootparameters', request_body=global_request_body))
    # remove boot parameters for xname
    xname_bp_action = http_get(session, actions, f'{BSS_URL}/bootparameters?name={state.xname}')
    if xname_bp_action.success:
        xname_bp_list = json.loads(xname_bp_action.response_body)
        xname_bp = xname_bp_list[0] if len(xname_bp_list) > 0 else {}
        state.save(f'bss-bootparameters-{state.xname}', xname_bp_list)
        # create delete action
        delete_request_body = '{ "hosts" : [ "' + state.xname + '" ] }'
        xname_bp_delete_action = HttpAction('delete', f'{BSS_URL}/bootparameters', request_body=delete_request_body)
        xname_bp_delete_action.log(delete_request_body)
        actions.append(xname_bp_delete_action)
        # save interfaces from params
        params = xname_bp.get('params')
        if params:
            state.ifnames = bss_params_to_ifnames(params)
    return actions
def create_kea_actions(session, state):
    """Plan kea DHCP lease removals.

    Phase 1 (executed immediately via run_action): look up active leases for
    every MAC collected from HSM and merge their IPs into
    state.ip_reservation_ips. Phase 2: queue a lease4-del POST for each
    collected IP. Request bodies are kea control-agent JSON commands.
    """
    actions = []
    for mac in sorted(state.hsm_macs):
        request_body = '{"command": "lease4-get-by-hw-address", "service": [ "dhcp4" ], "arguments": {"hw-address": "' + mac + '"}}'
        action = HttpAction('post', f'{KEA_URL}', request_body=request_body)
        actions.append(action)
        action.log(f'Request body: {request_body}')
        # The lookup must run now so its results can seed the delete phase.
        run_action(session, action)
        if state.verbose:
            action.log(f'Response body: {action.response_body}')
        if action.success:
            response = action.get_response_body('[]')
            response_json = json.loads(response)
            for r in response_json:
                leases = r.get("arguments", {}).get("leases", [])
                for lease in leases:
                    ip = lease.get('ip-address')
                    if ip:
                        if ip not in state.ip_reservation_ips:
                            state.ip_reservation_ips.add(ip)
                            action.log(f'Added {ip} to the list of kea leases to remove')
    for ip in sorted(state.ip_reservation_ips):
        request_body = '{"command": "lease4-del", "service": [ "dhcp4" ], "arguments": {"ip-address": "' + ip + '"}}'
        action = HttpAction('post', f'{KEA_URL}', request_body=request_body)
        actions.append(action)
        action.log(f'Request body: {request_body}')
    return actions
def run_command(command):
    """Run an argv list, capturing output.

    Returns (return_code, stdout, stderr) where stdout/stderr are UTF-8 decoded
    strings, or None when the corresponding stream produced no output.
    """
    proc = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out = proc.stdout.decode('utf-8') if proc.stdout else None
    err = proc.stderr.decode('utf-8') if proc.stderr else None
    return proc.returncode, out, err
def run_command_action(command):
    """Execute the CommandAction's argv and store the results on the action."""
    rc, out, err = run_command(command.command)
    command.return_code = rc
    command.stdout = out
    command.stderr = err
    command.has_run = True
def run_command_actions(command_actions):
    """Run every not-yet-executed command action, then print each one's status."""
    for command in command_actions:
        if not command.has_run:
            run_command_action(command)
        print_command_action(command)
def check_for_running_pods_on_ncn(state):
    """Abort (exit 1) if Kubernetes pods are still scheduled on the node.

    For each alias, runs 'kubectl get pods' filtered by spec.nodeName. When
    pods remain, the '-o wide' listing is printed first so the operator can see
    what is still running. Unparseable/empty output only produces a warning.
    """
    for alias in state.aliases:
        # Wide variant is only executed (for display) when pods are found.
        command_wide = ['kubectl', 'get', 'pods', '--all-namespaces', '--field-selector', f'spec.nodeName={alias}', '-o', 'wide']
        command = ['kubectl', 'get', 'pods', '--all-namespaces', '--field-selector', f'spec.nodeName={alias}', '-o', 'json']
        return_code, stdout, stderr = run_command(command)
        log.info("Ran: " + ' '.join(command))
        if return_code != 0:
            log.error('kubectl command failed')
            log.info(f'Return code: {return_code}')
            log.info(f'Standard out:\n{stdout}')
            log.info(f'Standard err:\n{stderr}')
            sys.exit(1)
        if stdout:
            response = json.loads(stdout)
            items = response.get('items')
            if items is not None:
                if len(items) != 0:
                    # Show the human-readable pod list, then fail hard.
                    log.info(' '.join(command_wide))
                    _, wide_stdout, wide_stderr = run_command(command_wide)
                    log.info(wide_stdout)
                    if wide_stderr:
                        log.info(wide_stderr)
                    log.error(f'there are pods on {alias}.')
                    sys.exit(1)
            else:
                print(
                    f'Warning: Could not determine if {alias} is running services. Command did not return the expected json')
        else:
            print(f'Warning: Could not determine if {alias} is running services. Command returned no output.')
def create_restart_bss_restart_actions():
    """Command action that triggers a rolling restart of the cray-bss deployment."""
    restart_cmd = ['kubectl', '-n', 'services', 'rollout', 'restart', 'deployment', 'cray-bss']
    return [CommandAction(restart_cmd)]
def create_restart_bss_wait_actions():
    """Command action that waits (up to 10 minutes) for cray-bss to finish rolling out."""
    wait_cmd = ['kubectl', '-n', 'services', 'rollout', 'status', 'deployment', 'cray-bss', '--timeout=600s']
    return [CommandAction(wait_cmd)]
def create_update_etc_hosts_actions(state):
    """Build the commands that strip the removed node's IPs from /etc/hosts on
    every other management NCN.

    Each host's /etc/hosts is first copied locally (scp, for the audit
    directory) and backed up on the host itself (cp) before sed deletes the
    matching lines in place. Skipped entirely when state.remove_ips is False.
    """
    command_actions = []
    if state.remove_ips:
        sorted_workers = sorted(state.workers)
        for worker in sorted_workers:
            scp_action = CommandAction(['scp', f'{worker}:/etc/hosts', f'{state.directory}/etc-hosts-{worker}'])
            command_actions.append(scp_action)
        hosts = ','.join(sorted_workers)
        cp_backup_action = CommandAction(['pdsh', '-w', hosts,
                                          'cp', '/etc/hosts', f'/tmp/hosts.backup.{state.xname}.{state.ncn_name}'])
        command_actions.append(cp_backup_action)
        for ip in sorted(state.ip_reservation_ips):
            # Delete lines that begin with the IP followed by whitespace.
            sed_action = CommandAction(['pdsh', '-w', hosts,
                                        'sed', '-i', f'/^{ip}[[:blank:]]/d', f'/etc/hosts'])
            command_actions.append(sed_action)
    else:
        print('Leaving /etc/hosts unchanged')
    return command_actions
def _ipmitool_base_args(state):
    # Common ipmitool invocation prefix; -E reads the password from the
    # IPMI_PASSWORD environment variable rather than the command line.
    return ['ipmitool', '-I', 'lanplus', '-U', state.ipmi_username, '-E', '-H', f'{state.ncn_name}-mgmt']


def _ipmi_preconditions_met(state):
    """Return True when ipmitool should run and all required values are present."""
    if not state.run_ipmitool:
        return False
    if not state.ncn_name or not state.ipmi_password or not state.ipmi_username:
        # hitting this case is a programming error.
        # these values should have been checked by calling validate_ipmi_config(state)
        log.error('Unexpected state. Missing one of these values: ncn_name: ' +
                  f'"{state.ncn_name}", ipmi_username: "{state.ipmi_username}", ipmi_password: "****"')
        return False
    return True


def _detect_lan_channel(state, command_actions):
    """Run 'ipmitool mc info' and return the LAN channel to use.

    Intel BMCs use channel '3'; everything else uses '1'. The executed mc-info
    CommandAction is appended to command_actions so it is reported with the plan.
    """
    mc_info_action = CommandAction(_ipmitool_base_args(state) + ['mc', 'info'],
                                   verbose=state.verbose)
    command_actions.append(mc_info_action)
    run_command_action(mc_info_action)
    lan = '1'
    if mc_info_action.stdout:
        manufacturer_lines = [line for line in mc_info_action.stdout.split('\n') if 'manufacturer name' in line.lower()]
        for line in manufacturer_lines:
            if 'intel' in line.lower():
                lan = '3'
    return lan


def create_ipmitool_set_bmc_to_dhcp_actions(state):
    """Build (and partially run) the ipmitool commands that switch the BMC to
    DHCP and cold-reset it. Returns the list of CommandActions.

    The mc-info/LAN-channel detection and the credential preconditions, which
    were duplicated between this function and create_ipmitool_bmc_mac_actions,
    are shared via _detect_lan_channel and _ipmi_preconditions_met.
    """
    command_actions = []
    if not _ipmi_preconditions_met(state):
        return command_actions
    lan = _detect_lan_channel(state, command_actions)
    # Set the BMC to DHCP
    change_dhcp_action = CommandAction(
        _ipmitool_base_args(state) + ['lan', 'set', lan, 'ipsrc', 'dhcp'],
        verbose=state.verbose)
    command_actions.append(change_dhcp_action)
    # Restart BMC
    restart_bmc_action = CommandAction(
        _ipmitool_base_args(state) + ['mc', 'reset', 'cold'],
        verbose=state.verbose)
    command_actions.append(restart_bmc_action)
    return command_actions


def create_ipmitool_bmc_mac_actions(state):
    """Query the BMC's LAN configuration via ipmitool and record its MAC
    address on state.bmc_mac. Returns the list of executed CommandActions."""
    command_actions = []
    if not _ipmi_preconditions_met(state):
        return command_actions
    lan = _detect_lan_channel(state, command_actions)
    mac_action = CommandAction(
        _ipmitool_base_args(state) + ['lan', 'print', lan],
        verbose=state.verbose)
    command_actions.append(mac_action)
    run_command_action(mac_action)
    if mac_action.return_code == 0 and mac_action.stdout:
        mac_lines = [line for line in mac_action.stdout.split('\n') if 'mac address' in line.lower()]
        for line in mac_lines:
            # 'MAC Address : aa:bb:...' -> split once on the first colon.
            key_value = line.split(':', 1)
            if len(key_value) == 2:
                state.bmc_mac = key_value[1].strip()
    return command_actions
def is_2xx(http_status):
    """Return True when http_status is an HTTP success code (200-299).

    Fix: the previous implementation used `http_status // 200 == 1`, which
    wrongly classified 300-399 (e.g. 304 Not Modified) as success.
    """
    return 200 <= http_status <= 299
def run_action(session, action):
    """Execute one HttpAction with the given session and record the outcome.

    Unknown methods abort the program.
    """
    method = action.method
    url = action.url
    dispatch = {
        'get': lambda: session.get(url),
        'delete': lambda: session.delete(url, data=action.request_body),
        'put': lambda: session.put(url, action.request_body),
        'post': lambda: session.post(url, action.request_body),
    }
    if method not in dispatch:
        print(f"Unknown method {method}")
        print("FAILED")
        sys.exit(1)
    r = dispatch[method]()
    # NOTE(review): `if r:` relies on the response object's truthiness, which
    # for requests responses is status-dependent — presumably intentional here;
    # confirm before changing.
    if r:
        action.response_body = r.text
        action.completed = True
        action.success = is_2xx(r.status_code)
def run_actions(session, actions):
    """Execute every not-yet-completed action, then print each one's status."""
    for action in actions:
        if not action.completed:
            run_action(session, action)
        print_action(action)
def main(argv):
    """Entry point: plan all removal actions for one management NCN, print the
    plan, and — unless --dry-run — execute it after interactive confirmation.

    Order matters: SLS is inspected first (it populates state.parent/aliases),
    then BSS/HSM/kea plans are built; on execution BSS runs before HSM and SLS
    so boot data is gone before inventory entries are deleted.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--xname', help='The xname of the ncn to remove', required=True)
    parser.add_argument('--dry-run', action='store_true', help='Do a dry run where nothing is modified')
    parser.add_argument('--log-dir', '-l', default='/tmp/remove_management_ncn',
                        help='Directory where to log and save current state.')
    parser.add_argument("-v", action="store_true", help="Print verbose output")
    # hidden arguments used for testing
    parser.add_argument('--base-url', help=argparse.SUPPRESS)  # Base url.
    parser.add_argument('--sls-url', help=argparse.SUPPRESS)  # Base url for sls. Overrides --base-url
    parser.add_argument('--hsm-url', help=argparse.SUPPRESS)  # Base url for hsm. Overrides --base-url
    parser.add_argument('--bss-url', help=argparse.SUPPRESS)  # Base url for bss. Overrides --base-url
    parser.add_argument('-t', '--test-urls', action='store_true', help=argparse.SUPPRESS)  # Use test urls
    parser.add_argument('--skip-kea', action='store_true', help=argparse.SUPPRESS)  # skip kea actions
    parser.add_argument('--skip-etc-hosts', action='store_true', help=argparse.SUPPRESS)  # skip /etc/hosts actions
    parser.add_argument('--force', action='store_true', help=argparse.SUPPRESS)  # skip asking 'are you sure' question
    args = parser.parse_args()
    state = State(xname=args.xname, directory=args.log_dir, dry_run=args.dry_run, verbose=args.v)
    log.init_logger(os.path.join(state.directory, 'log'), verbose=state.verbose)
    setup_urls(args)
    print_urls()
    with requests.Session() as session:
        # The API gateway uses a self-signed certificate in many installs.
        session.verify = False
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        token = os.environ.get('TOKEN')
        if token is not None:
            session.headers.update({'Authorization': f'Bearer {token}'})
        elif not args.test_urls and token is None:
            # Real endpoints require an auth token; print how to obtain one.
            log.error('The TOKEN environment variable is not set.')
            log.info('Run the following to set the TOKEN:')
            log.info('''export TOKEN=$(curl -s -S -d grant_type=client_credentials \\
-d client_id=admin-client -d client_secret=`kubectl get secrets admin-client-auth \\
-o jsonpath='{.data.client-secret}' | base64 -d` \\
https://api-gw-service-nmn.local/keycloak/realms/shasta/protocol/openid-connect/token \\
| jq -r '.access_token')
''')
            sys.exit(1)
        session.headers.update({'Content-Type': 'application/json'})
        state.ipmi_password = os.environ.get('IPMI_PASSWORD')
        state.ipmi_username = os.environ.get('IPMI_USERNAME')
        if not state.ipmi_username:
            state.ipmi_username = 'root'
            log.info(f'Using the default IPMI username. Set the IPMI_USERNAME environment variable to change this.')
        log.info(f'ipmi username: {state.ipmi_username}')
        # SLS must be consulted first: it fills in parent/aliases/ncn_name.
        sls_actions = create_sls_actions(session, state)
        print_actions(sls_actions)
        # ncn-m001 does not use DHCP. It is assigned a static IP.
        state.run_ipmitool = state.ncn_name != 'ncn-m001'
        validate_ipmi_config(state)
        bss_actions = create_bss_actions(session, state)
        print_actions(bss_actions)
        hsm_actions = create_hsm_actions(session, state)
        print_actions(hsm_actions)
        kea_actions = []
        if not args.skip_kea:
            kea_actions = create_kea_actions(session, state)
            print_actions(kea_actions)
        restart_bss_restart_actions = create_restart_bss_restart_actions()
        print_command_actions(restart_bss_restart_actions)
        restart_bss_wait_actions = create_restart_bss_wait_actions()
        print_command_actions(restart_bss_wait_actions)
        etc_hosts_actions = []
        if not args.skip_etc_hosts:
            etc_hosts_actions = create_update_etc_hosts_actions(state)
            print_command_actions(etc_hosts_actions)
        ipmitool_bmc_mac_actions = create_ipmitool_bmc_mac_actions(state)
        print_command_actions(ipmitool_bmc_mac_actions)
        ipmitool_set_dhcp_actions = create_ipmitool_set_bmc_to_dhcp_actions(state)
        print_command_actions(ipmitool_set_dhcp_actions)
        # Refuse to proceed while workloads are still scheduled on the node.
        check_for_running_pods_on_ncn(state)
        log.info('')
        print_summary(state)
        if not args.dry_run:
            if not args.force:
                print()
                response = input(f'Permanently remove {state.xname} - {state.ncn_name} (y/n)? ')
                if response.lower() != 'y':
                    log.info('Operation aborted. Nothing was removed.')
                    exit(0)
                print()
            log.info(f'Removing {args.xname}')
            run_actions(session, bss_actions)
            run_actions(session, hsm_actions)
            run_actions(session, sls_actions)
            run_actions(session, kea_actions)
            log.info('Restarting cray-bss')
            run_command_actions(restart_bss_restart_actions)
            log.info('Waiting for cray-bss to start.')
            log.info('Do not kill this script. The wait will timeout in 10 minutes if bss does not fully start up.')
            run_command_actions(restart_bss_wait_actions)
            # Set the BMC to DHCP
            run_command_actions(ipmitool_set_dhcp_actions)
            run_command_actions(etc_hosts_actions)
        log.info('')
        print_summary(state)
        if not args.dry_run:
            log.info('')
            log.info(f'Successfully removed {state.xname} - {state.ncn_name}')
if __name__ == "__main__":
    # main() returns None, so the process exit status is 0 unless main() exits
    # explicitly via sys.exit(1) on an error path.
    sys.exit(main(sys.argv))
| 1.601563 | 2 |
drl/agents/integration/agent.py | lucaslingle/pytorch_drl | 0 | 12760218 | <reponame>lucaslingle/pytorch_drl<filename>drl/agents/integration/agent.py
from typing import List, Mapping, Any, Union
import torch as tc
from drl.agents.preprocessing import Preprocessing
from drl.agents.architectures.stateless.abstract import StatelessArchitecture
from drl.agents.heads import Head
class Agent(tc.nn.Module):
    """Composes preprocessing, a feature architecture, and prediction heads
    into a single module whose forward pass returns named predictions."""
    def __init__(
            self,
            preprocessing: List[Preprocessing],
            architecture: StatelessArchitecture,
            predictors: Mapping[str, Head],
            detach_input: bool = True):
        """
        Args:
            preprocessing (List[Preprocessing]): List of `Preprocessing` instances.
            architecture (StatelessArchitecture): `StatelessArchitecture` instance.
            predictors (Mapping[str, Head]): Dictionary of prediction `Head`s,
                keyed by predictor name. There should be only one `PolicyHead`
                predictor and its name should be 'policy'. There can be multiple
                `ValueHead`/`ActionValueHead` predictors. Their names should
                start with 'value_' or 'action_value_', and end with the
                appropriate reward name.
            detach_input (bool): Detach input or not? Default: True.
                This is a no-op when used with emulator-generated observation tensors,
                since these are outside the model.parameters() passed to the
                optimizer and moreover default to have requires_grad=False.
        """
        super().__init__()
        # Sequential/ModuleDict wrappers register all sub-module parameters
        # with this module (visible via parameters()/state_dict()).
        self._preprocessing = tc.nn.Sequential(*preprocessing)
        self._architecture = architecture
        self._predictors = tc.nn.ModuleDict(predictors)
        self._detach_input = detach_input
        self._check_predict_keys()

    def _check_predict_keys(self):
        # Enforce the predictor naming scheme described in __init__'s docstring:
        # exactly 'policy', or names prefixed with 'value_'/'action_value_'.
        for key in self.keys:
            if key == 'policy':
                continue
            if key.startswith('value_'):
                continue
            if key.startswith('action_value_'):
                continue
            raise ValueError(f"Prediction key {key} not supported.")

    @property
    def keys(self):
        # Names of the registered prediction heads.
        return self._predictors.keys()

    def forward(
            self,
            observations: tc.Tensor,
            predict: List[str],
            **kwargs: Mapping[str, Any]
    ) -> Mapping[str, Union[tc.Tensor, tc.distributions.Distribution]]:
        """
        Args:
            observations (torch.Tensor): Batch of observations
            predict (List[str]): Names of predictors to apply.
            kwargs (Mapping[str, Any]): Keyword arguments.

        Returns:
            Mapping[str, Union[tc.Tensor, tc.distributions.Distribution]]:
                Dictionary of predictions.
        """
        if self._detach_input:
            # Block gradients from flowing into whatever produced the observations.
            observations = observations.detach()
        preprocessed = self._preprocessing(observations)
        features = self._architecture(preprocessed, **kwargs)
        # Only the requested heads are evaluated.
        predictions = {
            key: self._predictors[key](features, **kwargs) for key in predict
        }
        return predictions
| 2.1875 | 2 |
utils/extract_sdp.py | boknilev/nmt-repr-analysis | 33 | 12760219 | # -*- coding: utf-8 -*-
"""
Extract dependencies from a Semantic Dependency Parsing treebank.
Usage: extract_sdp.py [--sep sep] [--first_arg_col first_arg_col] IN_FILE OUT_TEXT_FILE OUT_HEAD_FILE OUT_DEPREL_FILE
Arguments:
IN_FILE SDP file in sdp format
OUT_TEXT_FILE File to write raw texts, one sentence per line
OUT_HEAD_FILE File to write heads, which are either an ID (1-indexed) or 0 (for no dependency)
If a word has more than one head, then its heads will be separated by --sep
OUT_DEPREL_FILE File to write UD relations to the head
If a word has more than one head, then its relations to the heads will be separated by --sep
Options:
-h, --help show this help message
--sep sep separator for multiple heads (Default: "|")
--first_arg_col first_arg_col first argument column id (0-indexed) (Default: 7)
"""
from docopt import docopt
import codecs
def _write_sentence(f_out_text, f_out_head, f_out_deprel, words, rels, preds, pred_ids, sep):
    """Emit one finished sentence to the three output files.

    `preds` holds, per token, the 0-based order numbers of the predicates that
    govern it; `pred_ids` maps predicate order to its 1-based token id. Tokens
    with no governing predicate get head '0'. No-op for an empty sentence.
    """
    if not words:
        return
    heads = []
    for cur_preds in preds:
        if cur_preds:
            heads.append(sep.join([str(pred_ids[cur_pred]) for cur_pred in cur_preds]))
        else:
            heads.append('0')
    f_out_text.write(' '.join(words) + '\n')
    f_out_deprel.write(' '.join(rels) + '\n')
    f_out_head.write(' '.join(heads) + '\n')


def run(sdp_file, out_text_file, out_head_file, out_deprel_file, sep, first_arg_col, encoding='UTF-8'):
    """Convert an SDP treebank into parallel text / head / deprel files.

    Fixes over the original implementation:
      * the final sentence is flushed even when the input file does not end
        with a blank line (previously it was silently dropped)
      * range() replaces the Python-2-only xrange()
    """
    with codecs.open(sdp_file, encoding=encoding) as f_sdp, \
            codecs.open(out_text_file, 'w', encoding=encoding) as f_out_text, \
            codecs.open(out_head_file, 'w', encoding=encoding) as f_out_head, \
            codecs.open(out_deprel_file, 'w', encoding=encoding) as f_out_deprel:
        words, rels, preds, pred_ids = [], [], [], []
        tok_id = 0
        for line in f_sdp:
            if line.startswith('#'):
                continue
            if line.strip() == '':
                # blank line terminates a sentence
                _write_sentence(f_out_text, f_out_head, f_out_deprel,
                                words, rels, preds, pred_ids, sep)
                words, rels, preds, pred_ids = [], [], [], []
                tok_id = 0
                continue
            splt = line.strip().split('\t')
            tok_id += 1
            # column 5 ('+') marks this token as a predicate
            if splt[5] == '+':
                pred_ids.append(tok_id)
            words.append(splt[1])
            cur_preds, cur_rels = [], []
            # argument columns start at first_arg_col; a non-'_' cell means this
            # token is an argument of the (i - first_arg_col)-th predicate
            for i in range(first_arg_col, len(splt)):
                if splt[i] != '_':
                    cur_preds.append(i - first_arg_col)
                    cur_rels.append(splt[i])
            preds.append(cur_preds)
            if cur_rels:
                rels.append(sep.join(cur_rels))
            else:
                rels.append('_')
        # flush the last sentence when the file lacks a trailing blank line
        _write_sentence(f_out_text, f_out_head, f_out_deprel,
                        words, rels, preds, pred_ids, sep)
if __name__ == '__main__':
    args = docopt(__doc__)
    sep = '|'
    if args['--sep']:
        sep = args['--sep']
    first_arg_col = 7
    if args['--first_arg_col']:
        # BUG FIX: docopt returns option values as strings; the column index
        # must be an int or range(first_arg_col, ...) raises TypeError.
        first_arg_col = int(args['--first_arg_col'])
    run(args['IN_FILE'], args['OUT_TEXT_FILE'], args['OUT_HEAD_FILE'], args['OUT_DEPREL_FILE'], sep=sep, first_arg_col=first_arg_col)
| 2.75 | 3 |
robot/robot/src/common/autonomous_helper.py | frc1418/2014 | 1 | 12760220 | <filename>robot/robot/src/common/autonomous_helper.py
#
# TODO: Still need to add comprehensive documentation for this stuff, and
# then roll it into pyfrc.
#
try:
import wpilib
except ImportError:
from pyfrc import wpilib
import functools
import inspect
# use this to track ordering of functions, so that we can display them
# properly in the tuning widget on the dashboard
# Monotonically increasing serial; kept in a one-element list so the helper
# can mutate it without a ``global`` declaration.
__global_cnt_serial = [0]

def __get_state_serial():
    """Return the next unique serial number (used to order states)."""
    __global_cnt_serial[0] = __global_cnt_serial[0] + 1
    return __global_cnt_serial[0]

#
# Decorators:
#
# timed_state
#

def timed_state(f=None, duration=None, next_state=None, first=False):
    """Decorator that turns a method into a timed autonomous-mode state.

    Args:
        f: the decorated function (None when used as ``@timed_state(...)``).
        duration: seconds the state runs before transitioning.
        next_state: name of the state entered when the duration expires.
        first: True if this is the starting state of the mode.

    The decorated function may accept ``tm`` (total mode time), ``state_tm``
    (time spent in the current state), both, or neither.
    """
    if f is None:
        # Called with keyword arguments only: return a decorator awaiting f.
        return functools.partial(timed_state, duration=duration,
                                 next_state=next_state, first=first)

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)

    # State bookkeeping is stored directly on the wrapper function object.
    wrapper.first = first
    wrapper.name = f.__name__
    wrapper.description = f.__doc__
    wrapper.next_state = next_state
    wrapper.duration = duration
    wrapper.expires = 0xffffffff
    wrapper.ran = False
    wrapper.serial = __get_state_serial()

    # Inspect the signature and build a uniform run(self, tm, state_tm)
    # adapter. BUG FIX: inspect.getargspec was deprecated and removed in
    # Python 3.11; getfullargspec provides the same information. Keyword-only
    # parameters are rejected explicitly to preserve the old strictness
    # (getargspec raised ValueError for them).
    spec = inspect.getfullargspec(f)
    if spec.varkw is not None or spec.varargs is not None or spec.kwonlyargs:
        raise ValueError("Invalid function parameters for function %s" % wrapper.name)
    args = spec.args

    # TODO: there has to be a better way to do this. oh well.
    if len(args) == 1:
        wrapper.run = lambda self, tm, state_tm: f(self)
    elif len(args) == 2:
        if args[1] == 'tm':
            wrapper.run = lambda self, tm, state_tm: f(self, tm)
        elif args[1] == 'state_tm':
            wrapper.run = lambda self, tm, state_tm: f(self, state_tm)
        else:
            raise ValueError("Invalid parameter name for function %s" % wrapper.name)
    elif args == ['self', 'tm', 'state_tm']:
        wrapper.run = lambda self, tm, state_tm: f(self, tm, state_tm)
    elif args == ['self', 'state_tm', 'tm']:
        wrapper.run = lambda self, tm, state_tm: f(self, state_tm, tm)
    else:
        raise ValueError("Invalid parameter names for function %s" % wrapper.name)

    return wrapper
class StatefulAutonomous(object):
    '''
    Base class for autonomous modes written as simple state machines.

    Subclasses define states with the @timed_state decorator, set a
    MODE_NAME class variable, and call super().__init__(components).
    Tunable values (including each state's duration) are mirrored to the
    SmartDashboard so they can be adjusted at runtime.
    '''

    # set True once __build_states() has run (guards against subclasses
    # forgetting to call __init__)
    __built = False
    # set True once the state machine has finished (no next state)
    __done = False

    def __init__(self, components):
        """components: mapping of name -> robot component; each entry is
        attached to this instance as an attribute of the same name."""
        if not hasattr(self, 'MODE_NAME'):
            raise ValueError("Must define MODE_NAME class variable")

        for k,v in components.items():
            setattr(self, k, v)

        self.__table = wpilib.SmartDashboard
        # (name, sd_name, getter) tuples read back in on_enable()
        self.__sd_args = []

        self.__build_states()
        self.__tunables = wpilib.StringArray()

    def register_sd_var(self, name, default, add_prefix=True, vmin=-1, vmax=1):
        """Register a SmartDashboard-tunable variable for this mode.

        The value is read back onto the instance in on_enable(). vmin/vmax
        only apply to numeric values.
        """
        is_number = self.__register_sd_var_internal(name, default, add_prefix, True)

        if not add_prefix:
            return

        # communicate the min/max value for numbers to the dashboard
        if is_number:
            name = '%s|%0.3f|%0.3f' % (name, vmin, vmax)

        self.__tunables.add(name)
        self.__table.PutValue(self.MODE_NAME + '_tunables', self.__tunables)

    def __register_sd_var_internal(self, name, default, add_prefix, readback):
        """Publish `default` to the SmartDashboard under the (optionally
        mode-prefixed) name; returns True if the value is numeric."""
        is_number = False
        sd_name = name

        if add_prefix:
            sd_name = '%s\\%s' % (self.MODE_NAME, name)

        # select the Put/Get pair matching the value's type
        if isinstance(default, bool):
            self.__table.PutBoolean(sd_name, default)
            args = (name, sd_name, self.__table.GetBoolean)

        elif isinstance(default, int) or isinstance(default, float):
            self.__table.PutNumber(sd_name, default)
            args = (name, sd_name, self.__table.GetNumber)
            is_number = True

        elif isinstance(default, str):
            self.__table.PutString(sd_name, default)
            args = (name, sd_name, self.__table.GetString)

        else:
            raise ValueError("Invalid default value")

        if readback:
            self.__sd_args.append(args)

        return is_number

    def __build_states(self):
        """Collect all @timed_state-decorated methods, register their
        durations as tunables, and publish name/description lists."""
        has_first = False

        states = {}

        #for each state function:
        for name in dir(self.__class__):

            state = getattr(self.__class__, name)
            # only attributes produced by @timed_state carry .next_state
            if name.startswith('__') or not hasattr(state, 'next_state'):
                continue

            # find a pre-execute function if available
            state.pre = getattr(self.__class__, 'pre_%s' % name, None)

            # is this the first state to execute?
            if state.first:
                if has_first:
                    raise ValueError("Multiple states were specified as the first state!")

                self.__first = name
                has_first = True

            # problem: how do we expire old entries?
            # -> what if we just use json? more flexible, but then we can't tune it
            # via SmartDashboard

            # make the time tunable
            if state.duration is not None:
                self.__register_sd_var_internal(state.name + '_duration', state.duration, True, True)

            description = ''
            if state.description is not None:
                description = state.description

            # keyed by serial so dashboard ordering matches definition order
            states[state.serial] = (state.name, description)

        # problem: the user interface won't know which entries are the
        # current variables being used by the robot. So, we setup
        # an array with the names, and the dashboard uses that
        # to determine the ordering too

        sorted_states = sorted(states.items())

        array = wpilib.StringArray()

        for k, (name, desc) in sorted_states:
            array.add(name)

        self.__table.PutValue(self.MODE_NAME + '_durations', array)

        array = wpilib.StringArray()

        for k, (name, desc) in sorted_states:
            array.add(desc)

        self.__table.PutValue(self.MODE_NAME + '_descriptions', array)

        if not has_first:
            raise ValueError("Starting state not defined! Use first=True on a state decorator")

        self.__built = True

    def _validate(self):
        # TODO: make sure the state machine can be executed
        # - run at robot time? Probably not. Run this as part of a unit test
        pass

    # how long does introspection take? do this in the constructor?
    # can do things like add all of the timed states, and project how long
    # it will take to execute it (don't forget about cycles!)

    def on_enable(self):
        """Called when autonomous starts: reads tunables back from the
        SmartDashboard and arms the first state."""
        if not self.__built:
            raise ValueError('super().__init__(components) was never called!')

        # print out the details of this autonomous mode, and any tunables
        self.battery_voltage = wpilib.DriverStation.GetInstance().GetBatteryVoltage()
        print("Battery voltage: %.02fv" % self.battery_voltage)

        print("Tunable values:")

        # read smart dashboard values, print them
        for name, sd_name, fn in self.__sd_args:
            val = fn(sd_name)
            setattr(self, name, val)
            print("-> %25s: %s" % (name, val))

        # set the starting state
        self.next_state(self.__first)
        self.__done = False

    def on_disable(self):
        '''Called when the autonomous mode is disabled'''
        pass

    def next_state(self, name):
        '''Call this function to transition to the next state; passing None
        ends the state machine.'''

        if name is not None:
            self.__state = getattr(self.__class__, name)
        else:
            self.__state = None

        if self.__state is None:
            return

        # reset so update() performs first-run setup for the new state
        self.__state.ran = False

    def update(self, tm):
        """Advance the state machine; call periodically with the current
        autonomous-mode time `tm` (seconds)."""

        # state: first, name, pre, time

        # if you get an error here, then you probably overrode on_enable,
        # but didn't call super().on_enable()
        try:
            state = self.__state
        except AttributeError:
            raise ValueError("super().on_enable was never called!")

        # we adjust this so that if we have states chained together,
        # then the total time it runs is the amount of time of the
        # states. Otherwise, the time drifts.
        new_state_start = tm

        # determine if the time has passed to execute the next state
        if state is not None and state.expires < tm:
            self.next_state(state.next_state)
            new_state_start = state.expires
            state = self.__state

        if state is None:
            if not self.__done:
                print("%.3fs: Done with autonomous mode" % tm)
                self.__done = True
            return

        # is this the first time this was executed?
        if not state.ran:
            state.ran = True
            state.start_time = new_state_start
            # duration comes from the (possibly dashboard-tuned) attribute
            state.expires = state.start_time + getattr(self, state.name + '_duration')
            print("%.3fs: Entering state:" % tm, state.name)

            # execute the pre state if it exists
            if state.pre is not None:
                state.pre(self, tm)

        # execute the state
        state.run(self, tm, tm - state.start_time)
| 2.21875 | 2 |
laptimes/management/commands/seed_cars.py | ev-agelos/acr-server | 1 | 12760221 | import os
import json
from django.core.management.base import BaseCommand, CommandError
from django.db import IntegrityError
from laptimes.models import Car
class Command(BaseCommand):
    """Management command that seeds the database with Assetto Corsa cars
    found under a directory of car folders."""

    help = 'Seed the database with AC cars.'

    def add_arguments(self, parser):
        parser.add_argument('--path', type=str)
        parser.add_argument(
            '--save',
            action='store_true',
            help='Save cars in the database, defaults to False.'
        )

    def handle(self, *args, **options):
        """Scan `--path` for car folders; build Car rows, saving them only
        when `--save` is passed. UI-file errors are collected and printed
        after the summary."""
        if not os.path.isdir(options['path']):
            # BUG FIX: raise CommandError (imported but previously unused)
            # instead of ValueError, per Django's management-command
            # convention, so the CLI reports a clean error message.
            raise CommandError('Given path is invalid.')

        errors, saved_cars = [], 0
        car_folders = os.listdir(options['path'])
        for car_folder in sorted(car_folders):
            try:
                car_data = load_car_ui(options['path'], car_folder)
            except ValueError as err:
                errors.append(str(err))  # Show errors last
                continue

            model = get_model_from_name(car_data)
            upgrade = get_upgrade_from_folder_name(car_folder)
            car = Car(ac_name=car_folder, brand=car_data['brand'],
                      model=model, upgrade=upgrade)
            if options['save'] is True:
                try:
                    car.save()
                except IntegrityError:
                    # Duplicate ac_name: skip without counting it.
                    print("{} already exists".format(car))
                    continue
                saved_cars += 1

        if options['save'] is True:
            msg = "\n{} cars inserted"
        else:
            msg = "\n{} cars would have been inserted"
        print(msg.format(saved_cars))

        for error in errors:
            print('\nERROR: ', error)
def load_car_ui(cars_path, car):
    """Parse the car's UI file and return its contents as a dict.

    Reads with utf-8-sig so a leading BOM is ignored, and strips embedded
    newlines/tabs before parsing (some UI files contain raw control
    characters inside strings).
    """
    ui_path = find_ui_filepath(cars_path, car)
    with open(ui_path, encoding='utf-8-sig') as handle:
        raw = handle.read()
    cleaned = raw.replace('\n', '').replace('\t', '')
    return json.loads(cleaned)
def find_ui_filepath(cars_path, car):
    """Return the path of the car's UI json file.

    Checks for the regular `ui_car.json` first, then the `dlc_`-prefixed
    variant (present when the content is not downloaded yet). Raises
    ValueError when neither exists.
    """
    ui_dir = os.path.join(cars_path, car, 'ui')
    for candidate in ('ui_car.json', 'dlc_ui_car.json'):
        path = os.path.join(ui_dir, candidate)
        if os.path.isfile(path):
            return path
    raise ValueError('UI file not found for {}'.format(car))
def get_model_from_name(car_data):
    """Derive the model name from the car's display name.

    Strips the brand prefix and any trailing upgrade designation
    (including lowercase / space-less typed variants found in the data).
    """
    name = car_data['name']
    brand = car_data['brand']
    # BUG FIX: the original used name.lstrip(brand), but str.lstrip treats
    # its argument as a *set of characters* to strip, not a prefix string —
    # it could eat leading characters of the model itself. Strip the literal
    # brand prefix instead.
    if name.startswith(brand):
        name = name[len(brand):]
    # remove possible upgrade from the end of name
    upgrades = [upgrade_tuple[1] for upgrade_tuple in Car.upgrades]
    upgrades.extend(['Stage 1', 'Stage 2', 'Stage 3'])
    upgrade_variations = []
    for upgrade in upgrades:
        # because of incosistent data include all typed versions
        upgrade_variations.extend(
            [upgrade, upgrade.lower(), upgrade.replace(' ', ''),
             upgrade.lower().replace(' ', '')]
        )
    for upgrade in upgrade_variations:
        if name.endswith(upgrade):
            name = name.replace(upgrade, '')
            break
    return name.strip()
def get_upgrade_from_folder_name(car_folder):
    """Return the upgrade found in the given folder name, or None.

    The candidate is the text after the last underscore; it is only
    returned when it matches a known Car.upgrades key.
    """
    candidate = car_folder.rpartition('_')[2]
    for upgrade_pair in Car.upgrades:
        if upgrade_pair[0] == candidate:
            return candidate
    return None
| 2.5 | 2 |
sympy/ntheory/tests/test_bbp_pi.py | iamabhishek0/sympy | 2 | 12760222 | <reponame>iamabhishek0/sympy
from random import randint
from sympy.ntheory.bbp_pi import pi_hex_digits
from sympy.utilities.pytest import raises
# http://www.herongyang.com/Cryptography/Blowfish-First-8366-Hex-Digits-of-PI.html
# There are actually 8336 listed there; with the prepended 3 there are 8337
# below
# Reference table: the leading '3' plus the first 8336 fractional hex digits
# of pi, as one contiguous string (whitespace in the literal is removed by
# split()/join()). Used below to spot-check pi_hex_digits at random offsets.
dig=''.join('''
3243f6a8885a308d313198a2e03707344a4093822299f31d0082efa98ec4e6c89452821e638d013
77be5466cf34e90c6cc0ac29b7c97c50dd3f84d5b5b54709179216d5d98979fb1bd1310ba698dfb5
ac2ffd72dbd01adfb7b8e1afed6a267e96ba7c9045f12c7f9924a19947b3916cf70801f2e2858efc
16636920d871574e69a458fea3f4933d7e0d95748f728eb658718bcd5882154aee7b54a41dc25a59
b59c30d5392af26013c5d1b023286085f0ca417918b8db38ef8e79dcb0603a180e6c9e0e8bb01e8a
3ed71577c1bd314b2778af2fda55605c60e65525f3aa55ab945748986263e8144055ca396a2aab10
b6b4cc5c341141e8cea15486af7c72e993b3ee1411636fbc2a2ba9c55d741831f6ce5c3e169b8793
1eafd6ba336c24cf5c7a325381289586773b8f48986b4bb9afc4bfe81b6628219361d809ccfb21a9
91487cac605dec8032ef845d5de98575b1dc262302eb651b8823893e81d396acc50f6d6ff383f442
392e0b4482a484200469c8f04a9e1f9b5e21c66842f6e96c9a670c9c61abd388f06a51a0d2d8542f
68960fa728ab5133a36eef0b6c137a3be4ba3bf0507efb2a98a1f1651d39af017666ca593e82430e
888cee8619456f9fb47d84a5c33b8b5ebee06f75d885c12073401a449f56c16aa64ed3aa62363f77
061bfedf72429b023d37d0d724d00a1248db0fead349f1c09b075372c980991b7b25d479d8f6e8de
f7e3fe501ab6794c3b976ce0bd04c006bac1a94fb6409f60c45e5c9ec2196a246368fb6faf3e6c53
b51339b2eb3b52ec6f6dfc511f9b30952ccc814544af5ebd09bee3d004de334afd660f2807192e4b
b3c0cba85745c8740fd20b5f39b9d3fbdb5579c0bd1a60320ad6a100c6402c7279679f25fefb1fa3
cc8ea5e9f8db3222f83c7516dffd616b152f501ec8ad0552ab323db5fafd23876053317b483e00df
829e5c57bbca6f8ca01a87562edf1769dbd542a8f6287effc3ac6732c68c4f5573695b27b0bbca58
c8e1ffa35db8f011a010fa3d98fd2183b84afcb56c2dd1d35b9a53e479b6f84565d28e49bc4bfb97
90e1ddf2daa4cb7e3362fb1341cee4c6e8ef20cada36774c01d07e9efe2bf11fb495dbda4dae9091
98eaad8e716b93d5a0d08ed1d0afc725e08e3c5b2f8e7594b78ff6e2fbf2122b648888b812900df0
1c4fad5ea0688fc31cd1cff191b3a8c1ad2f2f2218be0e1777ea752dfe8b021fa1e5a0cc0fb56f74
e818acf3d6ce89e299b4a84fe0fd13e0b77cc43b81d2ada8d9165fa2668095770593cc7314211a14
77e6ad206577b5fa86c75442f5fb9d35cfebcdaf0c7b3e89a0d6411bd3ae1e7e4900250e2d2071b3
5e226800bb57b8e0af2464369bf009b91e5563911d59dfa6aa78c14389d95a537f207d5ba202e5b9
c5832603766295cfa911c819684e734a41b3472dca7b14a94a1b5100529a532915d60f573fbc9bc6
e42b60a47681e6740008ba6fb5571be91ff296ec6b2a0dd915b6636521e7b9f9b6ff34052ec58556
6453b02d5da99f8fa108ba47996e85076a4b7a70e9b5b32944db75092ec4192623ad6ea6b049a7df
7d9cee60b88fedb266ecaa8c71699a17ff5664526cc2b19ee1193602a575094c29a0591340e4183a
3e3f54989a5b429d656b8fe4d699f73fd6a1d29c07efe830f54d2d38e6f0255dc14cdd20868470eb
266382e9c6021ecc5e09686b3f3ebaefc93c9718146b6a70a1687f358452a0e286b79c5305aa5007
373e07841c7fdeae5c8e7d44ec5716f2b8b03ada37f0500c0df01c1f040200b3ffae0cf51a3cb574
b225837a58dc0921bdd19113f97ca92ff69432477322f547013ae5e58137c2dadcc8b576349af3dd
a7a94461460fd0030eecc8c73ea4751e41e238cd993bea0e2f3280bba1183eb3314e548b384f6db9
086f420d03f60a04bf2cb8129024977c795679b072bcaf89afde9a771fd9930810b38bae12dccf3f
2e5512721f2e6b7124501adde69f84cd877a5847187408da17bc9f9abce94b7d8cec7aec3adb851d
fa63094366c464c3d2ef1c18473215d908dd433b3724c2ba1612a14d432a65c45150940002133ae4
dd71dff89e10314e5581ac77d65f11199b043556f1d7a3c76b3c11183b5924a509f28fe6ed97f1fb
fa9ebabf2c1e153c6e86e34570eae96fb1860e5e0a5a3e2ab3771fe71c4e3d06fa2965dcb999e71d
0f803e89d65266c8252e4cc9789c10b36ac6150eba94e2ea78a5fc3c531e0a2df4f2f74ea7361d2b
3d1939260f19c279605223a708f71312b6ebadfe6eeac31f66e3bc4595a67bc883b17f37d1018cff
28c332ddefbe6c5aa56558218568ab9802eecea50fdb2f953b2aef7dad5b6e2f841521b628290761
70ecdd4775619f151013cca830eb61bd960334fe1eaa0363cfb5735c904c70a239d59e9e0bcbaade
14eecc86bc60622ca79cab5cabb2f3846e648b1eaf19bdf0caa02369b9655abb5040685a323c2ab4
b3319ee9d5c021b8f79b540b19875fa09995f7997e623d7da8f837889a97e32d7711ed935f166812
810e358829c7e61fd696dedfa17858ba9957f584a51b2272639b83c3ff1ac24696cdb30aeb532e30
548fd948e46dbc312858ebf2ef34c6ffeafe28ed61ee7c3c735d4a14d9e864b7e342105d14203e13
e045eee2b6a3aaabeadb6c4f15facb4fd0c742f442ef6abbb5654f3b1d41cd2105d81e799e86854d
c7e44b476a3d816250cf62a1f25b8d2646fc8883a0c1c7b6a37f1524c369cb749247848a0b5692b2
85095bbf00ad19489d1462b17423820e0058428d2a0c55f5ea1dadf43e233f70613372f0928d937e
41d65fecf16c223bdb7cde3759cbee74604085f2a7ce77326ea607808419f8509ee8efd85561d997
35a969a7aac50c06c25a04abfc800bcadc9e447a2ec3453484fdd567050e1e9ec9db73dbd3105588
cd675fda79e3674340c5c43465713e38d83d28f89ef16dff20153e21e78fb03d4ae6e39f2bdb83ad
f7e93d5a68948140f7f64c261c94692934411520f77602d4f7bcf46b2ed4a20068d40824713320f4
6a43b7d4b7500061af1e39f62e9724454614214f74bf8b88404d95fc1d96b591af70f4ddd366a02f
45bfbc09ec03bd97857fac6dd031cb850496eb27b355fd3941da2547e6abca0a9a28507825530429
f40a2c86dae9b66dfb68dc1462d7486900680ec0a427a18dee4f3ffea2e887ad8cb58ce0067af4d6
b6aace1e7cd3375fecce78a399406b2a4220fe9e35d9f385b9ee39d7ab3b124e8b1dc9faf74b6d18
5626a36631eae397b23a6efa74dd5b43326841e7f7ca7820fbfb0af54ed8feb397454056acba4895
2755533a3a20838d87fe6ba9b7d096954b55a867bca1159a58cca9296399e1db33a62a4a563f3125
f95ef47e1c9029317cfdf8e80204272f7080bb155c05282ce395c11548e4c66d2248c1133fc70f86
dc07f9c9ee41041f0f404779a45d886e17325f51ebd59bc0d1f2bcc18f41113564257b7834602a9c
60dff8e8a31f636c1b0e12b4c202e1329eaf664fd1cad181156b2395e0333e92e13b240b62eebeb9
2285b2a20ee6ba0d99de720c8c2da2f728d012784595b794fd647d0862e7ccf5f05449a36f877d48
fac39dfd27f33e8d1e0a476341992eff743a6f6eabf4f8fd37a812dc60a1ebddf8991be14cdb6e6b
0dc67b55106d672c372765d43bdcd0e804f1290dc7cc00ffa3b5390f92690fed0b667b9ffbcedb7d
9ca091cf0bd9155ea3bb132f88515bad247b9479bf763bd6eb37392eb3cc1159798026e297f42e31
2d6842ada7c66a2b3b12754ccc782ef11c6a124237b79251e706a1bbe64bfb63501a6b101811caed
fa3d25bdd8e2e1c3c9444216590a121386d90cec6ed5abea2a64af674eda86a85fbebfe98864e4c3
fe9dbc8057f0f7c08660787bf86003604dd1fd8346f6381fb07745ae04d736fccc83426b33f01eab
71b08041873c005e5f77a057bebde8ae2455464299bf582e614e58f48ff2ddfda2f474ef388789bd
c25366f9c3c8b38e74b475f25546fcd9b97aeb26618b1ddf84846a0e79915f95e2466e598e20b457
708cd55591c902de4cb90bace1bb8205d011a862487574a99eb77f19b6e0a9dc09662d09a1c43246
33e85a1f0209f0be8c4a99a0251d6efe101ab93d1d0ba5a4dfa186f20f2868f169dcb7da83573906
fea1e2ce9b4fcd7f5250115e01a70683faa002b5c40de6d0279af88c27773f8641c3604c0661a806
b5f0177a28c0f586e0006058aa30dc7d6211e69ed72338ea6353c2dd94c2c21634bbcbee5690bcb6
deebfc7da1ce591d766f05e4094b7c018839720a3d7c927c2486e3725f724d9db91ac15bb4d39eb8
fced54557808fca5b5d83d7cd34dad0fc41e50ef5eb161e6f8a28514d96c51133c6fd5c7e756e14e
c4362abfceddc6c837d79a323492638212670efa8e406000e03a39ce37d3faf5cfabc277375ac52d
1b5cb0679e4fa33742d382274099bc9bbed5118e9dbf0f7315d62d1c7ec700c47bb78c1b6b21a190
45b26eb1be6a366eb45748ab2fbc946e79c6a376d26549c2c8530ff8ee468dde7dd5730a1d4cd04d
c62939bbdba9ba4650ac9526e8be5ee304a1fad5f06a2d519a63ef8ce29a86ee22c089c2b843242e
f6a51e03aa9cf2d0a483c061ba9be96a4d8fe51550ba645bd62826a2f9a73a3ae14ba99586ef5562
e9c72fefd3f752f7da3f046f6977fa0a5980e4a91587b086019b09e6ad3b3ee593e990fd5a9e34d7
972cf0b7d9022b8b5196d5ac3a017da67dd1cf3ed67c7d2d281f9f25cfadf2b89b5ad6b4725a88f5
4ce029ac71e019a5e647b0acfded93fa9be8d3c48d283b57ccf8d5662979132e28785f0191ed7560
55f7960e44e3d35e8c15056dd488f46dba03a161250564f0bdc3eb9e153c9057a297271aeca93a07
2a1b3f6d9b1e6321f5f59c66fb26dcf3197533d928b155fdf5035634828aba3cbb28517711c20ad9
f8abcc5167ccad925f4de817513830dc8e379d58629320f991ea7a90c2fb3e7bce5121ce64774fbe
32a8b6e37ec3293d4648de53696413e680a2ae0810dd6db22469852dfd09072166b39a460a6445c0
dd586cdecf1c20c8ae5bbef7dd1b588d40ccd2017f6bb4e3bbdda26a7e3a59ff453e350a44bcb4cd
d572eacea8fa6484bb8d6612aebf3c6f47d29be463542f5d9eaec2771bf64e6370740e0d8de75b13
57f8721671af537d5d4040cb084eb4e2cc34d2466a0115af84e1b0042895983a1d06b89fb4ce6ea0
486f3f3b823520ab82011a1d4b277227f8611560b1e7933fdcbb3a792b344525bda08839e151ce79
4b2f32c9b7a01fbac9e01cc87ebcc7d1f6cf0111c3a1e8aac71a908749d44fbd9ad0dadecbd50ada
380339c32ac69136678df9317ce0b12b4ff79e59b743f5bb3af2d519ff27d9459cbf97222c15e6fc
2a0f91fc719b941525fae59361ceb69cebc2a8645912baa8d1b6c1075ee3056a0c10d25065cb03a4
42e0ec6e0e1698db3b4c98a0be3278e9649f1f9532e0d392dfd3a0342b8971f21e1b0a74414ba334
8cc5be7120c37632d8df359f8d9b992f2ee60b6f470fe3f11de54cda541edad891ce6279cfcd3e7e
6f1618b166fd2c1d05848fd2c5f6fb2299f523f357a632762393a8353156cccd02acf081625a75eb
b56e16369788d273ccde96629281b949d04c50901b71c65614e6c6c7bd327a140a45e1d006c3f27b
9ac9aa53fd62a80f00bb25bfe235bdd2f671126905b2040222b6cbcf7ccd769c2b53113ec01640e3
d338abbd602547adf0ba38209cf746ce7677afa1c52075606085cbfe4e8ae88dd87aaaf9b04cf9aa
7e1948c25c02fb8a8c01c36ae4d6ebe1f990d4f869a65cdea03f09252dc208e69fb74e6132ce77e2
5b578fdfe33ac372e6'''.split())
def test_hex_pi_nth_digits():
    # Fixed-position spot checks with the default 14-digit precision.
    for offset, expected in (
        (0, '3243f6a8885a30'),
        (1, '243f6a8885a308'),
        (10000, '68ac8fcfb8016c'),
        (13, '08d313198a2e03'),
    ):
        assert pi_hex_digits(offset) == expected

    # Explicit precision, including the degenerate zero-length request.
    assert pi_hex_digits(0, 3) == '324'
    assert pi_hex_digits(0, 0) == ''

    # Invalid arguments must be rejected.
    raises(ValueError, lambda: pi_hex_digits(-1))
    raises(ValueError, lambda: pi_hex_digits(3.14))

    # this will pick a random segment to compute every time
    # it is run. If it ever fails, there is an error in the
    # computation.
    start = randint(0, len(dig))
    prec = randint(0, len(dig) - start)
    assert pi_hex_digits(start, prec) == dig[start: start + prec]
| 1.898438 | 2 |
museum_site/tool_views.py | DrDos0016/museum-of-zzt | 2 | 12760223 | <gh_stars>1-10
import codecs
import grp
import gzip
import os
import pwd
import shutil
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.shortcuts import render
from .common import *
from .constants import *
from io import StringIO
from zipfile import ZipFile
from .models import *
from internetarchive import upload
from PIL import Image
try:
import zookeeper
HAS_ZOOKEEPER = True
except ImportError:
HAS_ZOOKEEPER = False
@staff_member_required
def add_livestream(request, pk):
    """Render (GET) and process (POST) the form that creates a
    "Livestream" VOD Article for the file with the given pk.

    On POST: normalizes the YouTube URL to a video id, fills an Article
    from a template, optionally stores/crops an uploaded preview image,
    and associates the article with the file.
    """
    data = {
        "title": "Tools",
        "file": File.objects.get(pk=pk),
        "today": str(datetime.now())[:10]
    }
    # NOTE(review): leftover debug print?
    print(SITE_ROOT)
    if request.POST.get("action"):
        # Editing an existing article when a pk is provided, else creating.
        if request.POST.get("pk"):
            a = Article.objects.get(pk=int(request.POST["pk"]))
        else:
            a = Article()
        # Convert video URL
        url = request.POST.get("url")
        if request.POST.get("url").startswith("http"):
            # Strip the various YouTube URL shapes down to the bare id.
            url = url.replace("https://youtu.be/", "")
            url = url.replace("https://www.youtube.com/watch?v=", "")
            url = url.replace("https://studio.youtube.com/video/", "")
            url = url.replace("/edit", "")
            if "&" in url:
                # Drop any extra query parameters after the id.
                url = url[:url.find("&")]
        data["video_id"] = url
        a.title = "Livestream - " + request.POST.get("title")
        a.author = request.POST.get("author")
        a.category = "Livestream"
        a.schema = "django"
        a.publish_date = request.POST.get("date")
        a.published = int(request.POST.get("published", 1))
        a.summary = request.POST.get("summary")
        a.static_directory = "ls-" + data["video_id"]
        a.allow_comments = True

        # Open the template
        file_path = os.path.join(
            SITE_ROOT, "tools", "data", "youtube_template.html"
        )
        with open(file_path) as fh:
            template = fh.read()

        # Process the description: keep everything before "Download:" and
        # wrap the remaining paragraphs in <p> tags.
        final_desc = request.POST.get("desc")
        final_desc = final_desc[:final_desc.find("Download:")]
        final_desc = "<p>" + final_desc.replace("\r\n", "</p>\n<p>")
        final_desc = final_desc[:-3]  # drop the final dangling "<p>"
        final_desc = final_desc.replace("<p></p>", "")

        a.content = template.format(video_id=data["video_id"], desc=final_desc)
        a.save()
        data["article_pk"] = str(a.id)

        # Upload the preview
        if request.FILES.get("preview"):
            # Static assets live under articles/<year>/<static_directory>/.
            folder = os.path.join(
                SITE_ROOT, "museum_site", "static",
                "articles", request.POST.get("date")[:4], a.static_directory
            )
            os.mkdir(folder)

            # Save the file to the uploaded folder
            file_path = os.path.join(folder, "preview.png")
            with open(file_path, 'wb+') as fh:
                for chunk in request.FILES["preview"].chunks():
                    fh.write(chunk)
                a.save()

            # Chop off the sidebar if needed
            if request.POST.get("480crop"):
                image = Image.open(file_path)
                image = image.crop((0, 0, 480, 350))
                image.save(file_path)

        # Associate the article with the relevant file
        data["file"].articles.add(a)
        data["file"].save()

    return render(request, "museum_site/tools/add_livestream.html", data)
@staff_member_required
def audit_genres(request):
    """Show the canonical genre list next to every genre string actually
    present in the File table (slash-separated values are split apart)."""
    seen = {}
    for entry in File.objects.all().only("genre"):
        for genre in entry.genre.split("/"):
            seen[genre] = True

    data = {
        "title": "Genre Audit",
        "canon_genres": GENRE_LIST,
        "observed": sorted(seen.keys()),
    }
    return render(
        request, "museum_site/tools/audit_genres.html", data
    )
@staff_member_required
def audit_zeta_config(request):
    """List ZZT/Super ZZT files whose Zeta configuration is neither unset
    nor one of the two default configs (ids 1 and 4)."""
    default_configs = (
        Q(zeta_config_id=None) | Q(zeta_config_id=1) | Q(zeta_config_id=4)
    )
    special = File.objects.filter(
        details__in=[DETAIL_ZZT, DETAIL_SZZT]
    ).exclude(default_configs).order_by("zeta_config")

    data = {
        "title": "Zeta Config Audit",
        "special": special,
    }
    return render(request, "museum_site/tools/audit_zeta_config.html", data)
@staff_member_required
def calculate(request, field, pk):
    """Render the 'Calculate <field>' tool page for a single file."""
    # Lookup ensures the file exists (raises if the pk is unknown);
    # the template currently only needs the title.
    target = File.objects.get(pk=pk)
    context = {"title": "Calculate {}".format(field.title())}
    return render(request, "museum_site/tools/calculate.html", context)
@staff_member_required
def crediting_preferences(request):
    """Show the crediting preferences of every patron profile."""
    context = {
        "title": "Crediting Preferences",
        "patrons": Profile.objects.filter(patron=True),
    }
    return render(request, "museum_site/tools/crediting-preferences.html", context)
@staff_member_required
def extract_font(request, pk):
    """List the files inside a file's zip and, when a `font` query
    parameter is given, extract it and rip it to a PNG charset via
    zookeeper. The extracted temp file is removed afterwards.
    """
    data = {"title": "Extract Font"}
    f = File.objects.get(pk=pk)
    data["file"] = f
    # NOTE(review): zip_file is never explicitly closed — consider `with`.
    zip_file = zipfile.ZipFile(f.phys_path())
    data["files"] = zip_file.namelist()
    data["files"].sort(key=str.lower)

    if request.GET.get("font"):
        # Extract the file
        zip_file.extract(request.GET["font"], path=DATA_PATH)
        # Charset image name is derived from the font file's base name.
        charset_name = os.path.splitext(
            os.path.basename(request.GET["font"])
        )[0]
        try:
            # Zero-padded 4-digit file id prefixes the output image.
            f_id = ("0000"+str(f.id))[-4:]
            z = zookeeper.Zookeeper()
            z.export_font(
                os.path.join(DATA_PATH, request.GET["font"]),
                os.path.join(
                    CHARSET_PATH, "{}-{}.png".format(f_id, charset_name)
                ),
                1
            )
            data["result"] = "Ripped {}-{}.png".format(f_id, charset_name)
        except Exception as e:
            data["result"] = "Could not rip font!"
            print(e)

        # Remove the file
        os.remove(os.path.join(DATA_PATH, request.GET["font"]))
    return render(request, "museum_site/tools/extract_font.html", data)
@staff_member_required
def log_viewer(request):
    """Display one of the server log files, decompressing `*.gz` rotations
    on the fly. The file name comes from the `log` query parameter and is
    read relative to <SITE_ROOT>/log/.
    """
    data = {
        "title": "Log Viewer",
        "range": range(1, 16),
        "logs": [
            "access", "backup", "cron", "discord", "error", "mass_dl",
            "wozztbot"
        ]
    }

    log_name = request.GET.get("log")
    # SECURITY FIX: the name is user-supplied; refuse path separators and
    # parent references so reads cannot escape the log directory.
    # (Also removed a leftover debug print of the resolved path.)
    if log_name and ".." not in log_name and "/" not in log_name \
            and "\\" not in log_name:
        path = os.path.join(SITE_ROOT, "log", log_name)
        stat = os.stat(path)
        data["size"] = stat.st_size
        data["modified"] = datetime.fromtimestamp(stat.st_mtime)

        if log_name.endswith(".gz"):
            # Rotated logs are gzipped; report the uncompressed size.
            with gzip.open(path) as fh:
                data["text"] = fh.read().decode("utf-8")
            data["size"] = len(data["text"])
        else:
            with open(path) as fh:
                data["text"] = fh.read()

    return render(request, "museum_site/tools/log_viewer.html", data)
@staff_member_required
def mirror(request, pk):
    """Render (GET) and process (POST) the Archive.org mirroring page.

    On POST: builds a zip from the file plus a package profile's base
    files (optionally with a generated ZZT.CFG), uploads it to the
    Internet Archive with emulation metadata, and records the resulting
    item name on the File on success.
    """
    f = File.objects.get(pk=pk)
    data = {
        "title": "Archive.org Mirror",
        "file": f,
        "ret": None,
        "packages": PACKAGE_PROFILES,
        "collection": ARCHIVE_COLLECTION,
    }

    package = int(request.GET.get("package", 0))
    data["package"] = PACKAGE_PROFILES[package]
    # NOTE(review): ceil of a floor-division is a no-op; was `/ 2` intended?
    data["split"] = math.ceil(len(data["packages"]) // 2)

    zip_file = zipfile.ZipFile(os.path.join(SITE_ROOT, f.download_url()[1:]))
    file_list = zip_file.namelist()
    file_list.sort(key=str.lower)
    data["file_list"] = file_list

    # Mirror the file
    if request.POST.get("mirror"):
        if request.POST.get("package") != "NONE":
            package = PACKAGE_PROFILES[int(request.POST.get("package", 0))]

        # Advanced settings
        if request.POST.get("upload_name"):
            upload_name = request.POST["upload_name"]
            zip_name = upload_name + ".zip"
        else:
            zip_name = package["prefix"] + f.filename
            upload_name = zip_name[:-4]

        # Copy the base package zip
        shutil.copy(
            SITE_ROOT + f.download_url(),
            os.path.join(TEMP_PATH, zip_name)
        )

        # Handle alternative Zip upload
        if request.FILES.get("alt_src"):
            with open(os.path.join(TEMP_PATH, zip_name), "wb") as fh:
                fh.write(request.FILES["alt_src"].read())

        temp_zip = os.path.join(TEMP_PATH, zip_name)

        # Open the WIP zip
        with ZipFile(temp_zip, "a") as z:
            # Insert the base files
            to_add = glob.glob(
                os.path.join(BASE_PATH, package["directory"], "*")
            )
            for a in to_add:
                z.write(a, arcname=os.path.basename(a))

            # Create ZZT.CFG if needed
            if package.get("use_cfg"):
                # Remove .ZZT extension
                config_content = request.POST.get("launch")[:-4].upper()
                if package["registered"]:
                    config_content += "\r\nREGISTERED"
                z.writestr("ZZT.CFG", config_content)

        # Create description
        description = "{}\n\n{}".format(
            package["auto_desc"], request.POST.get("description", "")
        )

        # Determine the launch command
        if request.POST.get("alt_launch"):
            launch_command = request.POST["alt_launch"]
        else:
            launch_command = package["executable"] + " " + request.POST.get(
                "launch", ""
            ).upper()

        # Zip file is completed, prepare the upload
        meta = {
            "title": request.POST.get("title"),
            "mediatype": "software",
            "collection": ARCHIVE_COLLECTION,
            "emulator": "dosbox",
            "emulator_ext": "zip",
            "emulator_start": launch_command,
            "year": str(f.release_date)[:4],
            "subject": [package["engine"]] + f.genre.split("/"),
            "creator": f.author.split("/"),
            "description": description
        }

        # Keep test uploads distinguishable; IA item names can't have spaces.
        if DEBUG:
            upload_name = "test-" + upload_name
        upload_name = upload_name.replace(" ", "_")
        file_path = os.path.join(TEMP_PATH, zip_name)

        try:
            r = upload(
                upload_name,
                files=[file_path],
                metadata=meta,
                access_key=IA_ACCESS,
                secret_key=IA_SECRET,
            )
        except Exception as e:
            r = None
            data["status"] = "FAILURE"
            data["error"] = e

        if r and r[0].status_code == 200:
            # Success: remember the IA item name and clean up the temp zip.
            data["status"] = "SUCCESS"
            f.archive_name = upload_name
            f.save()
            os.remove(os.path.join(TEMP_PATH, zip_name))
        else:
            data["status"] = "FAILURE"
        data["archive_resp"] = r

    return render(request, "museum_site/tools/mirror.html", data)
@staff_member_required
def patron_input(request):
    """Returns page listing patron users' suggestions/nominations/input.

    The `category` query parameter selects which Profile field is shown;
    unrecognized categories fall back to `bkzzt_topics` (matching the
    original else branch).
    """
    # Map query-string categories to the Profile field storing their data.
    # (Replaces a repetitive if/elif chain; also removed a leftover debug
    # print of the selected category.)
    CATEGORY_FIELDS = {
        "stream-poll-nominations": "stream_poll_nominations",
        "stream-selections": "stream_selections",
        "closer-look-nominations": "closer_look_nominations",
        "guest-stream-selections": "guest_stream_selections",
        "closer-look-selections": "closer_look_selections",
    }

    category = request.GET.get("category", "stream-poll-nominations")
    field = CATEGORY_FIELDS.get(category, "bkzzt_topics")

    data = {
        "title": "Patron Input",
        "users": User.objects.order_by("-id"),
        "category": category.replace("-", " ").title(),
        "patrons": [],
    }

    patrons = Profile.objects.filter(
        patron=True
    ).order_by("user__username")
    for p in patrons:
        data["patrons"].append(
            {"username": p.user.username, "value": getattr(p, field)}
        )

    return render(request, "museum_site/tools/patron-input.html", data)
@staff_member_required
def publication_pack_file_associations(request):
    """Associate the files referenced by a publication-pack article with it.

    When a `pk` query parameter is given, the article's body is scanned for
    a `|get_files_by_id "1,2,3"` template call; each referenced File then
    gets the article added to its `articles` relation.
    """
    data = {
        "title": "Publication Pack File Associations",
        "packs": Article.objects.publication_packs(),
        "count": 0,
        "ids": []
    }

    if request.GET.get("pk"):
        article_pk = int(request.GET["pk"])
        a = Article.objects.get(pk=article_pk)
        for line in a.content.split("\n"):
            if "|get_files_by_id" in line:
                # Extract the quoted, comma-separated id list.
                ids = line[line.find('"') + 1:]
                ids = ids[:ids.find('"')]
                data["ids"] = ids.split(",")
                break

        # BUG FIX: the original called print(ids) here, which raised
        # NameError whenever no |get_files_by_id line was found; debug
        # prints removed. (The association loop only runs when ids were
        # found, so moving it inside the pk branch preserves behavior.)
        for i in data["ids"]:
            f = File.objects.filter(pk=int(i)).first()
            if f:
                f.articles.add(article_pk)
                f.save()
                data["count"] += 1

    return render(request, "museum_site/tools/publication-packs.html", data)
@staff_member_required
def publish(request, pk):
    """Render (GET) and process (POST) the page that publishes a file
    currently marked as uploaded.

    On POST: moves the zip into its permanent letter directory, swaps the
    "uploaded" detail for the chosen ones, stamps the publish date, and
    credits the uploading user. On GET: lists the zip contents and
    suggests details based on file extensions.
    """
    data = {
        "title": "Publish",
        "file": File.objects.get(pk=pk),
        "file_list": [],
        "suggested_button": True, # Show "Suggested" button after detail list
        "hints": [],
        "hint_ids": [],
    }

    data["detail_cats"] = Detail.objects.advanced_search_categories()

    if request.POST.get("publish"):
        # Move the file
        src = SITE_ROOT + data["file"].download_url()
        dst = "{}/zgames/{}/{}".format(
            SITE_ROOT, data["file"].letter, data["file"].filename
        )
        shutil.move(src, dst)

        # Adjust the details
        data["file"].details.remove(Detail.objects.get(pk=DETAIL_UPLOADED))
        for detail in request.POST.getlist("details"):
            data["file"].details.add(Detail.objects.get(pk=detail))

        # Save
        data["file"].spotlight = request.POST.get("spotlight", False)
        data["file"].publish_date = datetime.now()
        data["file"].save()

        # Increment publish count for users
        upload = Upload.objects.get(file__id=data["file"].id)
        if upload.user_id:
            profile = Profile.objects.get(pk=upload.user_id)
            profile.files_published += 1
            profile.save()

        # Redirect
        return redirect("tool_list", pk=pk)

    with ZipFile(SITE_ROOT + data["file"].download_url(), "r") as zf:
        data["file_list"] = zf.namelist()
    data["file_list"].sort()

    # Get suggested details based on the file list
    unknown_extensions = []
    for name in data["file_list"]:
        # os.path.splitext returns (root, ext); extensionless names keep
        # their root so dotfile-like entries still get matched.
        ext = os.path.splitext(os.path.basename(name).upper())
        if ext[1] == "":
            ext = ext[0]
        else:
            ext = ext[1]

        if ext in EXTENSION_HINTS:
            suggest = (EXTENSION_HINTS[ext][1])
            data["hints"].append((name, EXTENSION_HINTS[ext][0], suggest))
            data["hint_ids"] += EXTENSION_HINTS[ext][1]
        elif ext == "": # Folders hit this
            continue

    data["unknown_extensions"] = unknown_extensions
    data["hint_ids"] = set(data["hint_ids"])
    return render(request, "museum_site/tools/publish.html", data)
@staff_member_required
def queue_removal(request, letter, filename):
    """ Returns page to remove a queued file: its zip, screenshot,
    Upload row, and File row. """
    data = {"title": "Queue Removal"}
    matches = File.objects.identifier(letter=letter, filename=filename)

    if len(matches) != 1:
        # Ambiguous or missing identifier -- report only the match count.
        data["results"] = len(matches)
        return render(request, "museum_site/tools/queue-removal.html", data)

    data["file"] = matches[0]
    confirmed = (
        request.POST.get("action") == "queue-removal"
        and request.POST.get("confirm")
    )
    if confirmed:
        # Remove the physical zip file
        zip_path = data["file"].phys_path()
        print(zip_path)
        if os.path.isfile(zip_path):
            os.remove(zip_path)

        # Remove the Upload object, when one exists
        uploads = Upload.objects.filter(file_id=data["file"].id)
        if uploads:
            upload = uploads[0]
            print(upload)
            upload.delete()

        # Remove the preview image
        screenshot_path = data["file"].screenshot_phys_path()
        if screenshot_path:
            print(screenshot_path)
            if os.path.isfile(screenshot_path):
                os.remove(screenshot_path)

        # Remove the File object itself
        data["file"].delete()
        data["success"] = True
    return render(request, "museum_site/tools/queue-removal.html", data)
@staff_member_required
def reletter(request, pk):
    """ Returns page to move a file's zip and screenshot to a new letter.

    On POST ("new_letter"), validates the letter, moves the zip and the
    screenshot into the new letter's directories, and updates the DB row.
    Any failure renders the page with "results"/"error" set.
    """
    data = {"title": "Re-Letter Zip"}
    data["file"] = File.objects.get(pk=pk)

    if request.POST.get("new_letter"):
        letter = request.POST["new_letter"].lower()
        old_letter = data["file"].letter

        # Validate letter.
        # BUG FIX: `letter not in "abc..."` was a substring test, so any
        # multi-character run found in the alphabet (e.g. "bc") passed
        # validation. Require exactly one valid character.
        if len(letter) != 1 or letter not in "abcdefghijklmnopqrstuvwxyz1":
            data["results"] = "Invalid letter specified"
            return render(request, "museum_site/tools/reletter.html", data)

        # Validate that nothing will be clobbered
        dst = os.path.join(SITE_ROOT, "zgames", letter, data["file"].filename)
        if os.path.isfile(dst):
            data["results"] = ("A zip with the same name already exists in "
                               "that letter!")
            return render(request, "museum_site/tools/reletter.html", data)

        # Copy the file to the new letter directory (dst computed above)
        src = data["file"].phys_path()
        try:
            shutil.copy(src, dst)
            shutil.copystat(src, dst)  # preserve timestamps/permissions
        except FileNotFoundError as e:
            data["results"] = "Copy failure!"
            data["error"] = str(e)
            return render(request, "museum_site/tools/reletter.html", data)

        # Remove the old zipfile
        try:
            os.remove(src)
        except FileNotFoundError as e:
            data["results"] = "Failed to remove {}.".format(src)
            data["error"] = str(e)
            return render(request, "museum_site/tools/reletter.html", data)

        # Copy the screenshot to the new letter directory
        src = data["file"].screenshot_phys_path()
        dst = os.path.join(
            STATIC_PATH, "images", "screenshots", letter,
            data["file"].screenshot
        )
        try:
            shutil.copy(src, dst)
            shutil.copystat(src, dst)
        except FileNotFoundError as e:
            data["results"] = "Screenshot copy failure!"
            data["error"] = str(e)
            return render(request, "museum_site/tools/reletter.html", data)

        # Remove the old screenshot
        try:
            os.remove(src)
        except FileNotFoundError as e:
            data["results"] = "Failed to remove {}.".format(src)
            data["error"] = str(e)
            return render(request, "museum_site/tools/reletter.html", data)

        data["results"] = ("Successfully Re-Lettered from <b>{}</b> to "
                           "<b>{}</b>").format(
            old_letter.upper(),
            letter.upper()
        )

        # Update the database entry
        data["file"].letter = letter
        data["file"].save()
    return render(request, "museum_site/tools/reletter.html", data)
@staff_member_required
def replace_zip(request, pk):
    """ Returns page to replace a file's zip on disk (docstring was a
    copy-paste of the scan view's; corrected).

    Shows stat info for the current zip; on POST ("replace-zip") the
    uploaded replacement overwrites the zip in place and requested
    attributes (checksum/boards/size) are recalculated.
    """
    data = {"title": "Replace Zip"}
    data["file"] = File.objects.get(pk=pk)

    # Original file info
    data["stat"] = os.stat(data["file"].phys_path())
    data["mtime"] = datetime.fromtimestamp(data["stat"].st_mtime)
    data["file_user"] = pwd.getpwuid(data["stat"].st_uid)
    data["file_group"] = grp.getgrgid(data["stat"].st_gid)

    if request.POST.get("action") == "replace-zip":
        file_path = data["file"].phys_path()
        print(request.FILES)
        # Overwrite the zip on disk with the uploaded replacement
        with open(file_path, 'wb+') as fh:
            for chunk in request.FILES["replacement"].chunks():
                fh.write(chunk)

        data["new_file"] = File.objects.get(pk=pk)

        # Recalculate requested attributes against the new zip
        if request.POST.get("update-checksum"):
            data["new_file"].calculate_checksum()
        if request.POST.get("update-board-count"):
            data["new_file"].calculate_boards()
        if request.POST.get("update-size"):
            data["new_file"].calculate_size()
        data["new_file"].save()

        # BUG FIX: the "new_*" values were derived from the pre-replacement
        # data["stat"], so the page always showed stale info for the new zip.
        data["new_stat"] = os.stat(data["file"].phys_path())
        data["new_mtime"] = datetime.fromtimestamp(data["new_stat"].st_mtime)
        data["new_file_user"] = pwd.getpwuid(data["new_stat"].st_uid)
        data["new_file_group"] = grp.getgrgid(data["new_stat"].st_gid)
    return render(request, "museum_site/tools/replace_zip.html", data)
@staff_member_required
def scan(request):
    """ Returns page with latest Museum scan results"""
    data = {"title": "Museum Scan"}
    log_path = os.path.join(STATIC_PATH, "data", "scan.log")
    try:
        with codecs.open(log_path, "r", "utf-8") as log_file:
            data["scan"] = log_file.read()
    except FileNotFoundError:
        # No scan has been run yet; render an empty log.
        data["scan"] = ""
    return render(request, "museum_site/tools/scan.html", data)
@staff_member_required
def tool_index(request):
    """ Returns the landing page for the staff tool index """
    context = {"title": "Tool Index", "file": {"id": 1}}
    return render(request, "museum_site/tools/tool_index.html", context)
@staff_member_required
def tool_list(request, pk):
    """ Returns page listing tools available for a file and file information """
    data = {
        "title": "Tools",
        "file": File.objects.get(pk=pk)
    }
    valid_letters = "1abcdefghijklmnopqrstuvwxyz"
    # NOTE(review): filter(file_id=<File instance>) relies on Django coercing
    # the instance to its pk -- confirm this matches the intended lookup.
    data["upload_info"] = Upload.objects.filter(file_id=data["file"]).first()

    # Simple validation tools
    data["valid_letter"] = data["file"].letter in valid_letters
    data["valid_filename"] = bool(data["file"].phys_path())

    field = request.GET.get("recalculate")
    if field:
        zfile = data["file"]
        if field == "sort-title":
            zfile.calculate_sort_title()
            data["new"] = zfile.sort_title
        elif field == "size":
            zfile.calculate_size()
            data["new"] = zfile.size
        elif field == "reviews":
            zfile.calculate_reviews()
            data["new"] = f"{zfile.review_count}/{zfile.rating}"
        elif field == "articles":
            zfile.calculate_article_count()
            data["new"] = zfile.article_count
        elif field == "checksum":
            zfile.calculate_checksum()
            data["new"] = zfile.checksum
        elif field == "boards":
            zfile.calculate_boards()
            data["new"] = f"{zfile.playable_boards}/{zfile.total_boards}"
        zfile.basic_save()
    return render(request, "museum_site/tools/tool-file-tools.html", data)
@staff_member_required
def user_list(request):
    """ Returns page listing users and info for reference """
    context = {"title": "User List", "users": User.objects.order_by("-id")}
    return render(request, "museum_site/tools/user-list.html", context)
@staff_member_required
def set_screenshot(request, pk):
    """ Returns page to generate and set a file's screenshot.

    GET "file" selects a .zzt world inside the zip; GET "board" renders a
    board of it to a temp preview. POST "save" promotes the preview to the
    file's screenshot; POST "b64img" saves a browser-supplied base64 PNG.
    """
    data = {
        "title": "Set Screenshot",
    }
    zfile = File.objects.get(pk=pk)
    data["file"] = zfile
    data["file_list"] = []

    if not HAS_ZOOKEEPER:
        return HttpResponse("Zookeeper library not found.")

    # List the .zzt worlds inside the zip so one can be picked
    with ZipFile(SITE_ROOT + zfile.download_url(), "r") as zf:
        all_files = zf.namelist()
        for f in all_files:
            if f.lower().endswith(".zzt"):
                data["file_list"].append(f)
        data["file_list"].sort()

    if request.GET.get("file"):
        # Extract the selected world and open it with Zookeeper
        with ZipFile(SITE_ROOT + zfile.download_url(), "r") as zf:
            zf.extract(
                request.GET["file"],
                path=SITE_ROOT + "/museum_site/static/data/"
            )
        z = zookeeper.Zookeeper(
            SITE_ROOT + "/museum_site/static/data/" + request.GET["file"]
        )

        data["board_list"] = []
        for board in z.boards:
            print(board.title)
            data["board_list"].append(board.title)

        if request.GET.get("board"):
            # Render the chosen board to a temporary preview image;
            # board 0 is the title screen and is rendered as such.
            data["board_num"] = int(request.GET["board"])
            if data["board_num"] != 0:
                z.boards[data["board_num"]].screenshot(
                    SITE_ROOT + "/museum_site/static/data/temp"
                )
            else:
                z.boards[data["board_num"]].screenshot(
                    SITE_ROOT + "/museum_site/static/data/temp",
                    title_screen=True
                )
            data["show_preview"] = True

    image_path = ""
    if request.POST.get("save"):
        # Promote the rendered preview to the file's screenshot
        src = SITE_ROOT + "/museum_site/static/data/temp.png"
        image_path = zfile.screenshot_phys_path()
        shutil.copyfile(src, image_path)
        zfile.screenshot = zfile.filename[:-4] + ".png"
        zfile.save()
    elif request.POST.get("b64img"):
        # Screenshot posted from the browser as a base64 data URL
        raw = request.POST.get("b64img").replace(
            "data:image/png;base64,", "", 1
        )
        from io import BytesIO
        import base64
        image = Image.open(BytesIO(base64.b64decode(raw)))
        image = image.crop((0, 0, 480, 350))

        image_path = zfile.screenshot_phys_path()
        if image_path:
            image.save(image_path)
        else:
            # No screenshot yet -- derive a path from letter/filename
            image_path = os.path.join(
                SITE_ROOT +
                "/museum_site/static/images/screenshots/{}/{}".format(
                    zfile.letter, zfile.filename[:-4]
                ) + ".png")
            image.save(image_path)
        zfile.screenshot = zfile.filename[:-4] + ".png"
        zfile.basic_save()

    # Clean up the world extracted for preview generation (if any)
    if os.path.isfile(
        SITE_ROOT + "/museum_site/static/data/" + request.GET.get("file", "")
    ):
        os.remove(SITE_ROOT + "/museum_site/static/data/" + request.GET["file"])

    # Optimize the image.
    # BUG FIX: optimize_image() was previously called with image_path even
    # when no screenshot was written this request (image_path == "").
    if image_path:
        optimize_image(image_path)
    return render(request, "museum_site/tools/set_screenshot.html", data)
| 2.203125 | 2 |
python-packages/nolearn-0.5/build/lib.linux-x86_64-2.7/nolearn/console.py | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | 2 | 12760224 | """The console module contains the :class:`Command` class that's
useful for building command-line scripts.
Consider a function `myfunc` that you want to call directly from the
command-line, but you want to avoid writing glue that deals with
argument parsing, converting those arguments to Python types and
passing them to other functions. Here's how `myfunc` could look like:
.. code-block:: python
def myfunc(a_string, a_list):
print a_string in a_list
`myfunc` takes two arguments; one is expected to be a string, the other
one a list.
Let's use :class:`Command` to build a console script:
.. code-block:: python
from nolearn.console import Command
__doc__ = '''
Usage:
myprogram myfunc <config_file> [options]
'''
schema = '''
[myfunc]
a_string = string
a_list = listofstrings
'''
class Main(Command):
__doc__ = __doc__
schema = schema
funcs = [myfunc]
main = Main()
Note how we define a `schema` that has a definition of `myfunc`'s
arguments and their types. See :mod:`nolearn.inischema` for more
details on that.
We can then include this `main` function in our `setup.py` to get a
console script:
.. code-block:: python
setup(
name='myprogram',
# ...
entry_points='''
[console_scripts]
myprogram = myprogram.mymodule.main
''',
)
With this in place, you can now call the `myprogram` script like so:
.. code-block:: bash
$ myprogram myfunc args.ini
Where `args.ini` might look like:
.. code-block:: ini
[myfunc]
a_string = needle
a_list = haystack haystack needle haystack haystack
These constitute the two named arguments that will be passed into
`myfunc`. Passing of values is always done through `.ini` files.
You may also call your script with a `--profile=<fn>` option, which
you can use to profile your program using Python's standard
:mod:`cProfile` module.
A `--pdb` option is also available which allows you to automatically
enter post-mortem debugging when your script exits abnormally.
"""
import cProfile
import pdb
import os
import sys
import traceback
import warnings
import docopt
from .inischema import parse_config
warnings.warn("""
The nolearn.console module will be removed in nolearn 0.6. If you
want to continue using this module, please consider copying the code
into your own project. And take a look at alternatives like the click
library.
""")
DEFAULT_OPTIONS = """
Options:
-h --help Show this screen
--pdb Do post mortem debugging on errors
--profile=<fn> Save a profile to <fn>
"""
class Command(object):
    """Base class for building docopt-driven console scripts.

    Subclasses set ``__doc__`` (the docopt usage string), ``schema`` (an
    inischema definition of each function's arguments) and ``funcs`` (the
    callables that may be dispatched to from the command line).
    """

    __doc__ = None
    schema = None
    funcs = []

    def __init__(self, **kwargs):
        # Allow arbitrary attribute overrides at construction time.
        vars(self).update(kwargs)

    def doc(self):
        """Return the usage string, appending DEFAULT_OPTIONS if absent."""
        doc = self.__doc__
        if 'Options:' not in doc:
            doc = doc + DEFAULT_OPTIONS
        return doc

    def __call__(self, argv=sys.argv):
        """Parse *argv*, read the config file, and dispatch to a function."""
        doc = self.doc()
        arguments = docopt.docopt(doc, argv=argv[1:])
        self.arguments = arguments

        # Find the first registered function named on the command line.
        for func in self.funcs:
            if arguments[func.__name__]:
                break
        else:  # pragma: no cover
            raise KeyError("No function found to call.")

        with open(arguments['<config_file>']) as config_file:
            self.config = parse_config(self.schema, config_file.read())

        # Export any [env] section entries as environment variables.
        env = self.config.get('env', {})
        for key, value in env.items():
            os.environ[key.upper()] = value

        kwargs = self.config.get(func.__name__, {})

        # If profiling, wrap the function with another one that does the
        # profiling:
        if arguments.get('--profile'):
            func_ = func

            def prof(**kwargs):
                cProfile.runctx(
                    'func(**kwargs)',
                    globals(),
                    {'func': func_, 'kwargs': kwargs},
                    filename=arguments['--profile'],
                )
            func = prof

        # If debugging, call pdb.post_mortem() in the except clause:
        try:
            func(**kwargs)
        except:
            if arguments.get('--pdb'):
                traceback.print_exc()
                # BUG FIX: sys.exc_traceback was removed in Python 3;
                # sys.exc_info()[2] is equivalent and works on 2 and 3.
                pdb.post_mortem(sys.exc_info()[2])
            else:  # pragma: no cover
                raise
| 3.90625 | 4 |
pywinauto/unittests/test_findwindows.py | rla006/pywinauto | 1 | 12760225 | # GUI Application automation and testing library
# Copyright (C) 2006-2018 <NAME> and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for findwindows.py"""
from __future__ import print_function
import unittest
import sys, os
sys.path.append(".")
from pywinauto.application import Application
from pywinauto.sysinfo import is_x64_Python
from pywinauto.findwindows import find_window, find_windows
from pywinauto.findwindows import WindowNotFoundError
from pywinauto.findwindows import WindowAmbiguousError
from pywinauto.timings import Timings
# Locate the bundled MFC sample binaries relative to this test file;
# a 64-bit Python needs the x64 builds.
mfc_samples_folder = os.path.join(
    os.path.dirname(__file__), r"..\..\apps\MFC_samples")
if is_x64_Python():
    mfc_samples_folder = os.path.join(mfc_samples_folder, 'x64')
# The application launched by every test in this module.
mfc_app_1 = os.path.join(mfc_samples_folder, u"CmnCtrl2.exe")
class FindWindowsTestCases(unittest.TestCase):
    """Unit tests for findwindows.py module"""

    def setUp(self):
        """Launch the MFC sample app and grab its main dialog."""
        Timings.defaults()
        # start the application
        self.app = Application(backend='win32')
        self.app = self.app.Start(mfc_app_1)
        self.dlg = self.app.CommonControlsSample

    def tearDown(self):
        """Close the application after tests"""
        self.app.kill_()

    def test_find_window(self):
        """find_window() locates controls and raises on bad criteria."""
        ok_button = self.dlg.OK.WrapperObject()
        found = find_window(process=self.app.process, best_match='OK',
                            top_level_only=False)
        self.assertEqual(found, ok_button.handle)
        # No window has class name 'OK'
        self.assertRaises(WindowNotFoundError, find_window,
                          process=self.app.process, class_name='OK')
        # Several buttons exist, so a single-window lookup is ambiguous
        self.assertRaises(WindowAmbiguousError, find_window,
                          process=self.app.process, class_name='Button',
                          top_level_only=False)

    def test_find_windows(self):
        """find_windows() returns all matches and raises on bad criteria."""
        edit_handles = [child.handle for child in self.dlg.children()
                        if child.class_name() == 'Edit']
        found = find_windows(process=self.app.process, class_name='Edit',
                             top_level_only=False)
        self.assertEqual(set(found), set(edit_handles))
        self.assertRaises(WindowNotFoundError, find_windows,
                          process=self.app.process,
                          class_name='FakeClassName', found_index=1)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 1.484375 | 1 |
generated-libraries/python/netapp/aggr/aggrraidtype.py | radekg/netapp-ontap-lib-get | 2 | 12760226 | <reponame>radekg/netapp-ontap-lib-get
class Aggrraidtype(basestring):
    """String type for an aggregate's RAID configuration.

    Accepted values:
      - "raid_dp"
      - "raid4"
      - "raid0"
      - "mixed_raid_type"
    """

    @staticmethod
    def get_api_name():
        # Name of this type as it appears in the ONTAP API.
        return "aggrraidtype"
| 1.578125 | 2 |
code/python/setup.py | clburlison/vendored | 22 | 12760227 | <reponame>clburlison/vendored<gh_stars>10-100
"""Setup script to compile Python2 for macOS."""
# standard libs
from distutils.dir_util import mkpath
import os
import shutil
import sys
import inspect
import tempfile
import argparse
# our libs. kind of hacky since this isn't a valid python package.
CURRENT_DIR = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
PARENT_DIR = os.path.dirname(CURRENT_DIR)
sys.path.insert(0, PARENT_DIR)
from vendir import config # noqa
from vendir import hash_helper # noqa
from vendir import log # noqa
from vendir import package # noqa
from vendir import runner # noqa
from vendir import root # noqa
# Parsed build configuration and the install paths derived from it.
CONFIG = config.ConfigSectionMap()
PYTHON_BUILD_DIR = os.path.abspath(CONFIG['python_build_dir'])  # source/build tree
BASE_INSTALL_PATH = CONFIG['base_install_path']
BASE_INSTALL_PATH_S = CONFIG['base_install_path'].lstrip('/')  # relative form
PYTHON2_VERSION = CONFIG['python2_version']
PYTHON2_INSTALL = os.path.join(BASE_INSTALL_PATH, 'Python', '2.7')
PYTHON3_VERSION = CONFIG['python3_version']
PYTHON3_INSTALL = os.path.join(BASE_INSTALL_PATH, 'Python', '3.6')
# Python is linked against this OpenSSL at compile time; see build().
OPENSSL_INSTALL_PATH = os.path.join(CONFIG['base_install_path'], 'openssl')
def dl_and_extract_python(dist_url, dist_hash):
    """Download Python distribution and extract it to PYTHON_BUILD_DIR.

    Args:
        dist_url: URL of the Python source tarball.
        dist_hash: Expected SHA-256 hex digest of the download.

    Exits the process on download or hash-verification failure.
    """
    # Start from a clean build directory every time.
    if os.path.isdir(PYTHON_BUILD_DIR):
        shutil.rmtree(PYTHON_BUILD_DIR, ignore_errors=True)
    mkpath(PYTHON_BUILD_DIR)

    # Download Python
    log.info("Downloading Python from: {}".format(dist_url))
    temp_filename = os.path.join(tempfile.mkdtemp(), 'tempdata')
    cmd = ['/usr/bin/curl', '--show-error', '--no-buffer',
           '--fail', '--progress-bar',
           '--speed-time', '30',
           '--location',
           '--url', dist_url,
           '--output', temp_filename]
    # We are calling os.system so we can get download progress live
    rc = runner.system(cmd)
    if rc == 0 or rc is True:
        log.debug("Python download successful")
    else:
        log.error("Python download failed with exit code: '{}'".format(rc))
        sys.exit(1)

    # Verify Python download hash before trusting the tarball.
    download_hash = hash_helper.getsha256hash(temp_filename)
    config_hash = dist_hash
    if download_hash != config_hash:
        log.error("Hash verification of Python download has failed. Download "
                  "hash of '{}' does not match config hash '{}'".format(
                      download_hash, config_hash))
        sys.exit(1)
    else:
        log.detail("Hash verification of Python successful")

    # Extract Python to the PYTHON_BUILD_DIR
    log.info("Extracting Python...")
    cmd = ['/usr/bin/tar', '-xf', temp_filename, '-C', PYTHON_BUILD_DIR,
           '--strip-components', '1']
    out = runner.Popen(cmd)
    if out[2] == 0:
        log.debug("Extraction completed successfully")
    else:
        # NOTE(review): extraction failure is only logged; execution
        # continues -- confirm whether this should sys.exit(1) like the
        # failures above.
        log.error("Extraction has failed: {}".format(out[0]))
    # NOTE(review): only the temp file is removed; the mkdtemp() directory
    # that contained it is left behind.
    os.remove(temp_filename)
def build(py_version, py_install_path, skip):
    """Build custom Python from source.

    Args:
        py_version: Full version string (e.g. "2.7.x"); the major digit
            selects version-specific steps.
        py_install_path: Prefix to configure/install Python into
            (wiped and recreated).
        skip: When True, skip compiling entirely (development shortcut).

    Side effects: changes the process CWD into the build/install trees and
    shells out to configure/make/pip.
    """
    py_major_ver = py_version.split('.')[0]
    log.debug("Currently building: {}".format(py_major_ver))
    # Step 1: change into our build directory
    os.chdir(PYTHON_BUILD_DIR)
    # Don't compile Python if the skip option is passed
    if skip:
        log.info("Python compile skipped due to -skip option")
        return
    # Step 1.5: Add extra modules
    # Python 2 builds statically enable _socket/_ssl against our OpenSSL.
    if py_major_ver == '2':
        setup_dist = os.path.join(PYTHON_BUILD_DIR, 'Modules/Setup.dist')
        with open(setup_dist, "a") as f:
            log.debug("Adding additional modules to be included...")
            f.write("_socket socketmodule.c timemodule.c\n")
            f.write("_ssl _ssl.c -DUSE_SSL "
                    "-I{0}/include -I{0}/include/openssl -L{0}/lib "
                    "-lssl -lcrypto".format(OPENSSL_INSTALL_PATH))
    # Step 2: Run the Configure setup of Python to set correct paths
    os.chdir(PYTHON_BUILD_DIR)
    if os.path.isdir(py_install_path):
        shutil.rmtree(py_install_path, ignore_errors=True)
    mkpath(py_install_path)
    log.info("Configuring Python...")
    cmd = ['./configure',
           '--prefix={}'.format(py_install_path),
           # 'CPPFLAGS=-I{}/include'.format(OPENSSL_INSTALL_PATH),
           # 'LDFLAGS=-L{}/lib'.format(OPENSSL_INSTALL_PATH),
           'CFLAGS=-I{}/include'.format(OPENSSL_INSTALL_PATH),
           'LDFLAGS=-L{}/lib'.format(OPENSSL_INSTALL_PATH),
           '--enable-shared',
           '--enable-toolbox-glue',
           '--with-ensurepip=install',
           '--enable-ipv6',
           '--with-threads',
           '--datarootdir={}/share'.format(py_install_path),
           '--datadir={}/share'.format(py_install_path),
           # '--enable-optimizations',  # adding this flag will run tests
           ]
    runner.Popen(cmd, stdout=sys.stdout)
    # Step 3: compile Python. this will take a while.
    # FIXME: We need to check return codes.
    log.info("Compiling Python. This will take a while time...")
    log.detail("Running Python make routine...")
    cmd = ['/usr/bin/make']
    runner.Popen(cmd, stdout=sys.stdout)
    sys.stdout.flush()  # does this help?
    log.debug("Create some temp files thats")
    log.detail("Running Python make install routine...")
    cmd = ['/usr/bin/make', 'install']
    runner.Popen(cmd, stdout=sys.stdout)
    sys.stdout.flush()  # does this help?
    # Step 4: Install pip + requirements
    os.chdir(os.path.join(py_install_path, 'bin'))
    # Update pip to latest
    log.info("Upgrading pip...")
    if py_major_ver == '2':
        cmd = ['./pip']
    elif py_major_ver == '3':
        cmd = ['./pip3']
    cmd = cmd + ['install', '--upgrade', 'pip']
    runner.Popen(cmd, stdout=sys.stdout)
    # Install all pip modules from requirements.txt
    log.info("Install requirements...")
    if py_major_ver == '2':
        cmd = ['./python2.7', '-m', 'pip', 'install', '-r',
               os.path.join(CURRENT_DIR, 'requirements2.txt')]
    elif py_major_ver == '3':
        cmd = ['./python3.6', '-m', 'pip', 'install', '-r',
               os.path.join(CURRENT_DIR, 'requirements3.txt')]
    runner.Popen(cmd, stdout=sys.stdout)
def main():
    """Parse arguments, optionally compile Python, optionally package it."""
    parser = argparse.ArgumentParser(prog='Python setup',
                                     description='This script will compile '
                                     'Python 1.0.1+ and optionally create '
                                     'a native macOS package.')
    parser.add_argument('-b', '--build', action='store_true',
                        help='Compile the Python binary')
    parser.add_argument('-s', '--skip', action='store_true',
                        help='Skip recompiling if possible. Only recommended '
                             'for development purposes.')
    parser.add_argument('-p', '--pkg', action='store_true',
                        help='Package the Python output directory.')
    parser.add_argument('-v', '--verbose', action='count', default=1,
                        help="Increase verbosity level. Repeatable up to "
                             "2 times (-vv)")
    parser.add_argument('--py', default='2',
                        help='Python version to build. Accepts 2 or 3.')
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()

    # set argument variables
    log.verbose = args.verbose
    skip = args.skip

    root.root_check()

    # OpenSSL must already be installed -- Python is linked against it.
    if not os.path.isdir(OPENSSL_INSTALL_PATH):
        log.warn("OpenSSL must be installed to '{}' prior to compiling "
                 "Python.".format(OPENSSL_INSTALL_PATH))
        sys.exit(1)

    if str(args.py) == '2':
        dist_url = CONFIG['python2_dist']
        dist_hash = CONFIG['python2_dist_hash']
        py_install_path = PYTHON2_INSTALL
        py_version = PYTHON2_VERSION
    elif str(args.py) == '3':
        dist_url = CONFIG['python3_dist']
        dist_hash = CONFIG['python3_dist_hash']
        py_install_path = PYTHON3_INSTALL
        py_version = PYTHON3_VERSION
    else:
        # BUG FIX: sys.stderr is a file object, not a callable; calling it
        # raised TypeError instead of printing the intended message.
        sys.stderr.write('Unsupported python version\n')
        sys.exit(1)

    if args.build:
        log.info("Building Python...")  # typo fix: was "Bulding"
        # When the skip option is passed and the build directory exists, skip
        # download and compiling of Python. Note we still do linking.
        if skip:
            log.debug("Skip flag was provided. We will not compile Python "
                      "on this run.")
        else:
            dl_and_extract_python(dist_url, dist_hash)
            # reset trigger flag as we needed to download Python
            skip = False
        build(py_version, py_install_path, skip=skip)

    if args.pkg:
        log.info("Building a package for Python...")
        # Change back into our local directory so we can output our package
        # via relative paths
        os.chdir(CURRENT_DIR)
        rc = package.pkg(root=py_install_path,
                         version=py_version,
                         identifier="{}.python".format(CONFIG['pkgid']),
                         install_location=py_install_path,
                         output='python-{}.pkg'.format(py_version),
                         )
        if rc == 0:
            log.info("Python packaged properly")
        else:
            log.error("Looks like package creation failed")
main()
| 2.078125 | 2 |
locations/spiders/primrose_schools.py | mfjackson/alltheplaces | 0 | 12760228 | <reponame>mfjackson/alltheplaces
import json
import scrapy
from locations.items import GeojsonPointItem
class PrimroseSchoolsSpider(scrapy.Spider):
    name = "primrose_schools"
    item_attributes = {"brand": "Primrose Schools"}
    allowed_domains = ["primroseschools.com"]
    start_urls = ["https://www.primroseschools.com/find-a-school/"]

    def parse(self, response):
        """Fan out one search request per 50-mile-radius US centroid."""
        points_path = (
            "./locations/searchable_points/us_centroids_50mile_radius.csv"
        )
        with open(points_path) as points:
            next(points)  # skip the header row
            for line in points:
                fields = line.replace("\n", "").split(",")
                search_url = (
                    "https://www.primroseschools.com/find-a-school/"
                    "?search_string=USA&latitude={la}&longitude={lo}".format(
                        la=fields[1], lo=fields[2]
                    )
                )
                yield scrapy.Request(
                    response.urljoin(search_url), callback=self.parse_search
                )

    def parse_search(self, response):
        """Emit one point item per school in the embedded JSON payload."""
        payload = response.xpath(
            '//script[@type="application/json"]/text()'
        ).get()
        for school in json.loads(payload):
            if not school["address_1"]:
                continue
            yield GeojsonPointItem(
                name=school["name"],
                addr_full=school["address_1"] + " " + school["address_2"],
                city=school["city"],
                state=school["state"],
                postcode=school["zip_code"],
                phone=school["phone"],
                ref=school["id"],
                website="https://www.primroseschools.com" + school["url"],
                lat=float(school["latitude"]),
                lon=float(school["longitude"]),
            )
| 2.90625 | 3 |
app_entry/urls.py | veryqq/python_django_it | 14 | 12760229 | <filename>app_entry/urls.py<gh_stars>10-100
# encoding: utf-8
"""
Create on: 2018-09-22 下午9:56
author: sato
mail: <EMAIL>
life is short, you need python
"""
from django.contrib.auth.decorators import login_required
from django.urls import path, re_path
from app_entry import views
urlpatterns = [
    # Main entry page
    path(r"", views.EntryView.as_view(), name="index"),
    # Service list (login required; unauthenticated users redirect to "/")
    path(r"servicelist/", login_required(views.ServiceListView.as_view(), login_url="/"), name="servicelist"),
    # Security center
    path("sysecurity/", login_required(views.SysecurityView.as_view(), login_url="/"), name="sysecurity"),
    # file upload
    path(r"upclient/", login_required(views.UpclientView.as_view(), login_url="/"), name="upclient"),
    # Network interface (NIC) pages
    path(r"eth/", login_required(views.EthView.as_view(), login_url="/"), name="eth"),
    # Fetch / commit NIC information
    re_path(r"eth/(check|commit)/", login_required(views.ethinfo, login_url="/"), name="ethcontrol"),
    # Log viewer
    path(r"log/", login_required(views.LogInfo.as_view(), login_url="/"), name="log"),
    # terminal
    path(r"terminal/", login_required(views.Terminal.as_view(), login_url="/"), name="terminal"),
]
| 1.976563 | 2 |
validate_gtfs.py | cuappdev/ithaca-transit-live-tracking | 3 | 12760230 | <reponame>cuappdev/ithaca-transit-live-tracking<filename>validate_gtfs.py
import csv
def validate_gtfs(gtfs_dir="./tcat-ny-us"):
    """Check that every trip in a GTFS feed has at least one stop time.

    Args:
        gtfs_dir: Directory containing ``trips.txt`` and ``stop_times.txt``.
            Defaults to the bundled TCAT feed, preserving the original
            no-argument behavior.

    Returns:
        The list of trip identifiers with no stop times (empty on success).
        The result is also printed for command-line use.
    """
    # Get all trip_id's from trips.txt (third column)
    trip_ids = []
    with open(f"{gtfs_dir}/trips.txt", "r") as trips_txt:
        reader = csv.reader(trips_txt)
        # Skip the header row
        next(reader)
        for row in reader:
            trip_ids.append(row[2])

    # Get all trip_id's that have a stop time (first column of stop_times.txt)
    trip_ids_w_stop_times = set()
    with open(f"{gtfs_dir}/stop_times.txt", "r") as stop_times_txt:
        reader = csv.reader(stop_times_txt)
        # Skip the header row
        next(reader)
        for row in reader:
            trip_ids_w_stop_times.add(row[0])

    missing_trip_ids = [trip_id for trip_id in trip_ids
                        if trip_id not in trip_ids_w_stop_times]
    if not missing_trip_ids:
        print("SUCCESS: All trip identifiers have stop times")
    else:
        print(f"ERROR: The following trip identifiers are missing stop times.\n{missing_trip_ids}")
    return missing_trip_ids
if __name__ == "__main__":
    # Validate the bundled TCAT feed when run as a script.
    validate_gtfs()
| 3.28125 | 3 |
pulser-core/pulser/parametrized/decorators.py | lvignoli/Pulser | 0 | 12760231 | <reponame>lvignoli/Pulser
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decorators for adding parametrization support."""
from __future__ import annotations
from collections.abc import Callable
from functools import wraps
from itertools import chain
from typing import Any, TypeVar, cast
from pulser.parametrized import Parametrized, ParamObj
F = TypeVar("F", bound=Callable)
def parametrize(func: F) -> F:
    """Wrap *func* so calls with parametrized arguments are deferred.

    If any positional or keyword argument is a :class:`Parametrized`
    instance, the call is captured as a :class:`ParamObj` instead of being
    executed immediately; otherwise *func* runs as normal.

    Note:
        Designed for use in class methods. Usage in instance or static methods
        is not supported, and in regular functions is not tested.
    """

    @wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        deferred = any(
            isinstance(value, Parametrized)
            for value in chain(args, kwargs.values())
        )
        if deferred:
            return ParamObj(func, *args, **kwargs)
        return func(*args, **kwargs)

    return cast(F, wrapper)
| 2.078125 | 2 |
tests/test_config.py | ambrozic/http3 | 1 | 12760232 | <reponame>ambrozic/http3
import os
import ssl
import pytest
import http3
@pytest.mark.asyncio
async def test_load_ssl_config():
    """A default SSLConfig must verify peer certificates."""
    context = await http3.SSLConfig().load_ssl_context()
    assert context.verify_mode == ssl.VerifyMode.CERT_REQUIRED
@pytest.mark.asyncio
async def test_load_ssl_config_verify_non_existing_path():
    """A bogus CA bundle path should raise IOError on load."""
    cfg = http3.SSLConfig(verify="/path/to/nowhere")
    with pytest.raises(IOError):
        await cfg.load_ssl_context()
@pytest.mark.asyncio
async def test_load_ssl_config_verify_existing_file():
    """Verifying against the default CA bundle file keeps CERT_REQUIRED."""
    cfg = http3.SSLConfig(verify=http3.config.DEFAULT_CA_BUNDLE_PATH)
    context = await cfg.load_ssl_context()
    assert context.verify_mode == ssl.VerifyMode.CERT_REQUIRED
@pytest.mark.asyncio
async def test_load_ssl_config_verify_directory():
    """Verification can be pointed at a directory of CA certificates."""
    ca_dir = os.path.dirname(http3.config.DEFAULT_CA_BUNDLE_PATH)
    config = http3.SSLConfig(verify=ca_dir)
    ctx = await config.load_ssl_context()
    assert ctx.verify_mode == ssl.VerifyMode.CERT_REQUIRED
@pytest.mark.asyncio
async def test_load_ssl_config_cert_and_key(cert_and_key_paths):
    """A (certificate, key) pair loads into a verifying context."""
    cert_path, key_path = cert_and_key_paths
    config = http3.SSLConfig(cert=(cert_path, key_path))
    ctx = await config.load_ssl_context()
    assert ctx.verify_mode == ssl.VerifyMode.CERT_REQUIRED
@pytest.mark.asyncio
async def test_load_ssl_config_cert_without_key_raises(cert_and_key_paths):
    """A certificate without its private key fails to load."""
    cert_path, _ = cert_and_key_paths
    config = http3.SSLConfig(cert=cert_path)
    with pytest.raises(ssl.SSLError):
        await config.load_ssl_context()
@pytest.mark.asyncio
async def test_load_ssl_config_no_verify():
    """SSLConfig(verify=False) must disable certificate verification.

    The original signature carried a dead ``verify=False`` parameter that
    pytest never fills in and the body never read; the real flag is the
    one passed to ``SSLConfig`` below.
    """
    ssl_config = http3.SSLConfig(verify=False)
    context = await ssl_config.load_ssl_context()
    assert context.verify_mode == ssl.VerifyMode.CERT_NONE
def test_ssl_repr():
    """repr() of an SSL config lists its cert and verify settings."""
    # Bind to ``ssl_config`` rather than ``ssl``: the original shadowed the
    # stdlib ``ssl`` module imported at the top of this file.
    ssl_config = http3.SSLConfig(verify=False)
    assert repr(ssl_config) == "SSLConfig(cert=None, verify=False)"
def test_timeout_repr():
    """repr() collapses to the short form when every timeout is equal."""
    assert repr(http3.TimeoutConfig(timeout=5.0)) == "TimeoutConfig(timeout=5.0)"

    partial = http3.TimeoutConfig(read_timeout=5.0)
    expected = (
        "TimeoutConfig(connect_timeout=None, read_timeout=5.0, write_timeout=None)"
    )
    assert repr(partial) == expected
def test_limits_repr():
    """repr() of pool limits spells out every field."""
    expected = "PoolLimits(soft_limit=None, hard_limit=100, pool_timeout=None)"
    assert repr(http3.PoolLimits(hard_limit=100)) == expected
def test_ssl_eq():
    """Two SSL configs built with identical settings compare equal."""
    # Bind to ``ssl_config`` rather than ``ssl``: the original shadowed the
    # stdlib ``ssl`` module imported at the top of this file.
    ssl_config = http3.SSLConfig(verify=False)
    assert ssl_config == http3.SSLConfig(verify=False)
def test_timeout_eq():
    """Two timeout configs built from the same value compare equal."""
    first = http3.TimeoutConfig(timeout=5.0)
    second = http3.TimeoutConfig(timeout=5.0)
    assert first == second
def test_limits_eq():
    """Pool limits with identical fields compare equal."""
    a = http3.PoolLimits(hard_limit=100)
    b = http3.PoolLimits(hard_limit=100)
    assert a == b
def test_timeout_from_tuple():
    """A (connect, read, write) tuple of equal values collapses to one timeout."""
    from_tuple = http3.TimeoutConfig(timeout=(5.0, 5.0, 5.0))
    assert from_tuple == http3.TimeoutConfig(timeout=5.0)
def test_timeout_from_config_instance():
    """Passing an existing TimeoutConfig as ``timeout`` copies its values."""
    original = http3.TimeoutConfig(timeout=5.0)
    assert http3.TimeoutConfig(original) == http3.TimeoutConfig(timeout=5.0)
| 2.046875 | 2 |
rxsci/operators/progress.py | maki-nage/rxsci | 3 | 12760233 | from timeit import default_timer as timer
import rx
import rxsci as rs
def progress(name, threshold, measure_throughput=True):
    '''Prints the progress on item processing

    Prints the number of items that have been processed every threshold items.

    The source can be an Observable or a MuxObservable.

    Args:
        name: Name associated to this progress.
        threshold: Period of display for the progress, in unit of item count.
        measure_throughput: [Optional] When True, also print the throughput
            in items per second measured over the last threshold window.

    Returns:
        The source observable.
    '''
    def _progress(acc, i):
        # Accumulator: (last item, items seen, items left before the next
        # report, timestamp of the previous report).  The scan seed is None,
        # so the default here must be a 4-tuple; the original 3-tuple
        # (0, threshold, None) raised ValueError on the very first item.
        _, counter, countdown, prev_time = acc or (None, 0, threshold, None)
        counter += 1
        countdown -= 1
        if countdown <= 0:
            countdown = threshold
            if measure_throughput is True:
                cur_time = timer()
                if prev_time is not None:
                    # Integer items/second over the last window.
                    mps = threshold // (cur_time - prev_time)
                else:
                    mps = None  # first report: no previous window to measure
                prev_time = cur_time
                print("{} progress: {} ({} msg/s)".format(name, counter, mps))
            else:
                print("{} progress: {}".format(name, counter))

        return (i, counter, countdown, prev_time)

    return rx.pipe(
        rs.ops.scan(_progress, seed=None),
        rs.ops.map(lambda i: i[0]),
    )
| 3.4375 | 3 |
ex06 - interploation search.py | neong83/algorithm_practices | 0 | 12760234 | <gh_stars>0
"""
interploation search
# interploation and binary are both require a SORTED list
let said you have the follow phone number prefix array, and you are looking for 1144
0011, 0022, 0033, 1144, 1166, 1188, 3322, 3344, 3399
instead of using binary search in the middle, or linear search from the left.
we only want to search the subset with same prefix, like inside [1144, 1166, 1188]
to calculate the mid
mid = low + ((high - low) / (A[high] - A[low])) * (x - A[low]) # x is the value we are seeking
"""
# Demo fixture: a sorted list of 1,000,001 consecutive integers and a
# target near the end (a worst case for the linear scan below).
data = list(range(1_000_001))
search_value = 999_999
def linear_search(search_value, data):
    """Scan *data* left to right and print how many comparisons it took
    to find *search_value*.

    Fixes over the original: the reported attempt count is 1-based
    (finding the first element takes one comparison, not zero), and the
    scan stops at the first hit instead of walking the whole list.
    Prints nothing when the value is absent.
    """
    for attempts, value in enumerate(data, start=1):
        if value == search_value:
            print(f"Element is found after {attempts} attempts")
            return
def binary_search(search_value, data):
    """Classic binary search on a sorted list, printing the number of
    probes needed to locate *search_value*.

    Fix over the original: the loop now terminates (printing nothing)
    when the search interval becomes empty; previously a missing value
    caused an infinite loop.  The probe count on a hit is unchanged.
    """
    left = 0
    right = len(data) - 1
    attempts = 1
    while left <= right:
        # Overflow-safe midpoint (also matches the original arithmetic).
        mid = ((right - left) // 2) + left
        if data[mid] == search_value:
            print(f"Element is found after {attempts} attempts")
            return
        if search_value < data[mid]:
            right = mid - 1
        else:
            left = mid + 1
        attempts += 1
def interploation_search(search_value, data):
    """Interpolation search on a sorted (roughly uniform) list, printing
    the number of probes used to locate *search_value*.

    Fixes over the original:
    * the slope term is now ``(right - left) / (data[right] - data[left])``;
      the original was missing the inner parentheses and computed
      ``(right - left) / data[right] - data[left]`` instead,
    * the loop terminates (printing nothing) when the interval empties or
      the target falls outside the remaining value range, instead of
      spinning forever,
    * a flat segment (``data[right] == data[left]``) no longer divides
      by zero.
    """
    left = 0
    right = len(data) - 1
    attempts = 1
    while left <= right and data[left] <= search_value <= data[right]:
        if data[right] == data[left]:
            mid = left  # flat segment: any index works, avoid div-by-zero
        else:
            mid = int(
                left
                + ((right - left) / (data[right] - data[left]))
                * (search_value - data[left])
            )
        if data[mid] == search_value:
            print(f"Element is found after {attempts} attempts")
            return
        if search_value < data[mid]:
            right = mid - 1
        else:
            left = mid + 1
        attempts += 1
# Run all three strategies against the same fixture so their reported
# attempt counts can be compared side by side.
linear_search(search_value, data)
binary_search(search_value, data)
interploation_search(search_value, data)
| 3.5625 | 4 |
Screen.py | ytyaru/Pygame.line.201707171836 | 0 | 12760235 | <reponame>ytyaru/Pygame.line.201707171836
import pygame
class Screen:
    """Wraps the pygame display surface together with its size and the
    background color used by Fill()."""

    def __init__(self, width=320, height=240, color=None):
        """Create the pygame display window.

        Args:
            width: Display width in pixels.
            height: Display height in pixels.
            color: Background RGB triple as a list; defaults to black.
                (The original used a mutable default list that was shared
                between all instances.)
        """
        # Build a fresh list per instance to avoid the shared
        # mutable-default-argument trap.
        self.__color = [0, 0, 0] if color is None else color
        self.__size = (width, height)
        self.__screen = pygame.display.set_mode(self.__size)

    @property
    def Screen(self):
        """The underlying pygame display surface."""
        return self.__screen

    @property
    def Size(self):
        """(width, height) tuple of the display."""
        return self.__size

    @property
    def Color(self):
        """Background RGB color used by Fill()."""
        return self.__color

    def Fill(self):
        """Fill the entire display surface with the background color."""
        self.__screen.fill(self.__color)
| 3.109375 | 3 |
laser/laser.py | pilesofspam/fork-the-zombies | 0 | 12760236 | <filename>laser/laser.py
import cv2
import numpy as np
import screeninfo
import time
# Calibration-then-tracking script for a projector + webcam + laser setup:
# 1) project a known checkerboard, 2) find it in the camera image,
# 3) compute the camera->projector homography, 4) track a red laser dot in
# camera space and draw its contours/trail back onto the projected image.
cap = cv2.VideoCapture(0) # video capture source camera (Here webcam of laptop)
screen = screeninfo.get_monitors()[0]
width, height = 1280,800
# Reference checkerboard image exactly as it is sent to the projector.
img = cv2.imread('./checker16x9_1280.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# 15x9 inner-corner grid of the projected checkerboard.
ret, original_corners = cv2.findChessboardCorners(gray,(15,9), None)
window_name = 'projector'
cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
cv2.moveWindow(window_name, screen.x - 1, screen.y - 1)
cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
cv2.imshow('projector',img)
cv2.waitKey(2000)
# Several throwaway grabs flush the camera's internal frame buffer so the
# frame we actually process shows the projected pattern, not a stale image.
ret,frame = cap.read() # return a single frame in variable `frame`
ret,frame = cap.read() # return a single frame in variable `frame`
ret,frame = cap.read() # return a single frame in variable `frame`
ret,frame = cap.read() # return a single frame in variable `frame`
ret,frame = cap.read() # return a single frame in variable `frame`
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#invgray=cv2.bitwise_not(gray)
# Inverted Otsu threshold binarizes the camera's view of the board so the
# chessboard detector can find the corners under projector lighting.
(T, thresh) = cv2.threshold(gray, 0, 255,
    cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
ret, found_corners = cv2.findChessboardCorners(thresh, (15, 9), None)
# Let's show off some debug info!
image=cv2.drawChessboardCorners(frame, (15,9), found_corners, ret )
cv2.imshow('projector',image)
cv2.waitKey(2000)
print("Original Corners")
print(original_corners)
print("New corners")
print(found_corners)
# Homography that maps camera coordinates onto projector coordinates.
h,status = cv2.findHomography(found_corners, original_corners)
# from here we can warp perspective
print("Homography map")
print(h)
blank = np.zeros((height,width,3), np.uint8)
run=True
# mode 0: draw laser contours each frame; mode 1: draw a persistent trail.
mode=0
kernel = np.ones((5, 5), 'uint8')
# Previous trail point; (0, 0) doubles as the "no previous point" marker.
OldX=0
OldY=0
while(run):
    ret, frame=cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # HSV band for the bright laser dot -- low saturation, very high value.
    lower_red = np.array([0,0,200])
    upper_red = np.array([255,110,255])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(frame,frame, mask= mask)
    #erode_img = cv2.erode(res, kernel, iterations=1)
    dilate_img = cv2.dilate(res, kernel, iterations=1)
    # Warp the detection into projector space using the calibration above.
    im_dst = cv2.warpPerspective(dilate_img, h, (width,height))
    gray = cv2.cvtColor(im_dst, cv2.COLOR_BGR2GRAY)
    contours, hierarchy = cv2.findContours(image=gray, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_NONE)
    if (mode == 0):
        # Contour mode: outline the laser blob(s), clearing every frame.
        cv2.drawContours(image=blank, contours=contours, contourIdx=-1, color=(0, 255, 0), thickness=5, lineType=cv2.LINE_AA)
        cv2.imshow('projector',blank)
        blank = np.zeros((height,width,3), np.uint8)
    elif (mode == 1):
        # Trail mode: connect successive blob centroids with line segments.
        if len(contours) != 0:
            c = max(contours, key = cv2.contourArea)
            M = cv2.moments(c)
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            if (OldX > 0 and OldY>0):
                blank = cv2.line(blank, (OldX,OldY), (cX,cY), (0,255,0), 2)
            OldX=cX
            OldY=cY
            cv2.imshow('projector',blank)
        else:
            # Laser lost: reset so the trail does not jump across gaps.
            OldX=0
            OldY=0
    # ESC quits; SPACE clears the canvas and toggles between the two modes.
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
    elif (k==32):
        blank = np.zeros((height,width,3), np.uint8)
        mode=mode+1
        if mode==2:
            mode=0
| 2.859375 | 3 |
BOJ14493.py | INYEONGKIM/BOJ | 2 | 12760237 | <gh_stars>1-10
import sys
# Fast stdin reader: rebinding input() to sys.stdin.readline is a common
# competitive-programming speed trick.
input=sys.stdin.readline
# t: current simulation time, advanced one checkpoint at a time.
t=0
for _ in range(int(input())):
    # a, b: per-checkpoint cycle parameters.  NOTE(review): presumably a
    # traffic-light style cycle of period (a + b) with the first b units
    # passable -- confirm against the BOJ 14493 problem statement.
    a,b=map(int,input().split())
    if t%(b+a)<=b:
        # Within the first b units of the cycle: jump to one unit past the
        # end of that window.
        t+=b-t%(b+a)+1
    else:
        # Otherwise advance by a single time unit.
        t+=1
    print(t)
| 2.75 | 3 |
packages/pyright-internal/src/tests/samples/tryExcept9.py | not-my-profile/pyright | 1 | 12760238 | <gh_stars>1-10
# This sample tests the case where a finally clause contains some conditional
# logic that narrows the type of an expression. This narrowed type should
# persist after the finally clause.
def func1():
    # `file` should narrow to TextIOWrapper after the try statement: the
    # only path that leaves it None returns early from the except clause,
    # and the narrowing inside the finally clause must not leak past it.
    file = None
    try:
        file = open("test.txt")
    except Exception:
        return None
    finally:
        if file:
            file.close()

    reveal_type(file, expected_text="TextIOWrapper")
def func2():
    # Unlike func1, the except clause falls through (pass), so `file` may
    # still be None after the try statement and the declared type must
    # remain the union.
    file = None
    try:
        file = open("test.txt")
    except Exception:
        pass
    finally:
        if file:
            file.close()

    reveal_type(file, expected_text="TextIOWrapper | None")
def func3():
    # No except clause: reaching the line after the try statement implies
    # the assignment succeeded, so `file` narrows to TextIOWrapper.
    file = None
    try:
        file = open("test.txt")
    finally:
        pass

    reveal_type(file, expected_text="TextIOWrapper")
| 3.203125 | 3 |
tests/test_study_info_script.py | sartography/cr-connect-workflow | 2 | 12760239 | <gh_stars>1-10
import io
import json
from tests.base_test import BaseTest
from crc.scripts.study_info import StudyInfo
from crc import app
from unittest.mock import patch
from crc.services.protocol_builder import ProtocolBuilderService
class TestStudyInfoScript(BaseTest):
    """Exercises the StudyInfo workflow script both directly and through a
    running workflow, for each of its supported info types."""

    test_uid = "dhf8r"
    test_study_id = 1

    def do_work(self, info_type):
        """Run the 'study_info_script' workflow for *info_type*.

        Returns a tuple of (result of calling StudyInfo directly, the
        workflow task holding the same data) so tests can compare the two.
        """
        app.config['PB_ENABLED'] = True
        self.load_example_data()
        self.workflow = self.create_workflow('study_info_script')
        self.workflow_api = self.get_workflow_api(self.workflow)
        # grab study_info directly from script
        study_info = StudyInfo().do_task(self.workflow_api.study_id, self.workflow.study.id, self.workflow.id, info_type)

        # grab study info through a workflow
        first_task = self.workflow_api.next_task
        self.complete_form(self.workflow, first_task, {'which': info_type})
        workflow_api = self.get_workflow_api(self.workflow)
        second_task = workflow_api.next_task
        return study_info, second_task

    def test_info_script_info(self):
        """'info' type: script output matches workflow task data."""
        study_info, second_task = self.do_work(info_type='info')
        self.assertEqual(study_info['title'], second_task.data['info']['title'])
        self.assertEqual(study_info['primary_investigator_id'], second_task.data['info']['primary_investigator_id'])
        self.assertIn(study_info['title'], second_task.documentation)

    def test_info_script_updated_study_info(self):
        """Fields edited through the update workflow show up in 'info'."""
        self.load_example_data()
        short_name = "My Short Name"
        proposal_name = "My Proposal Name"
        workflow = self.create_workflow('update_study_info')
        workflow_api = self.get_workflow_api(workflow)
        task = workflow_api.next_task
        workflow_api = self.complete_form(workflow, task, {'short_name': short_name, 'proposal_name': proposal_name})
        task = workflow_api.next_task
        # The workflow calls study_info('info') and puts the result in Element Documentation
        # I create a dictionary of that info with `eval` to make the asserts easier to read
        study_info = eval(task.documentation)
        self.assertIn('short_name', study_info.keys())
        self.assertEqual(short_name, study_info['short_name'])
        self.assertIn('proposal_name', study_info.keys())
        self.assertIn(proposal_name, study_info['proposal_name'])

    @patch('crc.services.protocol_builder.requests.get')
    def test_info_script_investigators(self, mock_get):
        """'investigators' type: entries keyed by investigator type match
        the mocked Protocol Builder response."""
        app.config['PB_ENABLED'] = True
        mock_get.return_value.ok = True
        mock_get.return_value.text = self.protocol_builder_response('investigators.json')
        response = ProtocolBuilderService.get_investigators(self.test_study_id)
        study_info, second_task = self.do_work(info_type='investigators')
        for i in range(len(response)):
            r = response[i]
            s = second_task.data['info'][response[i]['INVESTIGATORTYPE']]
            self.assertEqual(r['INVESTIGATORTYPEFULL'], s['label'])

    # def test_info_script_roles(self):
    #     study_info, second_task = self.do_work(info_type='roles')
    #     self.assertEqual(study_info, second_task.data['info'])

    @patch('crc.services.protocol_builder.requests.get')
    def test_info_script_details(self, mock_get):
        """'details' type: selected fields pass through from the mocked
        Protocol Builder study details."""
        app.config['PB_ENABLED'] = True
        mock_get.return_value.ok = True
        mock_get.return_value.text = self.protocol_builder_response('study_details.json')
        response = ProtocolBuilderService.get_study_details(self.test_study_id)
        study_info, second_task = self.do_work(info_type='details')
        self.assertEqual(response['IBC_NUMBER'], second_task.data['info']['IBC_NUMBER'])
        self.assertEqual(response['IDE'], second_task.data['info']['IDE'])
        self.assertEqual(response['IND_1'], second_task.data['info']['IND_1'])
        self.assertEqual(response['IND_2'], second_task.data['info']['IND_2'])
        self.assertEqual(response['IND_3'], second_task.data['info']['IND_3'])

    def test_info_script_documents(self):
        """'documents' type: file uploads appear in the document info, minus
        the recursive 'document' sub-record."""
        study_info, second_task = self.do_work(info_type='documents')
        self.assertEqual(study_info, second_task.data['info'])
        self.assertEqual(0, len(study_info['Grant_App']['files']), "Grant_App has not files yet.")

        # Add a grant app file
        data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
        rv = self.app.post('/v1.0/file?study_id=%i&workflow_id=%s&task_spec_name=%s&form_field_key=%s' %
                           (self.workflow.study_id, self.workflow.id, second_task.name, 'Grant_App'), data=data, follow_redirects=True,
                           content_type='multipart/form-data', headers=self.logged_in_headers())
        self.assert_success(rv)
        file_data = json.loads(rv.get_data(as_text=True))

        # Now get the study info again.
        study_info = StudyInfo().do_task(self.workflow_api.study_id, self.workflow.study.id, self.workflow.id,
                                         'documents')
        # The data should contain a file.
        self.assertEqual(1, len(study_info['Grant_App']['files']), "Grant_App has exactly one file.")
        # This file data returned should be the same as what we get back about the file when we uploaded it,
        # but the details on the document should be removed, because that would be recursive.
        del file_data['document']
        self.assertEqual(file_data, study_info['Grant_App']['files'][0])

    @patch('crc.services.protocol_builder.requests.get')
    def test_info_script_sponsors(self, mock_get):
        """'sponsors' type: the list mirrors the mocked Protocol Builder
        sponsors response, item by item."""
        app.config['PB_ENABLED'] = True
        mock_get.return_value.ok = True
        mock_get.return_value.text = self.protocol_builder_response('sponsors.json')
        response = ProtocolBuilderService.get_sponsors(self.test_study_id)
        study_info, second_task = self.do_work(info_type='sponsors')
        for i in range(len(response)):
            self.assertEqual(response[i]['SPONSOR_ID'], second_task.data['info'][i]['SPONSOR_ID'])
            self.assertEqual(response[i]['SP_NAME'], second_task.data['info'][i]['SP_NAME'])
            self.assertEqual(response[i]['SS_STUDY'], second_task.data['info'][i]['SS_STUDY'])
| 2.28125 | 2 |
tests/games/gridworld/test_gridworld.py | johink/willsmith | 0 | 12760240 | <gh_stars>0
from tests.games.mdp_testcase import MDPTestCase
from games.gridworld.grid import Grid
from games.gridworld.gridworld import Gridworld
from games.gridworld.gridworld_direction import GridworldDirection
from games.gridworld.gridworld_examples import *
class TestGridworld(MDPTestCase):
    """Gridworld-specific checks layered on the generic MDP test case."""

    def setUp(self):
        """Build the simple example gridworld with a deterministic
        transition function and the agent starting at (0, 0)."""
        super().setUp()
        self.mdp = Gridworld(Grid(simple_terminals, 0, simple_walls, (4,3)),
                            deterministic_transition, (0, 0), None)
        self.test_action = GridworldDirection.UP

    def test_mdp_equality(self):
        # Delegates to the shared equality check from MDPTestCase.
        self._test_mdp_equality()

    def test_last_position_stays_in_sync(self):
        """Stepping records the prior position in previous_positions."""
        self.assertFalse(self.mdp.previous_positions)
        self.mdp.step(self.test_action)
        self.assertEqual(self.mdp.previous_positions[0], (0, 0))
| 2.109375 | 2 |
scripts/gen_mmu_x86.py | SebastianBoe/fw-nrfconnect-zephyr | 16 | 12760241 | #!/usr/bin/env python3
import os
import sys
import struct
import parser
from collections import namedtuple
import ctypes
import argparse
import re
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
# global variables
pd_complete = ''
inputfile = ''
outputfile = ''
list_of_pde = {}
num_of_regions = 0
read_buff = ''
raw_info = []
mmu_region_details = namedtuple("mmu_region_details",
"pde_index page_entries_info")
valid_pages_inside_pde = namedtuple("valid_pages_inside_pde", "start_addr size \
pte_valid_addr_start \
pte_valid_addr_end \
permissions")
mmu_region_details_pdpt = namedtuple("mmu_region_details_pdpt",
"pdpte_index pd_entries")
page_tables_list = []
pd_tables_list = []
pd_start_addr = 0
validation_issue_memory_overlap = [False, 0, -1]
output_offset = 0
print_string_pde_list = ''
pde_pte_string = {}
FourMB = (1024 * 4096) # In Bytes
# Constants
PAGE_ENTRY_PRESENT = 1
PAGE_ENTRY_READ_WRITE = 1 << 1
PAGE_ENTRY_USER_SUPERVISOR = 1 << 2
PAGE_ENTRY_PWT = 0 << 3
PAGE_ENTRY_PCD = 0 << 4
PAGE_ENTRY_ACCESSED = 0 << 5 # this is a read only field
PAGE_ENTRY_DIRTY = 0 << 6 # this is a read only field
PAGE_ENTRY_PAT = 0 << 7
PAGE_ENTRY_GLOBAL = 0 << 8
PAGE_ENTRY_ALLOC = 1 << 9
PAGE_ENTRY_CUSTOM = 0 << 10
#############
#*****************************************************************************#
# class for 4Kb Mode
class PageMode_4kb:
    """Builds 32-bit (non-PAE) x86 page tables with 4KB pages.

    The page directory and every needed page table are serialized into the
    module-level ``output_buffer``.  State is shared through the module
    globals ``list_of_pde``, ``page_tables_list`` and ``pd_start_addr``.
    """

    # Highest entry index per table (entries run 0..1023, i.e. 1024 total).
    total_pages = 1023
    write_page_entry_bin = "I"  # each entry is a 32-bit little-endian word
    size_addressed_per_pde = (1024 * 4096)  # 4MB In Bytes

    # return the page directory number for the given address
    def get_pde_number(self, value):
        return (value >> 22) & 0x3FF

    # return the page table number for the given address
    def get_pte_number(self, value):
        return (value >> 12) & 0x3FF

    # get the total number of pd available
    def get_number_of_pd(self):
        return len(list_of_pde.keys())

    # the return value will have the page address and it is assumed
    # to be a 4096 boundary
    # hence the output of this API will be a 20bit address of the page table
    def address_of_page_table(self, page_table_number):
        global pd_start_addr
        # location from where the Page tables will be written
        PT_start_addr = pd_start_addr + 4096
        return ((PT_start_addr +
                 (page_tables_list.index(page_table_number) * 4096) >> 12))

    # Packs one page-directory entry, laid out as:
    # union x86_mmu_pde_pt {
    #     u32_t  value;
    #     struct {
    #         u32_t p:1;
    #         u32_t rw:1;
    #         u32_t us:1;
    #         u32_t pwt:1;
    #         u32_t pcd:1;
    #         u32_t a:1;
    #         u32_t ignored1:1;
    #         u32_t ps:1;
    #         u32_t ignored2:4;
    #         u32_t page_table:20;
    #     };
    # };
    def get_binary_pde_value(self, value):
        perms = value.page_entries_info[0].permissions

        present = PAGE_ENTRY_PRESENT
        read_write = check_bits(perms, [1, 29]) << 1
        user_mode = check_bits(perms, [2, 28]) << 2

        pwt = PAGE_ENTRY_PWT
        pcd = PAGE_ENTRY_PCD
        a = PAGE_ENTRY_ACCESSED
        ps = 0 << 7  # this is a read only field
        page_table = self.address_of_page_table(value.pde_index) << 12
        return (present |
                read_write |
                user_mode |
                pwt |
                pcd |
                a |
                ps |
                page_table)

    # Packs one page-table entry, laid out as:
    # union x86_mmu_pte {
    #     u32_t  value;
    #     struct {
    #         u32_t p:1;
    #         u32_t rw:1;
    #         u32_t us:1;
    #         u32_t pwt:1;
    #         u32_t pcd:1;
    #         u32_t a:1;
    #         u32_t d:1;
    #         u32_t pat:1;
    #         u32_t g:1;
    #         u32_t alloc:1;
    #         u32_t custom:2;
    #         u32_t page:20;
    #     };
    # };
    def get_binary_pte_value(self, value, pte, perm_for_pte):
        present = PAGE_ENTRY_PRESENT
        read_write = ((perm_for_pte >> 1) & 0x1) << 1
        user_mode = ((perm_for_pte >> 2) & 0x1) << 2
        pwt = PAGE_ENTRY_PWT
        pcd = PAGE_ENTRY_PCD
        a = PAGE_ENTRY_ACCESSED
        d = PAGE_ENTRY_DIRTY
        pat = PAGE_ENTRY_PAT
        g = PAGE_ENTRY_GLOBAL
        alloc = PAGE_ENTRY_ALLOC
        custom = PAGE_ENTRY_CUSTOM

        # This points to the actual memory in the HW
        # totally 20 bits to rep the phy address
        # first 10 is the number got from pde and next 10 is pte
        page_table = ((value.pde_index << 10) | pte) << 12

        binary_value = (present | read_write | user_mode |
                        pwt | pcd | a | d | pat | g | alloc | custom |
                        page_table)
        return binary_value

    def populate_required_structs(self):
        """Walk ``raw_info`` and fill ``list_of_pde``/``page_tables_list``,
        splitting any region that spans multiple 4MB PDE windows."""
        for region in raw_info:
            pde_index = self.get_pde_number(region[0])
            pte_valid_addr_start = self.get_pte_number(region[0])

            # Get the end of the page table entries
            # Since a memory region can take up only a few entries in the Page
            # table, this helps us get the last valid PTE.
            pte_valid_addr_end = self.get_pte_number(region[0] +
                                                     region[1] - 1)

            mem_size = region[1]

            # In-case the start address aligns with a page table entry other
            # than zero and the mem_size is greater than (1024*4096) i.e 4MB
            # in case where it overflows the current PDE's range then limit the
            # PTE to 1024 and so make the mem_size reflect the actual size taken
            # up in the current PDE
            if (region[1] + (pte_valid_addr_start * 4096)) >= \
               (self.size_addressed_per_pde):

                pte_valid_addr_end = self.total_pages
                mem_size = (((self.total_pages + 1) -
                             pte_valid_addr_start) * 4096)

            self.set_pde_pte_values(pde_index, region[0], mem_size,
                                    pte_valid_addr_start,
                                    pte_valid_addr_end,
                                    region[2])

            if pde_index not in page_tables_list:
                page_tables_list.append(pde_index)

            # IF the current pde couldn't fit the entire requested region size
            # then there is a need to create new PDEs to match the size.
            # Here the overflow_size represents the size that couldn't be fit
            # inside the current PDE, this is will now to used to create a
            # new PDE/PDEs so the size remaining will be
            # requested size - allocated size(in the current PDE)
            overflow_size = region[1] - mem_size

            # create all the extra PDEs needed to fit the requested size
            # this loop starts from the current pde till the last pde that is
            # needed the last pde is calculated as the (start_addr + size) >>
            # 22
            if overflow_size != 0:
                for extra_pde in range(pde_index + 1, self.get_pde_number(
                        region[0] + region[1]) + 1):

                    # new pde's start address
                    # each page directory entry has a addr range of (1024 *4096)
                    # thus the new PDE start address is a multiple of that
                    # number
                    extra_pde_start_address = (extra_pde *
                                               (self.size_addressed_per_pde))

                    # the start address of and extra pde will always be 0
                    # and the end address is calculated with the new pde's start
                    # address and the overflow_size
                    extra_pte_valid_addr_end = self.get_pte_number(
                        extra_pde_start_address + overflow_size - 1)

                    # if the overflow_size couldn't be fit inside this new pde
                    # then need another pde and so we now need to limit the end
                    # of the PTE to 1024 and set the size of this new region to
                    # the max possible
                    extra_region_size = overflow_size
                    if overflow_size >= (self.size_addressed_per_pde):
                        extra_region_size = self.size_addressed_per_pde
                        extra_pte_valid_addr_end = self.total_pages

                    # load the new PDE's details
                    self.set_pde_pte_values(extra_pde,
                                            extra_pde_start_address,
                                            extra_region_size,
                                            0,
                                            extra_pte_valid_addr_end,
                                            region[2])

                    # for the next iteration of the loop the size needs to
                    # decreased.
                    overflow_size -= extra_region_size
                    # print(hex_32(overflow_size),extra_pde)

                    if extra_pde not in page_tables_list:
                        page_tables_list.append(extra_pde)

                    if overflow_size == 0:
                        break

        page_tables_list.sort()

    # update the tuple values for the memory regions needed
    def set_pde_pte_values(self, pde_index, address, mem_size,
                           pte_valid_addr_start, pte_valid_addr_end, perm):

        pages_tuple = valid_pages_inside_pde(
            start_addr=address,
            size=mem_size,
            pte_valid_addr_start=pte_valid_addr_start,
            pte_valid_addr_end=pte_valid_addr_end,
            permissions=perm)

        mem_region_values = mmu_region_details(pde_index=pde_index,
                                               page_entries_info=[])

        mem_region_values.page_entries_info.append(pages_tuple)

        if pde_index in list_of_pde.keys():
            # this step adds the new page info to the existing pages info
            list_of_pde[pde_index].page_entries_info.append(pages_tuple)
        else:
            list_of_pde[pde_index] = mem_region_values

    def page_directory_create_binary_file(self):
        """Serialize all 1024 page-directory entries into output_buffer."""
        global output_buffer
        global output_offset
        for pde in range(self.total_pages + 1):
            binary_value = 0  # the page directory entry is not valid

            # if i have a valid entry to populate
            if pde in sorted(list_of_pde.keys()):
                value = list_of_pde[pde]
                binary_value = self.get_binary_pde_value(value)
                self.pde_verbose_output(pde, binary_value)

            struct.pack_into(self.write_page_entry_bin,
                             output_buffer,
                             output_offset,
                             binary_value)
            output_offset += struct.calcsize(self.write_page_entry_bin)

    def page_table_create_binary_file(self):
        """Serialize one full page table per populated PDE, marking only
        the PTE ranges covered by some region as present."""
        global output_buffer
        global output_offset

        for key, value in sorted(list_of_pde.items()):
            for pte in range(self.total_pages + 1):
                binary_value = 0  # the page directory entry is not valid
                valid_pte = 0
                for i in value.page_entries_info:
                    temp_value = ((pte >= i.pte_valid_addr_start) and
                                  (pte <= i.pte_valid_addr_end))
                    if temp_value:
                        perm_for_pte = i.permissions
                    valid_pte |= temp_value

                # if i have a valid entry to populate
                if valid_pte:
                    binary_value = self.get_binary_pte_value(value,
                                                             pte,
                                                             perm_for_pte)
                    self.pte_verbose_output(key, pte, binary_value)

                struct.pack_into(self.write_page_entry_bin,
                                 output_buffer,
                                 output_offset,
                                 binary_value)
                output_offset += struct.calcsize(self.write_page_entry_bin)

    # To populate the binary file the module struct needs a buffer of the
    # exact size. This returns the size needed for the given set of page
    # tables.
    def set_binary_file_size(self):
        # One 4KB page directory plus one 4KB page table per populated PDE.
        binary_size = ctypes.create_string_buffer((4096) +
                                                  (len(list_of_pde.keys()) *
                                                   4096))
        return binary_size

    # prints the details of the pde
    def verbose_output(self):
        print("\nTotal Page directory entries " + str(self.get_number_of_pd()))
        count = 0
        for key, value in list_of_pde.items():
            for i in value.page_entries_info:
                count += 1
                print("In Page directory entry " +
                      format_string(value.pde_index) +
                      ": valid start address = " +
                      hex_32(i.start_addr) + ", end address = " +
                      hex_32((i.pte_valid_addr_end + 1) * 4096 - 1 +
                             (value.pde_index * (FourMB))))

    # print all the tables for a given page table mode
    def print_all_page_table_info(self):
        self.pde_print_elements()
        self.pte_print_elements()

    # Accumulates one decoded PDE row into the verbose output string;
    # only active at verbosity level 2 and above.
    def pde_verbose_output(self, pde, binary_value):
        if args.verbose < 2:
            return

        global print_string_pde_list
        present = format_string(binary_value & 0x1)
        read_write = format_string((binary_value >> 1) & 0x1)
        user_mode = format_string((binary_value >> 2) & 0x1)
        pwt = format_string((binary_value >> 3) & 0x1)
        pcd = format_string((binary_value >> 4) & 0x1)
        a = format_string((binary_value >> 5) & 0x1)
        ignored1 = format_string(0)
        ps = format_string((binary_value >> 7) & 0x1)
        ignored2 = format_string(0000)
        page_table_addr = format_string(hex((binary_value >> 12) & 0xFFFFF))

        print_string_pde_list += (format_string(str(pde)) +
                                  " | " +
                                  (present) +
                                  " | " +
                                  (read_write) + " | " +
                                  (user_mode) + " | " +
                                  (pwt) + " | " +
                                  (pcd) + " | " +
                                  (a) + " | " +
                                  (ps) + " | " +
                                  page_table_addr + "\n"
                                  )

    def pde_print_elements(self):
        """Print the accumulated page-directory rows as a table."""
        global print_string_pde_list
        print("PAGE DIRECTORY ")
        print(format_string("PDE") + " | " +
              format_string('P') + " | " +
              format_string('rw') + " | " +
              format_string('us') + " | " +
              format_string('pwt') + " | " +
              format_string('pcd') + " | " +
              format_string('a') + " | " +
              format_string('ps') + " | " +
              format_string('Addr page table'))
        print(print_string_pde_list)
        print("END OF PAGE DIRECTORY")

    # Accumulates one decoded PTE row, grouped per page directory entry.
    def pte_verbose_output(self, pde, pte, binary_value):
        global pde_pte_string
        present = format_string((binary_value >> 0) & 0x1)
        read_write = format_string((binary_value >> 1) & 0x1)
        user_mode = format_string((binary_value >> 2) & 0x1)
        pwt = format_string((binary_value >> 3) & 0x1)
        pcd = format_string((binary_value >> 4) & 0x1)
        a = format_string((binary_value >> 5) & 0x1)
        d = format_string((binary_value >> 6) & 0x1)
        pat = format_string((binary_value >> 7) & 0x1)
        g = format_string((binary_value >> 8) & 0x1)
        alloc = format_string((binary_value >> 9) & 0x1)
        custom = format_string((binary_value >> 10) & 0x3)
        page_table_addr = hex_20((binary_value >> 12) & 0xFFFFF)

        print_string_list = (format_string(str(pte)) + " | " +
                             (present) + " | " +
                             (read_write) + " | " +
                             (user_mode) + " | " +
                             (pwt) + " | " +
                             (pcd) + " | " +
                             (a) + " | " +
                             (d) + " | " +
                             (pat) + " | " +
                             (g) + " | " +
                             (alloc) + " | " +
                             (custom) + " | " +
                             page_table_addr + "\n"
                             )

        if pde in pde_pte_string.keys():
            pde_pte_string[pde] += (print_string_list)
        else:
            pde_pte_string[pde] = print_string_list

    def pte_print_elements(self):
        """Print the accumulated page-table rows, one table per PDE."""
        global pde_pte_string

        for pde, print_string in sorted(pde_pte_string.items()):
            print("\nPAGE TABLE " + str(pde))

            print(format_string("PTE") + " | " +
                  format_string('P') + " | " +
                  format_string('rw') + " | " +
                  format_string('us') + " | " +
                  format_string('pwt') + " | " +
                  format_string('pcd') + " | " +
                  format_string('a') + " | " +
                  format_string('d') + " | " +
                  format_string('pat') + " | " +
                  format_string('g') + " | " +
                  format_string('alloc') + " | " +
                  format_string('custom') + " | " +
                  format_string('page addr'))
            print(print_string)
            print("END OF PAGE TABLE " + str(pde))
#*****************************************************************************#
# class for PAE 4KB Mode
class PageMode_PAE:
total_pages = 511
write_page_entry_bin = "Q"
size_addressed_per_pde = (512 * 4096) # 2MB In Bytes
size_addressed_per_pdpte = (512 * size_addressed_per_pde) # In Bytes
list_of_pdpte = {}
pdpte_print_string = {}
print_string_pdpte_list = ''
# TODO enable all page tables on just a flag
def __init__(self):
for i in range(4):
self.list_of_pdpte[i] = mmu_region_details_pdpt(pdpte_index=i,
pd_entries={})
# return the pdpte number for the give address
def get_pdpte_number(self, value):
return (value >> 30) & 0x3
# return the page directory number for the give address
def get_pde_number(self, value):
return (value >> 21) & 0x1FF
# return the page table number for the given address
def get_pte_number(self, value):
return (value >> 12) & 0x1FF
def get_number_of_pd(self):
return len(self.get_pdpte_list())
def get_pdpte_list(self):
return list({temp[0] for temp in pd_tables_list})
# the return value will have the page address and it is assumed to be a 4096
# boundary.hence the output of this API will be a 20bit address of the page
# table
def address_of_page_table(self, pdpte, page_table_number):
global pd_start_addr
# first page given to page directory pointer
# and 2nd page till 5th page are used for storing the page directories.
# set the max pdpte used. this tells how many pd are needed after
# that we start keeping the pt
PT_start_addr = self.get_number_of_pd() * 4096 +\
pd_start_addr + 4096
return (PT_start_addr +
(pd_tables_list.index([pdpte, page_table_number]) *
4096) >> 12)
# union x86_mmu_pae_pde {
# u64_t value;
# struct {
# u64_t p:1;
# u64_t rw:1;
# u64_t us:1;
# u64_t pwt:1;
# u64_t pcd:1;
# u64_t a:1;
# u64_t ignored1:1;
# u64_t ps:1;
# u64_t ignored2:4;
# u64_t page_table:20;
# u64_t igonred3:29;
# u64_t xd:1;
# };
# };
def get_binary_pde_value(self, pdpte, value):
perms = value.page_entries_info[0].permissions
present = PAGE_ENTRY_PRESENT
read_write = check_bits(perms, [1, 29]) << 1
user_mode = check_bits(perms, [2, 28]) << 2
pwt = PAGE_ENTRY_PWT
pcd = PAGE_ENTRY_PCD
a = PAGE_ENTRY_ACCESSED
ps = 0 << 7 # set to make sure that the phy page is 4KB
page_table = self.address_of_page_table(pdpte, value.pde_index) << 12
xd = 0
return (present |
read_write |
user_mode |
pwt |
pcd |
a |
ps |
page_table |
xd)
# union x86_mmu_pae_pte {
# u64_t value;
# struct {
# u64_t p:1;
# u64_t rw:1;
# u64_t us:1;
# u64_t pwt:1;
# u64_t pcd:1;
# u64_t a:1;
# u64_t d:1;
# u64_t pat:1;
# u64_t g:1;
# u64_t ignore:3;
# u64_t page:20;
# u64_t igonred3:29;
# u64_t xd:1;
# };
# };
def get_binary_pte_value(self, value, pde, pte, perm_for_pte):
present = PAGE_ENTRY_PRESENT
read_write = perm_for_pte & PAGE_ENTRY_READ_WRITE
user_mode = perm_for_pte & PAGE_ENTRY_USER_SUPERVISOR
pwt = PAGE_ENTRY_PWT
pcd = PAGE_ENTRY_PCD
a = PAGE_ENTRY_ALLOC
d = PAGE_ENTRY_DIRTY
pat = PAGE_ENTRY_PAT
g = PAGE_ENTRY_GLOBAL
# This points to the actual memory in the HW
# totally 20 bits to rep the phy address
# first 2bits is from pdpte then 9bits is the number got from pde and
# next 9bits is pte
page_table = ((value.pdpte_index << 18) | (pde << 9) | pte) << 12
xd = ((perm_for_pte >> 63) & 0x1) << 63
binary_value = (present | read_write | user_mode |
pwt | pcd | a | d | pat | g |
page_table | xd)
return binary_value
def clean_up_unused_pdpte(self):
    """Drop every PDPTE whose page-directory map ended up empty."""
    kept = {}
    for index, entry in self.list_of_pdpte.items():
        if entry.pd_entries != {}:
            kept[index] = entry
    self.list_of_pdpte = kept
# update the tuple values for the memory regions needed
def set_pde_pte_values(self, pdpte, pde_index, address, mem_size,
                       pte_valid_addr_start, pte_valid_addr_end, perm):
    """Record one memory region's pages under list_of_pdpte[pdpte].

    Appends the page info to the existing PDE entry when one is already
    present for pde_index, otherwise creates a fresh entry.
    """
    pages_tuple = valid_pages_inside_pde(
        start_addr=address,
        size=mem_size,
        pte_valid_addr_start=pte_valid_addr_start,
        pte_valid_addr_end=pte_valid_addr_end,
        permissions=perm)
    mem_region_values = mmu_region_details(pde_index=pde_index,
                                           page_entries_info=[])
    mem_region_values.page_entries_info.append(pages_tuple)
    if pde_index in self.list_of_pdpte[pdpte].pd_entries.keys():
        # this step adds the new page info to the existing pages info
        self.list_of_pdpte[pdpte].pd_entries[pde_index].\
            page_entries_info.append(pages_tuple)
    else:
        self.list_of_pdpte[pdpte].pd_entries[pde_index] = mem_region_values
def populate_required_structs(self):
    """Walk raw_info and build the PDPTE/PDE/PTE bookkeeping structures.

    For each raw region this computes the covering PDPTE/PDE/PTE indices,
    records them via set_pde_pte_values(), and creates extra PDEs when a
    region spills past the current PDE's address window.  Also fills the
    module-level pd_tables_list with every [pdpte, pde] pair in use.
    """
    for region in raw_info:
        pdpte_index = self.get_pdpte_number(region[0])
        pde_index = self.get_pde_number(region[0])
        pte_valid_addr_start = self.get_pte_number(region[0])
        # Get the end of the page table entries
        # Since a memory region can take up only a few entries in the Page
        # table, this helps us get the last valid PTE.
        pte_valid_addr_end = self.get_pte_number(region[0] +
                                                region[1] - 1)
        mem_size = region[1]
        # In-case the start address aligns with a page table entry other
        # than zero and the mem_size is greater than (1024*4096) i.e 4MB
        # in case where it overflows the current PDE's range then limit the
        # PTE to 1024 and so make the mem_size reflect the actual size
        # taken up in the current PDE
        if (region[1] + (pte_valid_addr_start * 4096)) >= \
           (self.size_addressed_per_pde):
            pte_valid_addr_end = self.total_pages
            mem_size = (((self.total_pages + 1) -
                         pte_valid_addr_start) * 4096)
        self.set_pde_pte_values(pdpte_index,
                                pde_index,
                                region[0],
                                mem_size,
                                pte_valid_addr_start,
                                pte_valid_addr_end,
                                region[2])
        if [pdpte_index, pde_index] not in pd_tables_list:
            pd_tables_list.append([pdpte_index, pde_index])
        # IF the current pde couldn't fit the entire requested region
        # size then there is a need to create new PDEs to match the size.
        # Here the overflow_size represents the size that couldn't be fit
        # inside the current PDE; this will now be used to create new
        # PDE/PDEs, so the size remaining will be
        # requested size - allocated size(in the current PDE)
        overflow_size = region[1] - mem_size
        # create all the extra PDEs needed to fit the requested size
        # this loop starts from the current pde till the last pde that is
        # needed; the last pde is calculated as (start_addr + size) >> 22
        if overflow_size != 0:
            for extra_pdpte in range(pdpte_index,
                                     self.get_pdpte_number(region[0] +
                                                           region[1]) + 1):
                for extra_pde in range(pde_index + 1, self.get_pde_number(
                        region[0] + region[1]) + 1):
                    # new pde's start address
                    # each page directory entry has a addr range of
                    # (1024 *4096) thus the new PDE start address is a
                    # multiple of that number
                    extra_pde_start_address = (
                        extra_pde * (self.size_addressed_per_pde))
                    # the start address of an extra pde will always be 0
                    # and the end address is calculated with the new
                    # pde's start address and the overflow_size
                    extra_pte_valid_addr_end = (
                        self.get_pte_number(extra_pde_start_address +
                                            overflow_size - 1))
                    # if the overflow_size couldn't be fit inside this new
                    # pde then need another pde and so we now need to limit
                    # the end of the PTE to 1024 and set the size of this
                    # new region to the max possible
                    extra_region_size = overflow_size
                    if overflow_size >= (self.size_addressed_per_pde):
                        extra_region_size = self.size_addressed_per_pde
                        extra_pte_valid_addr_end = self.total_pages
                    # load the new PDE's details
                    self.set_pde_pte_values(extra_pdpte,
                                            extra_pde,
                                            extra_pde_start_address,
                                            extra_region_size,
                                            0,
                                            extra_pte_valid_addr_end,
                                            region[2])
                    # for the next iteration of the loop the size needs
                    # to be decreased
                    overflow_size -= extra_region_size
                    if [extra_pdpte, extra_pde] not in pd_tables_list:
                        pd_tables_list.append([extra_pdpte, extra_pde])
                    if overflow_size == 0:
                        break
    pd_tables_list.sort()
    self.clean_up_unused_pdpte()
def pdpte_create_binary_file(self):
    """Pack the page-directory-pointer table into output_buffer.

    Writes one entry per possible slot; slots with no backing page
    directory are written as zero.  Advances the module-level
    output_offset past everything written.
    """
    global output_buffer
    global output_offset
    global pd_start_addr
    # pae needs a pdpte at 32byte aligned address
    # Even though we have only 4 entries in the pdpte we need to move
    # the output_offset variable to the next page to start pushing
    # the pd contents
    for pdpte in range(self.total_pages + 1):
        if pdpte in self.get_pdpte_list():
            present = 1 << 0
            pwt = 0 << 3
            pcd = 0 << 4
            # 4KB-aligned physical address of this PDPTE's page directory
            addr_of_pd = (((pd_start_addr + 4096) +
                           self.get_pdpte_list().index(pdpte) *
                           4096) >> 12) << 12
            binary_value = (present | pwt | pcd | addr_of_pd)
            self.pdpte_verbose_output(pdpte, binary_value)
        else:
            binary_value = 0
        struct.pack_into(self.write_page_entry_bin,
                         output_buffer,
                         output_offset,
                         binary_value)
        output_offset += struct.calcsize(self.write_page_entry_bin)
def page_directory_create_binary_file(self):
    """Pack every page directory (one per PDPTE) into output_buffer."""
    global output_buffer
    global output_offset
    pdpte_number_count = 0  # NOTE(review): written but never read
    for pdpte, pde_info in self.list_of_pdpte.items():
        pde_number_count = 0
        for pde in range(self.total_pages + 1):
            binary_value = 0  # the page directory entry is not valid
            # if i have a valid entry to populate
            # if pde in sorted(list_of_pde.keys()):
            if pde in sorted(pde_info.pd_entries.keys()):
                value = pde_info.pd_entries[pde]
                binary_value = self.get_binary_pde_value(pdpte, value)
                self.pde_verbose_output(pdpte, pde, binary_value)
                pde_number_count += 1
            struct.pack_into(self.write_page_entry_bin,
                             output_buffer,
                             output_offset,
                             binary_value)
            output_offset += struct.calcsize(self.write_page_entry_bin)
def page_table_create_binary_file(self):
    """Pack every page table (one per recorded [pdpte, pde]) into output_buffer."""
    global output_buffer
    global output_offset
    pdpte_number_count = 0
    for pdpte, pde_info in sorted(self.list_of_pdpte.items()):
        pdpte_number_count += 1
        for pde, pte_info in sorted(pde_info.pd_entries.items()):
            pte_number_count = 0
            for pte in range(self.total_pages + 1):
                binary_value = 0  # the page directory entry is not valid
                valid_pte = 0
                # go through all the valid pages inside the pde to
                # figure out if we need to populate this pte
                for i in pte_info.page_entries_info:
                    temp_value = ((pte >= i.pte_valid_addr_start) and
                                  (pte <= i.pte_valid_addr_end))
                    if temp_value:
                        # last matching range wins for the permissions
                        perm_for_pte = i.permissions
                        valid_pte |= temp_value
                # if i have a valid entry to populate
                if valid_pte:
                    binary_value = self.get_binary_pte_value(pde_info,
                                                             pde,
                                                             pte,
                                                             perm_for_pte)
                    pte_number_count += 1
                    self.pte_verbose_output(pdpte, pde, pte, binary_value)
                # print(binary_value, (self.write_page_entry_bin))
                struct.pack_into(self.write_page_entry_bin,
                                 output_buffer,
                                 output_offset,
                                 binary_value)
                output_offset += struct.calcsize(self.write_page_entry_bin)
# To populate the binary file the module struct needs a buffer of the
# exact size. This returns the buffer sized for the given set of page tables.
def set_binary_file_size(self):
    """Return a zeroed ctypes buffer big enough for all tables.

    Layout: one 4KB page for the PDPT, one page per page directory,
    one page per page table.  (Despite the name, this allocates and
    returns the buffer rather than setting a size.)
    """
    pages_for_pdpte = 1
    pages_for_pd = self.get_number_of_pd()
    pages_for_pt = len(pd_tables_list)
    binary_size = ctypes.create_string_buffer((pages_for_pdpte +
                                               pages_for_pd +
                                               pages_for_pt) * 4096)
    return binary_size
# prints the details of the pde
def verbose_output(self):
    """Print a human-readable summary of every PDE's valid address range."""
    print("\nTotal Page directory Page pointer entries " +
          str(self.get_number_of_pd()))
    count = 0
    for pdpte, pde_info in sorted(self.list_of_pdpte.items()):
        print(
            "In page directory page table pointer " +
            format_string(pdpte))
        for pde, pte_info in sorted(pde_info.pd_entries.items()):
            for pte in pte_info.page_entries_info:
                count += 1
                # absolute end address = last PTE end + PDE and PDPTE offsets
                print(" In Page directory entry " + format_string(pde) +
                      ": valid start address = " +
                      hex_32(pte.start_addr) + ", end address = " +
                      hex_32((pte.pte_valid_addr_end + 1) * 4096 - 1 +
                             (pde * (self.size_addressed_per_pde)) +
                             (pdpte * self.size_addressed_per_pdpte)))
def pdpte_verbose_output(self, pdpte, binary_value):
    """Accumulate a formatted table row for one PDPTE (verbose >= 2 only)."""
    if args.verbose < 2:
        return
    present = format_string(binary_value & 0x1)
    pwt = format_string((binary_value >> 3) & 0x1)
    pcd = format_string((binary_value >> 4) & 0x1)
    page_table_addr = format_string(hex((binary_value >> 12) & 0xFFFFF))
    self.print_string_pdpte_list += (format_string(str(pdpte)) +
                                     " | " + (present) + " | " +
                                     (pwt) + " | " +
                                     (pcd) + " | " +
                                     page_table_addr + "\n")
def pdpte_print_elements(self):
    """Print the accumulated PDPTE rows with a header/footer banner."""
    print("\nPAGE DIRECTORIES POINTER ")
    print(format_string("PDPTE") + " | " +
          format_string('P') + " | " +
          format_string('pwt') + " | " +
          format_string('pcd') + " | " +
          format_string('Addr'))
    print(self.print_string_pdpte_list)
    print("END OF PAGE DIRECTORY POINTER")
def pde_verbose_output(self, pdpte, pde, binary_value):
    """Accumulate a formatted table row for one PDE (verbose >= 2 only).

    Rows are collected per PDPTE in self.pdpte_print_string and printed
    later by pde_print_elements().
    """
    if args.verbose < 2:
        return
    global print_string_pde_list
    present = format_string(binary_value & 0x1)
    read_write = format_string((binary_value >> 1) & 0x1)
    user_mode = format_string((binary_value >> 2) & 0x1)
    pwt = format_string((binary_value >> 3) & 0x1)
    pcd = format_string((binary_value >> 4) & 0x1)
    a = format_string((binary_value >> 5) & 0x1)
    # (unused ``ignored1``/``ignored2`` locals from the original removed;
    # they were computed but never printed)
    ps = format_string((binary_value >> 7) & 0x1)
    page_table_addr = format_string(hex((binary_value >> 12) & 0xFFFFF))
    xd = format_string((binary_value >> 63) & 0x1)
    print_string_pde_list = (format_string(str(pde)) + " | " +
                             (present) + " | " +
                             (read_write) + " | " +
                             (user_mode) + " | " +
                             (pwt) + " | " +
                             (pcd) + " | " +
                             (a) + " | " +
                             (ps) + " | " +
                             page_table_addr + " | " +
                             (xd) + "\n")
    if pdpte in self.pdpte_print_string.keys():
        self.pdpte_print_string[pdpte] += (print_string_pde_list)
    else:
        self.pdpte_print_string[pdpte] = print_string_pde_list
# print all the tables for a given page table mode
def print_all_page_table_info(self):
    """Print the PDPT, page directories and page tables (verbose mode)."""
    self.pdpte_print_elements()
    self.pde_print_elements()
    self.pte_print_elements()
def pde_print_elements(self):
    """Print the accumulated PDE rows grouped by their parent PDPTE."""
    global print_string_pde_list  # NOTE(review): declared but not used here
    for pdpte, print_string in sorted(self.pdpte_print_string.items()):
        print("\n PAGE DIRECTORIES for PDPT " + str(pdpte))
        print(format_string("PDE") + " | " +
              format_string('P') + " | " +
              format_string('rw') + " | " +
              format_string('us') + " | " +
              format_string('pwt') + " | " +
              format_string('pcd') + " | " +
              format_string('a') + " | " +
              format_string('ps') + " | " +
              format_string('Addr') + " | " +
              format_string('xd'))
        print(print_string)
        print("END OF PAGE DIRECTORIES for PDPT " + str(pdpte))
def pte_verbose_output(self, pdpte, pde, pte, binary_value):
    """Accumulate a formatted table row for one PTE (verbose >= 2 only).

    Rows are collected per (pdpte, pde) pair in the module-level
    pde_pte_string dict and printed later by pte_print_elements().
    """
    # Consistency fix: pdpte_verbose_output/pde_verbose_output both skip
    # their string building unless verbose output was requested, and
    # pde_pte_string is only ever printed when args.verbose > 1, so the
    # work here was pure waste in non-verbose runs.
    if args.verbose < 2:
        return
    global pde_pte_string
    present = format_string((binary_value >> 0) & 0x1)
    read_write = format_string((binary_value >> 1) & 0x1)
    user_mode = format_string((binary_value >> 2) & 0x1)
    pwt = format_string((binary_value >> 3) & 0x1)
    pcd = format_string((binary_value >> 4) & 0x1)
    a = format_string((binary_value >> 5) & 0x1)
    d = format_string((binary_value >> 6) & 0x1)
    pat = format_string((binary_value >> 7) & 0x1)
    g = format_string((binary_value >> 8) & 0x1)
    page_table_addr = hex_20((binary_value >> 12) & 0xFFFFF)
    xd = format_string((binary_value >> 63) & 0x1)
    print_string_list = (format_string(str(pte)) + " | " +
                         (present) + " | " +
                         (read_write) + " | " +
                         (user_mode) + " | " +
                         (pwt) + " | " +
                         (pcd) + " | " +
                         (a) + " | " +
                         (d) + " | " +
                         (pat) + " | " +
                         (g) + " | " +
                         page_table_addr + " | " +
                         (xd) + "\n"
                         )
    if (pdpte, pde) in pde_pte_string.keys():
        pde_pte_string[(pdpte, pde)] += (print_string_list)
    else:
        pde_pte_string[(pdpte, pde)] = print_string_list
def pte_print_elements(self):
    """Print the accumulated PTE rows grouped by (pdpte, pde) pair."""
    global pde_pte_string
    for (pdpte, pde), print_string in sorted(pde_pte_string.items()):
        print(
            "\nPAGE TABLE for PDPTE = " +
            str(pdpte) +
            " and PDE = " +
            str(pde))
        print(format_string("PTE") + " | " +
              format_string('P') + " | " +
              format_string('rw') + " | " +
              format_string('us') + " | " +
              format_string('pwt') + " | " +
              format_string('pcd') + " | " +
              format_string('a') + " | " +
              format_string('d') + " | " +
              format_string('pat') + " | " +
              format_string('g') + " | " +
              format_string('Page Addr') + " | " +
              format_string('xd'))
        print(print_string)
        print("END OF PAGE TABLE " + str(pde))
#*****************************************************************************#
def print_list_of_pde(list_of_pde):
    """Debug helper: dump every PDE key/value pair, then a blank separator."""
    for pde_key in list_of_pde:
        print(pde_key, list_of_pde[pde_key])
    print('\n')
# read the binary from the input file and populate a dict for
# start address of mem region
# size of the region - so page tables entries will be created with this
# read write permissions
def read_mmu_list_marshal_param(page_mode):
    """Read the linker-produced regions binary into module state.

    Populates pd_start_addr and appends one tuple per non-empty region
    to raw_info.  If a region is contained inside an already-recorded
    one, sets validation_issue_memory_overlap and returns early.
    """
    global read_buff
    global page_tables_list
    global pd_start_addr
    global validation_issue_memory_overlap
    read_buff = input_file.read()
    input_file.close()
    # read contents of the binary file first 2 values read are
    # num_of_regions and page directory start address both calculated and
    # populated by the linker
    num_of_regions, pd_start_addr = struct.unpack_from(
        header_values_format, read_buff, 0)
    # an offset used to remember next location to read in the binary
    size_read_from_binary = struct.calcsize(header_values_format)
    # for each of the regions mentioned in the binary loop and populate all the
    # required parameters
    for region in range(num_of_regions):
        basic_mem_region_values = struct.unpack_from(struct_mmu_regions_format,
                                                     read_buff,
                                                     size_read_from_binary)
        size_read_from_binary += struct.calcsize(struct_mmu_regions_format)
        # ignore zero sized memory regions
        if basic_mem_region_values[1] == 0:
            continue
        # validate for memory overlap here
        for i in raw_info:
            start_location = basic_mem_region_values[0]
            end_location = basic_mem_region_values[0] + \
                basic_mem_region_values[1]
            # NOTE(review): this only flags a region *fully contained* in an
            # existing one; partially overlapping regions slip through --
            # confirm whether that is intended.
            overlap_occurred = ((start_location >= i[0]) and
                                (start_location <= (i[0] + i[1]))) and \
                               ((end_location >= i[0]) and
                                (end_location <= i[0] + i[1]))
            if overlap_occurred:
                validation_issue_memory_overlap = [
                    True,
                    start_location,
                    page_mode.get_pde_number(start_location)]
                return
        # add the retrieved info to another list
        raw_info.append(basic_mem_region_values)
def validate_pde_regions():
    """Sanity-check the collected regions; sys.exit(2) on any violation.

    Checks that every region's start address and size are 4KB aligned and
    that read_mmu_list_marshal_param() did not flag a region overlap.
    """
    # validation for correct page alignment of the region start addresses
    for key, value in list_of_pde.items():
        for pages_inside_pde in value.page_entries_info:
            if pages_inside_pde.start_addr & (0xFFF) != 0:
                print("Memory Regions are not page aligned",
                      hex(pages_inside_pde.start_addr))
                sys.exit(2)
            # validation for correct page alignment of the region sizes
            if pages_inside_pde.size & (0xFFF) != 0:
                print("Memory Regions size is not page aligned",
                      hex(pages_inside_pde.size))
                sys.exit(2)
    # validation for spilling of the regions across various PDEs
    # (idiom fix: truthiness test instead of the original ``== True``)
    if validation_issue_memory_overlap[0]:
        print("Memory Regions are overlapping at memory address " +
              str(hex(validation_issue_memory_overlap[1])) +
              " with Page directory Entry number " +
              str(validation_issue_memory_overlap[2]))
        sys.exit(2)
def check_bits(val, bits):
    """Return 1 if any of the given bit positions is set in *val*, else 0."""
    return int(any((val >> bit) & 1 for bit in bits))
# Read the parameters passed to the file
def parse_args():
    """Parse the command line into the module-level ``args`` namespace."""
    global args
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # Fix: the original adjacent string literals rendered as
    # "...big-endian format(little endian is the default)" -- missing space.
    parser.add_argument("-e", "--big-endian", action="store_true",
                        help="Target encodes data in big-endian format "
                             "(little endian is the default)")
    parser.add_argument("-i", "--input",
                        help="Input file from which MMU regions are read.")
    parser.add_argument("-k", "--kernel",
                        help="Zephyr kernel image")
    parser.add_argument(
        "-o", "--output",
        help="Output file into which the page tables are written.")
    parser.add_argument("-v", "--verbose", action="count", default=0,
                        help="Print debugging information. Multiple "
                             "invocations increase verbosity")
    args = parser.parse_args()
    # NOTE(review): the env override forces verbosity to exactly 1 even when
    # -vv was given on the command line -- confirm that is intended.
    if "VERBOSE" in os.environ:
        args.verbose = 1
# the format for writing in the binary file would be decided by the
# endian selected
def set_struct_endian_format(page_mode):
    """Pick '<' or '>' struct prefixes based on the --big-endian flag.

    Updates the module-level unpack formats and prefixes the page_mode's
    per-entry write format in place.
    """
    endian_string = "<"
    if args.big_endian is True:
        endian_string = ">"
    global struct_mmu_regions_format
    global header_values_format
    # region record: start address (I), size (I), permissions (Q)
    struct_mmu_regions_format = endian_string + "IIQ"
    # header: number of regions (I), page directory start address (I)
    header_values_format = endian_string + "II"
    page_mode.write_page_entry_bin = (endian_string +
                                      page_mode.write_page_entry_bin)
def format_string(input_str):
    """Left-justify *input_str* in a 5-character field (always a str)."""
    return str(input_str).ljust(5)
# format for 32bit hex value
# format for 32bit hex value
def hex_32(input_value):
    """Format *input_value* as a zero-padded 32-bit hex literal, e.g. 0x000000ff."""
    return format(input_value, "#010x")
# format for 20bit hex value
# format for 20bit hex value
def hex_20(input_value):
    """Format *input_value* as a zero-padded 20-bit hex literal, e.g. 0x000ff."""
    return format(input_value, "#07x")
def verbose_output(page_mode):
    """Print region and page-table summaries according to args.verbose."""
    if args.verbose == 0:
        return
    print("\nMemory Regions as defined:")
    for info in raw_info:
        print("Memory region start address = " + hex_32(info[0]) +
              ", Memory size = " + hex_32(info[1]) +
              ", Permission = " + hex(info[2]))
    page_mode.verbose_output()
    # full table dumps only at -vv and above
    if args.verbose > 1:
        page_mode.print_all_page_table_info()
# build sym table
def get_symbols(obj):
    """Return {symbol name: value} from the ELF file's symbol table.

    Raises LookupError when *obj* has no symbol table section.
    """
    for sec in obj.iter_sections():
        if not isinstance(sec, SymbolTableSection):
            continue
        return {symbol.name: symbol.entry.st_value
                for symbol in sec.iter_symbols()}
    raise LookupError("Could not find symbol table")
# determine which paging mode was selected
# determine which paging mode was selected
def get_page_mode():
    """Return the kernel's CONFIG_X86_PAE_MODE symbol value, or 0 when the
    symbol is absent (i.e. 4KB, non-PAE paging)."""
    with open(args.kernel, "rb") as fp:
        kernel = ELFFile(fp)
        sym = get_symbols(kernel)
    try:
        return sym["CONFIG_X86_PAE_MODE"]
    except KeyError:
        # Symbol absent -> PAE not configured.  Catch only KeyError: the
        # original blanket ``except BaseException`` also silently hid real
        # failures (and would even swallow KeyboardInterrupt).
        return 0
def main():
    """Entry point: read regions, build page tables, write the binary."""
    global output_buffer
    parse_args()
    # select the page table needed
    if get_page_mode():
        page_mode = PageMode_PAE()
    else:
        page_mode = PageMode_4kb()
    set_struct_endian_format(page_mode)
    global input_file
    input_file = open(args.input, 'rb')
    global binary_output_file
    binary_output_file = open(args.output, 'wb')
    # inputfile= file_name
    read_mmu_list_marshal_param(page_mode)
    # populate the required structs
    page_mode.populate_required_structs()
    # validate the inputs
    validate_pde_regions()
    # The size of the output buffer has to match the number of bytes we write
    # this corresponds to the number of page tables gets created.
    output_buffer = page_mode.set_binary_file_size()
    try:
        page_mode.pdpte_create_binary_file()
    except BaseException:
        # NOTE(review): presumably skips the PDPT when the mode object has no
        # pdpte_create_binary_file (4KB mode) -- but it also hides genuine
        # failures in PAE mode; a hasattr() check would be safer.
        pass
    page_mode.page_directory_create_binary_file()
    page_mode.page_table_create_binary_file()
    # write the binary data into the file
    binary_output_file.write(output_buffer)
    binary_output_file.close()
    # verbose output needed by the build system
    verbose_output(page_mode)
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 2.03125 | 2 |
TrainingCenter/TrainingCenter.py | JordanYeomans/DeepLearning | 0 | 12760242 | <reponame>JordanYeomans/DeepLearning<gh_stars>0
import tensorflow as tf
import numpy as np
from DeepLearning.Misc.ProgBar import new_prog_bar, update_prog_bar
import DeepLearning.Tensorflow_Base_Functions.evaluation as tfEval
import DeepLearning.Tensorflow_Base_Functions.loss as tfLoss
import DeepLearning.Tensorflow_Base_Functions.optimizers as tfOptimizers
import time
class TrainingCenter():
    """Coordinates TF1 training: session/saver setup, the batch loop,
    validation metrics, TensorBoard logging and checkpointing.

    Used together with a DataCenter (batch provider) and a model tensor.
    """
    def __init__(self):
        self.sess = tf.Session()
        self.saver = tf.train.Saver()
        # Checkpoint locations.
        self.model_save_folder = './saved_model/'
        self.model_save_name = 'model'
        # NOTE(review): no trailing '/', so load_model() builds
        # './saved_modelmodel.meta' -- confirm this path is intended.
        self.model_load_folder = './saved_model'
        self.model_load_name = 'model'
        self.prog_bar = new_prog_bar()
        self.switch_load_model = False  # NOTE(review): never read in this class
        # Which metrics update_val_metrics() should compute/export.
        self.val_metrics = ['predict_val_acc']
        self.update_val_metrics_on_batch_update = False  # NOTE(review): unused
        self.update_val_metrics_on_n_batches = False
        self.predict_train_acc_num_batches = 4
        self.loss = 'categorical_cross_entropy'
        self.optimizer = 'adam'
        # Internal Parameters
        self._batch_num = 0
        self.val_acc = 0
        self.eval_acc = 0
        self.train_acc = 0
        self.best_val_acc = 0
        self.epoch = 0
        self.save_on_best = False
        self.save_on_best_metric='val_acc'
        # Tensorboard Parameters
        self.tb_epoch_train_loss_var = tf.Variable(0, dtype=tf.float32)
        self.tb_epoch_train_acc_var = tf.Variable(0, dtype=tf.float32)
        self.tb_epoch_val_acc_var = tf.Variable(0, dtype=tf.float32)
        self.tb_epoch_eval_acc_var = tf.Variable(0, dtype=tf.float32)
        self.tb_time_train_loss_var = tf.Variable(0, dtype=tf.float32)
        self.tb_time_val_acc_var = tf.Variable(0, dtype=tf.float32)
        self.tb_time_eval_acc_var = tf.Variable(0, dtype=tf.float32)
        self.time_train_loss = 10 # Initialise in case first time assign is at batch_num == 0
        self.tb_suffix = ''
        self.start_time = time.time()
        self.last_time = time.time()
        self.time_count = 0
        self.tb_time_step = 1 # Minutes
    def initialize_sess(self):
        """Run the TF global-variable initializer on the held session."""
        self.sess.run(tf.global_variables_initializer())
    def update_val_metrics(self, DataCenter, model):
        """Recompute/export every metric listed in self.val_metrics."""
        # Epoch Train Loss
        self.epoch_loss_latest = np.mean(self.epoch_loss)
        # Prediction Accuracy
        if 'predict_val_acc' in self.val_metrics:
            self.val_acc = tfEval.prediction_accuracy(DataCenter, model,
                                                      DataCenter.val_input_batches,
                                                      DataCenter.val_output_batches)
            print('Validation Acc = {}'.format(self.val_acc))
        if 'predict_eval_acc' in self.val_metrics:
            self.eval_acc = tfEval.prediction_accuracy(DataCenter, model,
                                                       DataCenter.eval_input_batches,
                                                       DataCenter.eval_output_batches)
            print('Evaluation Acc = {}'.format(self.eval_acc))
        if 'predict_train_acc' in self.val_metrics:
            # only the first N training batches to keep this cheap
            self.train_acc = tfEval.prediction_accuracy(DataCenter, model,
                                                        DataCenter.train_input_batches[:self.predict_train_acc_num_batches],
                                                        DataCenter.train_output_batches[:self.predict_train_acc_num_batches])
            print('Training Acc = {}'.format(self.train_acc))
        if 'export_val_predictions' in self.val_metrics or 'export_onehot_val_predictions' in self.val_metrics:
            val_predictions = tfEval.export_val_one_hot_predictions(DataCenter, model)
            np.savetxt(self.model_save_folder + self.model_save_name + 'val_onehot_predictions.csv', val_predictions, delimiter=',')
        if 'export_mse_val_predictions' in self.val_metrics:
            val_predictions, val_true = tfEval.export_val_mse_predictions(DataCenter, model)
            np.savetxt(self.model_save_folder + self.model_save_name + 'val_mse_predictions.csv', val_predictions,delimiter=',')
            np.savetxt(self.model_save_folder + self.model_save_name + 'val_mse_true.csv', val_true, delimiter=',')
    def create_epoch_tensorboard(self):
        """Build the per-epoch TensorBoard scalars, merge op and writer."""
        self.tb_epoch_train_loss = tf.summary.scalar('Epoch - Training Loss', self.tb_epoch_train_loss_var)
        self.tb_epoch_train_acc = tf.summary.scalar('Epoch - Training Acc', self.tb_epoch_train_acc_var)
        self.tb_epoch_val_acc = tf.summary.scalar('Epoch - Validation Accuracy', self.tb_epoch_val_acc_var)
        self.tb_epoch_eval_acc = tf.summary.scalar('Epoch - Evaluation Accuracy', self.tb_epoch_eval_acc_var)
        self.tb_epoch_merged = tf.summary.merge([self.tb_epoch_train_loss,
                                                 self.tb_epoch_train_acc,
                                                 self.tb_epoch_val_acc,
                                                 self.tb_epoch_eval_acc])
        self.tb_epoch_train_writer = tf.summary.FileWriter(self.model_save_folder + 'epoch_tb' + self.tb_suffix)
    def create_time_tensorboard(self):
        """Build the wall-clock TensorBoard scalars, merge op and writer.

        NOTE(review): tb_time_eval_acc is created but not in the merged
        summary -- confirm whether it should be included.
        """
        self.tb_time_train_loss = tf.summary.scalar('Time - Training Loss', self.tb_time_train_loss_var)
        self.tb_time_val_acc = tf.summary.scalar('Time - Validation Accuracy', self.tb_time_val_acc_var)
        self.tb_time_eval_acc = tf.summary.scalar('Time - Evaluation Accuracy', self.tb_time_eval_acc_var)
        self.tb_time_merged = tf.summary.merge([self.tb_time_train_loss,
                                                self.tb_time_val_acc])
        self.tb_time_train_writer = tf.summary.FileWriter(self.model_save_folder + 'time_tb' + self.tb_suffix)
    def update_epoch_tensorboard(self):
        """Push the latest epoch metrics into TensorBoard."""
        # Assign Variables
        self.sess.run([self.tb_epoch_train_acc_var.assign(self.train_acc),
                       self.tb_epoch_val_acc_var.assign(self.val_acc),
                       self.tb_epoch_eval_acc_var.assign(self.eval_acc),
                       self.tb_epoch_train_loss_var.assign(self.epoch_loss_latest)])
        summary = self.sess.run(self.tb_epoch_merged)
        self.tb_epoch_train_writer.add_summary(summary, self.epoch)
    def update_time_tensorboard(self, DataCenter, model):
        """Every tb_time_step minutes, sample metrics and log them."""
        self.time_step_sec = self.tb_time_step * 60
        next_time = self.last_time + self.time_step_sec
        if time.time() > next_time:
            self.last_time = time.time()
            self.update_time_tensorboard_vars(DataCenter, model)
            # Assign Variables
            self.sess.run([self.tb_time_val_acc_var.assign(self.time_val_acc),
                           self.tb_time_eval_acc_var.assign(self.time_eval_acc),
                           self.tb_time_train_loss_var.assign(self.time_train_loss)])
            summary = self.sess.run(self.tb_time_merged)
            self.tb_time_train_writer.add_summary(summary, self.time_count)
            self.time_count += 1
    def update_time_tensorboard_vars(self, DataCenter, model):
        """Sample val/eval accuracy on random batches plus the running loss."""
        # Update Validation Parameters
        rand_choice = np.random.randint(0,DataCenter.val_input_batches.shape[0], size=5)
        x_data = DataCenter.val_input_batches[rand_choice]
        y_data = DataCenter.val_output_batches[rand_choice]
        self.time_val_acc = tfEval.prediction_accuracy(DataCenter, model, x_data, y_data)
        # Update Evaluation Parameters
        rand_choice = np.random.randint(0, DataCenter.eval_input_batches.shape[0], size=5)
        x_data = DataCenter.eval_input_batches[rand_choice]
        y_data = DataCenter.eval_output_batches[rand_choice]
        self.time_eval_acc = tfEval.prediction_accuracy(DataCenter, model, x_data, y_data)
        # Update Training Parameters
        if self._batch_num != 0: # Skip if _batch_num = 0. Loss will hold same value as last time
            self.time_train_loss = np.mean(self.epoch_loss[:self._batch_num])
    def prog_bar_update(self):
        """Advance the progress bar to the current batch fraction."""
        update = self._batch_num/self.num_train_batches
        update_prog_bar(self.prog_bar, update)
    def load_model(self, load):
        """Restore the latest checkpoint from model_load_folder when *load*."""
        if load is True:
            new_saver = tf.train.import_meta_graph(self.model_load_folder + self.model_load_name + '.meta')
            new_saver.restore(self.sess, tf.train.latest_checkpoint(self.model_load_folder))
    def save_sess(self):
        """Write a checkpoint of the current session."""
        print('Saving Model')
        self.saver.save(self.sess, self.model_save_folder + self.model_save_name)
    def save_model(self, save=True, save_on_best=True):
        """Save now, or only when calc_best_model() marks an improvement.

        NOTE(review): the ``save_on_best`` parameter is ignored; the
        instance attribute self.save_on_best controls the behaviour.
        """
        if save is True and self.save_on_best is False:
            self.save_sess()
        elif save is True and self.save_on_best is True:
            self.calc_best_model()
            if self.best_model is True:
                self.save_sess()
    def calc_best_model(self):
        """Set self.best_model according to save_on_best_metric."""
        self.best_model = False # Set best model to false
        # If save Metric is Validation Accuracy
        if self.save_on_best_metric == 'val_acc':
            if self.val_acc > self.best_val_acc:
                self.best_val_acc = self.val_acc # Record best val acc
                self.best_model = True # Set best model to true
        # Add save metrics here:
    def initialize_loss(self, DataCenter, model):
        """Build the loss tensor named by self.loss."""
        if self.loss == 'categorical_cross_entropy':
            print('Setting the Loss to: Categorical Cross Entropy')
            self._loss = tfLoss.categorical_cross_entropy(DataCenter, model)
        if self.loss == 'mean_squared_error':
            print('Setting the Loss to: Mean Squared Error')
            self._loss = tfLoss.mean_squared_error(DataCenter, model)
    def initialize_optimizer(self, DataCenter):
        """Build the optimizer named by self.optimizer."""
        if self.optimizer == 'adam':
            print('Setting the Optimizer to: Adam')
            self._optimizer = tfOptimizers.adam_optimizer(DataCenter.learning_rate)
    def train_step(self):
        """Run one optimization step on the current batch and record its loss."""
        _, self.step_c = self.sess.run([self.learning_step, self._loss],
                                       feed_dict={self.x: self.train_input_batch,
                                                  self.y: self.train_output_batch})
        self.update_epoch_loss()
    def set_train_batch(self, DataCenter):
        """Fetch the input/output arrays for the current batch index."""
        self.train_input_batch = DataCenter.train_input_batches[self._batch_num] # Get Input Data for batch
        self.train_output_batch = DataCenter.train_output_batches[self._batch_num] # Get Output Data for batch
    def set_placeholders(self, DataCenter):
        """Bind the DataCenter's x/y placeholders onto this instance."""
        self.x = DataCenter.x_placeholder
        self.y = DataCenter.y_placeholder
    def set_learning_step(self, DataCenter, model):
        """Create loss, optimizer and the minimize op."""
        self.initialize_loss(DataCenter, model)
        self.initialize_optimizer(DataCenter)
        self.learning_step = self._optimizer.minimize(self._loss)
    def reset_epoch_loss(self):
        """Zero the per-batch loss accumulator for a new epoch."""
        self.epoch_loss = np.zeros(self.num_train_batches)
    def update_epoch_loss(self):
        """Store the latest step loss at the current batch slot."""
        self.epoch_loss[self._batch_num] = self.step_c
    def update_consol(self):
        """Refresh the progress bar and print the latest step loss."""
        self.prog_bar_update()
        print('\n' + str(self.step_c))
    def train_model(self, DataCenter, model, save=True, load=False):
        """Full training loop over DataCenter.epochs epochs.

        Optionally restores a checkpoint first (*load*) and saves after
        each epoch (*save*, subject to save_on_best).
        """
        self.set_placeholders(DataCenter) # Create Placeholders
        self.set_learning_step(DataCenter, model) # Create Cost, Optimiser and learning step
        self.num_train_batches = DataCenter.num_train_batches # Set training batches per epoch
        with tf.Session() as self.sess:
            self.sess.run(tf.global_variables_initializer())
            self.load_model(load) # Check if we need to load model. If so, load
            self.create_epoch_tensorboard() # Create Tensorboard Parameters
            self.create_time_tensorboard() # Create Time Based Tensorboard
            for self.epoch in range(DataCenter.epochs): # Iterate over all epochs
                DataCenter.reset_train_batches() # Reset DataCenter Training Batches
                self.reset_epoch_loss() # Reset Epoch Loss
                for self._batch_num in range(self.num_train_batches): # Iterate over all batches
                    self.set_train_batch(DataCenter) # Set training batch data
                    self.train_step() # Run 1 step
                    self.update_consol() # Update Progress Bar
                    self.update_time_tensorboard(DataCenter, model) # Update time based tensorboard if needed
                    if self.update_val_metrics_on_n_batches is not False:
                        if self._batch_num != 0 and self._batch_num%self.update_val_metrics_on_n_batches == 0:
                            self.update_val_metrics(DataCenter, model)
                            self.update_epoch_tensorboard()
                # End of Iteration functions
                self.update_val_metrics(DataCenter, model) # Update Validation Metrics
                self.update_epoch_tensorboard() # Update Epoch Tensorboard
                self.save_model(save) # Check if we need to Save model. If so, Save
POSITIONMANAGE/api_position.py | CJuanvip/quant-trading-system | 281 | 12760243 | import json
def api_position(db, cursor, temp, principal5, principal30, principal60, principal300, principal900, principal1800, coin_number5, coin_number30, coin_number60, coin_number300, coin_number900, coin_number1800, judge_position, sell_amount, buy_amount, current_price):
    """Run the position-sizing logic for every signalled interval, net the
    buy/sell amounts, execute one netted order and persist a snapshot.

    db/cursor: open DB connection/cursor used to log into order_table.
    temp: dict mapping interval keys ('5', '30', '60', '300', '900',
        '1800') to an index dict with 'buy_index' and 'sell_index'.
    principal*/coin_number*: current cash and coin holdings per interval.
    judge_position: maximum position ratio above which buys are blocked.
    sell_amount/buy_amount: base trade sizes, scaled by signal strength.
    current_price: latest market price.

    Returns the updated {'principalX': ..., 'coin_numberX': ...} holdings.
    """
    flee = 0.0025  # fee rate applied to every fill
    # Per-interval mutable state: key -> [principal, coin_number].  This
    # data-driven loop replaces the original six copy-pasted if-blocks,
    # which differed only in the interval key and variable suffix.
    state = {
        '5': [principal5, coin_number5],
        '30': [principal30, coin_number30],
        '60': [principal60, coin_number60],
        '300': [principal300, coin_number300],
        '900': [principal900, coin_number900],
        '1800': [principal1800, coin_number1800],
    }
    all_buyamount = 0
    all_sellamount = 0
    trade_amonut = {}
    for interval, index in temp.items():
        if interval not in state:
            continue  # unrecognised interval keys are ignored, as before
        principal, coin_number = state[interval]
        result = position(coin_number, principal, buy_amount, sell_amount,
                          flee, judge_position, index, current_price)
        trade_amonut[interval] = result
        if result['action'] == 'buy':
            state[interval] = [result['value']['principal'],
                               result['value']['coin_number']]
            all_buyamount += result['value']['buy_amount']
        elif result['action'] == 'sell':
            state[interval] = [result['value']['principal'],
                               result['value']['coin_number']]
            all_sellamount += result['value']['sell_amount']
    principal5, coin_number5 = state['5']
    principal30, coin_number30 = state['30']
    principal60, coin_number60 = state['60']
    principal300, coin_number300 = state['300']
    principal900, coin_number900 = state['900']
    principal1800, coin_number1800 = state['1800']
    holdings = {'principal5': principal5, 'coin_number5': coin_number5,
                'principal30': principal30, 'coin_number30': coin_number30,
                'principal60': principal60, 'coin_number60': coin_number60,
                'principal300': principal300, 'coin_number300': coin_number300,
                'principal900': principal900, 'coin_number900': coin_number900,
                'principal1800': principal1800, 'coin_number1800': coin_number1800}
    # Only trade (and log) when buys and sells do not net out to zero.
    if all_buyamount != all_sellamount:
        if all_buyamount > all_sellamount:
            uid = exec('buy', all_buyamount - all_sellamount)
        else:
            uid = exec('sell', all_sellamount - all_buyamount)
        payload = dict(holdings)
        payload['result'] = trade_amonut
        payload['current_price'] = current_price
        # NOTE(review): string-built SQL; values are internal, but a
        # parameterized query (cursor.execute(sql, params)) would be safer.
        sql = "INSERT INTO order_table(uid , valuess , timess) VALUES ('%s', '%s', '%s')" % (
            str(uid), json.dumps(payload), 0)
        cursor.execute(sql)
        db.commit()
    return holdings
def position(coin_number, principal, buy_amount, sell_amount, flee, judge_position, index, current_price):
    """Decide whether to buy, sell or hold, given buy/sell index pressure.

    ``index`` is a dict with 'buy_index' and 'sell_index'; whichever side
    dominates determines the action. ``flee`` is presumably a fee/commission
    rate applied to traded amounts (TODO confirm). ``judge_position`` caps the
    fraction of total wealth held in coins before further buys are blocked.

    Returns a dict with an 'action' key ('buy', 'sell' or 'none') and, for
    trades, a 'value' dict holding the traded amount plus the updated
    principal and coin_number.
    """
    holdings_value = coin_number * current_price
    # Fraction of total wealth currently held as coins.
    exposure = holdings_value / (principal + holdings_value)
    buy_idx = index['buy_index']
    sell_idx = index['sell_index']

    if buy_idx > sell_idx and judge_position > exposure:
        spend = (buy_idx / (buy_idx + sell_idx)) * buy_amount
        if spend < principal:
            coin_number = ((spend - spend * flee) / current_price) + coin_number
            principal = principal - spend
        else:
            # Not enough cash: spend whatever principal remains.
            spend = principal
            coin_number = ((principal - principal * flee) / current_price) + coin_number
            principal = 0
        return {'action': 'buy',
                'value': {'buy_amount': spend, 'principal': principal, 'coin_number': coin_number}}

    if buy_idx < sell_idx and exposure > 0:
        proceeds = (sell_idx / (buy_idx + sell_idx)) * sell_amount
        if (proceeds / current_price) < coin_number:
            coin_number = coin_number - (proceeds / current_price)
            principal = principal + (proceeds - proceeds * flee)
        else:
            # Not enough coins: liquidate the whole position.
            proceeds = coin_number * current_price
            principal = principal + (coin_number - coin_number * flee) * current_price
            coin_number = 0
        return {'action': 'sell',
                'value': {'sell_amount': proceeds, 'principal': principal, 'coin_number': coin_number}}

    return {'action': 'none'}
def exec(action, buy_amount):
    """Placeholder order-execution hook; returns a hard-coded fake order uid.

    NOTE(review): both arguments are ignored, and this function shadows the
    ``exec`` builtin — callers rely on the name, so it is kept, but a rename
    (e.g. ``execute_order``) is recommended. Replace the stub with a real
    exchange call.
    """
    return 23231321
tests/test_server.py | jre21/mindmeld | 1 | 12760244 | import pytest
import json
from mindmeld.server import MindMeldServer
from mindmeld.app_manager import ApplicationManager
@pytest.fixture
def app_manager(kwik_e_mart_app_path, kwik_e_mart_nlp):
    """Provide an ApplicationManager wired to the Kwik-E-Mart sample app."""
    manager = ApplicationManager(kwik_e_mart_app_path, nlp=kwik_e_mart_nlp)
    return manager
@pytest.fixture
def client(app_manager):
    """Yield a test client bound to a MindMeldServer for the sample app."""
    test_client = MindMeldServer(app_manager)._server.test_client()
    yield test_client
def test_parse_endpoint(client):
    """POSTing a query to /parse resolves entities and returns the full payload."""
    payload = {'text': 'where is the restaurant on 12th ave'}
    response = client.post('/parse', data=json.dumps(payload),
                           content_type='application/json',
                           follow_redirects=True)
    assert response.status == '200 OK'
    body = json.loads(response.data.decode('utf8'))
    # The street-name entity should be resolved to its canonical name.
    resolved = body['request']['entities'][0]['value'][0]
    assert resolved['cname'] == '12th Avenue'
    assert set(body.keys()) == {
        'version', 'history', 'params', 'frame', 'dialogue_state',
        'request_id', 'response_time', 'request', 'directives', 'slots'}
def test_parse_endpoint_fail(client):
    """POSTing to /parse without a JSON body must be rejected with 415."""
    status = client.post('/parse').status
    assert status == '415 UNSUPPORTED MEDIA TYPE'
def test_status_endpoint(client):
    """GET /_status reports service health with the expected fields."""
    response = client.get('/_status')
    assert response.status == '200 OK'
    body = json.loads(response.data.decode('utf8'))
    assert set(body.keys()) == {
        'package_version', 'status', 'response_time', 'version'}
| 2.0625 | 2 |
modern3.py | yanapermana/metadecryptor | 49 | 12760245 | from check_RSA3 import *
class Modern3:
    """Thin wrapper around the RSA checks imported from ``check_RSA3``."""

    # NOTE: the original vacuous ``__init__`` (body was ``pass``) was removed;
    # the implicit default constructor behaves identically. The trailing
    # dataset junk fused onto the last line was also stripped.
    def check_RSAs(self, target):
        """Run the RSA sanity check against *target* (delegates to check_RSA)."""
        check_RSA(target)
temboardagent/plugins/monitoring/output.py | tilkow/temboard-agent | 1 | 12760246 | <reponame>tilkow/temboard-agent
def remove_passwords(instances):
    """Return copies of *instances* with any 'password' entry stripped out.

    The input dicts are not mutated; a new list of new dicts is returned.
    """
    return [
        {key: value for key, value in instance.items() if key != 'password'}
        for instance in instances
    ]
| 2.875 | 3 |
main.py | davidlandry93/miv | 0 | 12760247 | <filename>main.py
#!/usr/bin/env python
import argparse
from miv import VimBuffer, reverse_engineer
# Command-line interface: the input buffer plus the start and target
# cursor positions.
parser = argparse.ArgumentParser(description='Reverse engineer a way to '
                                             'create a selection in a Vim buffer.')
parser.add_argument(
    'inputFile',
    # `file` is not a builtin on Python 3; argparse.FileType works on 2 and 3.
    type=argparse.FileType('r'),
    help='A file containing the text to play with'
)
parser.add_argument(
    'line',
    type=int,
    help='The line where the cursor is.'
)
parser.add_argument(
    'column',
    type=int,
    help='The column where the cursor is.'
)
parser.add_argument(
    'targetLine',
    type=int,
    help='The line we want the cursor to end up in'
)
parser.add_argument(
    'targetColumn',
    type=int,
    help='The column we want the cursor to end up in'
)
ns = parser.parse_args()

# Load the whole buffer into memory, one entry per line.
textArray = []
for line in ns.inputFile:
    textArray.append(line)

vimBuffer = VimBuffer(textArray, (ns.line, ns.column))
commandSequence = reverse_engineer(
    vimBuffer,
    ((ns.line, ns.column), (ns.targetLine, ns.targetColumn))
)

print("Soltn: ")
for command in commandSequence:
    print(command)
| 3.5 | 4 |
nswebdav/parse.py | vanbas/nswebdav | 5 | 12760248 | from datetime import datetime
from lxml import etree
from urllib.parse import unquote
def parse_ls(xml_content):
    """Parse a WebDAV PROPFIND listing into a list of Entity objects.

    Each entity carries href, display name, directory flag, size, last
    modified time (unix timestamp), owner, mime type and privilege flags.
    Missing elements are reported as None (booleans default to False).
    """
    tree = etree.fromstring(xml_content)
    nsmap = tree.nsmap

    def text(node, path):
        return node.findtext(path, None, nsmap)

    def has(node, path):
        return node.find(path, nsmap) is not None

    entities = []
    for node in tree.findall(".//d:response", nsmap):
        raw_href = text(node, ".//d:href")
        raw_length = text(node, ".//d:getcontentlength")
        raw_modified = text(node, ".//d:getlastmodified")
        entities.append(Entity(
            href=unquote(raw_href) if raw_href else None,
            display_name=text(node, ".//d:displayname"),
            is_dir=has(node, ".//d:resourcetype/d:collection"),
            content_length=int(raw_length) if raw_length else None,
            last_modified=datetime.strptime(
                raw_modified,
                "%a, %d %b %Y %H:%M:%S %Z"
            ).timestamp() if raw_modified else None,
            owner=text(node, ".//d:owner"),
            mime_type=text(node, ".//d:getcontenttype"),
            readable=has(node, ".//d:privilege/d:read"),
            writable=has(node, ".//d:privilege/d:write"),
            full_privilege=has(node, ".//d:privilege/d:all"),
            read_acl=has(node, ".//d:privilege/d:read_acl"),
            write_acl=has(node, ".//d:privilege/d:write_acl"),
        ))
    return entities
def parse_search(xml_content):
    """Parse a WebDAV SEARCH response into a list of Entity objects.

    Similar to parse_ls but reports the server-specific ``resource_perm``
    string instead of individual privilege flags.
    """
    tree = etree.fromstring(xml_content)
    nsmap = tree.nsmap
    entities = []
    for node in tree.findall(".//d:response", nsmap):
        raw_href = node.findtext(".//d:href", None, nsmap)
        raw_modified = node.findtext(".//d:getlastmodified", None, nsmap)
        raw_length = node.findtext(".//d:getcontentlength", None, nsmap)
        entities.append(Entity(
            href=unquote(raw_href) if raw_href else None,
            is_dir=node.find(".//d:resourcetype/d:collection", nsmap) is not None,
            last_modified=datetime.strptime(
                raw_modified,
                "%a, %d %b %Y %H:%M:%S %Z"
            ).timestamp() if raw_modified else None,
            owner=node.findtext(".//d:owner", None, nsmap),
            mime_type=node.findtext(".//d:getcontenttype", None, nsmap),
            resource_perm=node.findtext(".//s:resourceperm", None, nsmap),
            content_length=int(raw_length) if raw_length else None,
        ))
    return entities
def parse_share_link(xml_content):
    """Extract the share link from the server response (stripped, '' if absent)."""
    root = etree.fromstring(xml_content)
    return root.findtext("s:sharelink", "", root.nsmap).strip()
def parse_acl(xml_content):
    """Parse ACL entries into ``users`` and ``groups`` permission maps.

    Entries with a <username> element go into ``users``; all others are
    treated as group entries. Values are the raw permission strings.
    """
    root = etree.fromstring(xml_content)
    nsmap = root.nsmap
    results = Entity({
        "users": Entity(),
        "groups": Entity()
    })
    for acl in root.findall("s:acl", nsmap):
        perm = acl.findtext("s:perm", "", nsmap)
        user = acl.findtext("s:username", None, nsmap)
        if user is None:
            group = acl.findtext("s:group", "", nsmap)
            results.groups[group] = perm
        else:
            results.users[user] = perm
    return results
def parse_latest_cursor(xml_content):
    """Return the latest sync cursor as an int, or None when absent."""
    root = etree.fromstring(xml_content)
    cursor = root.findtext("s:cursor", None, root.nsmap)
    if cursor is None:
        return None
    return int(cursor, 16)  # the cursor is transmitted as a hex string
def parse_cp_shared_object(xml_content):
    """Return the uuid assigned to a copied shared object ('' if absent)."""
    root = etree.fromstring(xml_content)
    return root.findtext("s:copy_uuid", "", root.nsmap)
def parse_content_url(xml_content):
    """Return the URL-unquoted content href from the response ('' if absent)."""
    root = etree.fromstring(xml_content)
    return unquote(root.findtext("s:href", "", root.nsmap))
def parse_team_members(xml_content):
    """Parse the team roster into a list of member Entity objects.

    A member is flagged as admin when its element tag contains "admin".
    Missing fields are reported as None.
    """
    root = etree.fromstring(xml_content)
    nsmap = root.nsmap
    members = []
    for node in root.getchildren():
        raw_quota = node.findtext("s:storage_quota", None, nsmap)
        raw_ldap = node.findtext("s:ldap_user", None, nsmap)
        raw_disabled = node.findtext("s:disabled", None, nsmap)
        members.append(Entity(
            admin="admin" in node.tag,
            user_name=node.findtext("s:username", None, nsmap),
            nickname=node.findtext("s:nickname", None, nsmap),
            storage_quota=int(raw_quota) if raw_quota else None,
            ldap_user=(raw_ldap == "true") if raw_ldap else None,
            disabled=(raw_disabled == "true") if raw_disabled else None,
        ))
    return members
def parse_history(xml_content):
    """Parse a delta/history response into an Entity with a ``deltas`` list.

    The top level carries reset/cursor/has_more; each delta entry carries
    path, size, deletion/dir flags, modified timestamp and revision.
    """
    root = etree.fromstring(xml_content)
    nsmap = root.nsmap

    raw_reset = root.findtext("s:reset", None, nsmap)
    raw_cursor = root.findtext("s:cursor", None, nsmap)
    raw_more = root.findtext("s:hasMore", None, nsmap)
    history = Entity(
        reset=(raw_reset == "true") if raw_reset else None,
        cursor=int(raw_cursor, 16) if raw_cursor else None,  # hex cursor
        has_more=(raw_more == "true") if raw_more else None,
        deltas=[],
    )

    for entry in root.findall("s:delta/s:entry", nsmap):
        raw_size = entry.findtext(".//s:size", None, nsmap)
        raw_deleted = entry.findtext(".//s:isDeleted", None, nsmap)
        raw_is_dir = entry.findtext(".//s:isDir", None, nsmap)
        raw_modified = entry.findtext(".//s:modified", None, nsmap)
        raw_revision = entry.findtext(".//s:revision", None, nsmap)
        history.deltas.append(Entity(
            path=entry.findtext(".//s:path", None, nsmap),
            size=int(raw_size) if raw_size else None,
            is_deleted=(raw_deleted == "true") if raw_deleted else None,
            is_dir=(raw_is_dir == "true") if raw_is_dir else None,
            modified=datetime.strptime(
                raw_modified,
                "%a, %d %b %Y %H:%M:%S %Z"
            ).timestamp() if raw_modified else None,
            revision=int(raw_revision) if raw_revision else None,
        ))
    return history
def parse_user_info(xml_content):
    """Parse the account-info response into an Entity.

    Returns user name, admin flag, account state, team id, quota/usage,
    expiry (unix timestamp) and the list of collections the user can access.
    Missing fields are reported as None.
    """
    t = etree.fromstring(xml_content)
    user_name = t.findtext("s:username", None, t.nsmap)
    state = t.findtext("s:account_state", None, t.nsmap)
    storage_quota = t.findtext("s:storage_quota", None, t.nsmap)
    storage_quota = int(storage_quota) if storage_quota else None
    used_storage = t.findtext("s:used_storage", None, t.nsmap)
    used_storage = int(used_storage) if used_storage else None
    is_admin = t.findtext("s:team/s:is_admin", None, t.nsmap)
    # findtext() already returns the text content, so compare the string
    # directly (the previous ``is_admin.text`` raised AttributeError).
    is_admin = is_admin == "true" if is_admin else None
    team_id = t.findtext("s:team/s:id", None, t.nsmap)
    expire_time = t.findtext("s:expire_time", None, t.nsmap)
    expire_time = int(expire_time) / 1000 if expire_time else None  # convert to timestamp
    _collections = t.findall("s:collection", t.nsmap)
    collections = []
    for _collection in _collections:
        href = _collection.findtext(".//s:href", None, t.nsmap)
        href = unquote(href) if href else None
        # Distinct name: this loop used to clobber the account-level
        # ``used_storage`` computed above before it was stored.
        coll_used_storage = _collection.findtext(".//s:used_storage", None, t.nsmap)
        coll_used_storage = int(coll_used_storage) if coll_used_storage else None
        is_owner = _collection.findtext(".//s:owner", None, t.nsmap)
        is_owner = is_owner == "true" if is_owner else None
        collection = Entity(href=href,
                            used_storage=coll_used_storage,
                            is_owner=is_owner)
        collections.append(collection)
    entity = Entity(user_name=user_name,
                    is_admin=is_admin,
                    state=state,
                    team_id=team_id,
                    storage_quota=storage_quota,
                    used_storage=used_storage,
                    expire_time=expire_time,
                    collections=collections)
    return entity
def parse_team_member_info(xml_content):
    """Parse a team member's info (quota, expiry and sandboxes) into an Entity.

    Missing fields are reported as None; ``expire_time`` is converted from
    milliseconds to a unix timestamp.
    """
    t = etree.fromstring(xml_content)
    user_name = t.findtext("s:username", None, t.nsmap)
    storage_quota = t.findtext("s:storageQuota", None, t.nsmap)
    storage_quota = int(storage_quota) if storage_quota else None
    expire_time = t.findtext("s:expireTime", None, t.nsmap)
    expire_time = int(expire_time) / 1000 if expire_time else None  # convert to timestamp
    _sandboxes = t.findall("s:sandbox", t.nsmap)
    sandboxes = []
    for _sandbox in _sandboxes:
        name = _sandbox.findtext("s:name", None, t.nsmap)
        # Distinct name: this loop used to clobber the member-level
        # ``storage_quota`` computed above before it was stored.
        # NOTE(review): sandbox quotas are kept as raw strings here, unlike the
        # member-level quota — confirm whether callers expect ints.
        sandbox_quota = _sandbox.findtext("s:storageQuota", None, t.nsmap)
        sandbox = Entity(name=name,
                         storage_quota=sandbox_quota)
        sandboxes.append(sandbox)
    entity = Entity(user_name=user_name,
                    storage_quota=storage_quota,
                    expire_time=expire_time,
                    sandboxes=sandboxes)
    return entity
def parse_group_members(xml_content):
    """Parse a group listing into subgroups, admins and plain users."""
    root = etree.fromstring(xml_content)
    nsmap = root.nsmap

    def person(node):
        # Admins and users share the same username/nickname shape.
        return Entity(user_name=node.findtext("s:username", None, nsmap),
                      nickname=node.findtext("s:nickname", None, nsmap))

    subgroups = []
    for node in root.findall("s:subgroup", nsmap):
        raw_id = node.findtext("s:id", None, nsmap)
        subgroups.append(Entity(group_id=int(raw_id) if raw_id else None,
                                name=node.findtext("s:name", None, nsmap)))

    admins = [person(node) for node in root.findall("s:admin", nsmap)]
    users = [person(node) for node in root.findall("s:user", nsmap)]
    return Entity(subgroups=subgroups,
                  admins=admins,
                  users=users)
def parse_created_group(xml_content):
    """Return the id of a freshly created group, or None when absent/empty."""
    root = etree.fromstring(xml_content)
    raw_id = root.findtext("s:id", None, root.nsmap)
    return int(raw_id) if raw_id else None
def parse_audit_logs(xml_content):
    """Parse an audit-log response into an Entity with an ``activities`` list.

    Operation times are converted from milliseconds to unix timestamps;
    missing fields are reported as None.
    """
    root = etree.fromstring(xml_content)
    nsmap = root.nsmap

    raw_num = root.findtext("s:log_num", None, nsmap)
    raw_first = root.findtext("s:first_operation_time", None, nsmap)
    raw_last = root.findtext("s:last_operation_time", None, nsmap)
    raw_more = root.findtext("s:has_more", None, nsmap)

    activities = []
    for node in root.findall("s:activity", nsmap):
        activities.append(Entity(
            operator=node.findtext("s:operator", None, nsmap),
            operation=node.findtext("s:operation", None, nsmap),
            ip=node.findtext("s:ip", None, nsmap),
            ip_location=node.findtext("s:ip_location", None, nsmap),
            terminal=node.findtext("s:terminal", None, nsmap),
            consuming=node.findtext("s:consuming", None, nsmap),
        ))

    return Entity(
        log_num=int(raw_num) if raw_num else None,
        first_operation_time=int(raw_first) / 1000 if raw_first else None,
        last_operation_time=int(raw_last) / 1000 if raw_last else None,
        has_more=(raw_more == "true") if raw_more else None,
        activities=activities,
    )
class Entity(dict):
    """Dict subclass whose keys are also readable/writable as attributes.

    Attribute access on a missing key raises AttributeError (so the object
    still plays nicely with hasattr and duck typing).
    """

    def __getattr__(self, name):
        if name in self:
            return self[name]
        raise AttributeError("No such key '%s'" % name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        if name not in self:
            raise AttributeError("No such key '%s'" % name)
        del self[name]
| 2.734375 | 3 |
setup.py | teekaay/aws-switchrole-link | 2 | 12760249 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import io
from setuptools import setup, find_packages
VERSION = (1, 0, 0)
__versionstr__ = '.'.join(map(str, VERSION))
# Read the long description up front so the file handle is closed promptly
# (the previous inline ``io.open(...).read()`` leaked the handle).
with io.open('README.md', 'r', encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='aws_switchrole_links',
    description="Generate links for switching role in AWS console",
    license="Apache License, Version 2.0",
    url="https://github.com/teekaay/aws-switchrole-links",
    long_description=long_description,
    # The distutils/setuptools keyword is ``platforms``; the previous
    # ``platform='any'`` was an unknown argument and silently ignored.
    platforms=['any'],
    zip_safe=False,
    version=__versionstr__,
    author="<NAME>",
    author_email="<EMAIL>",
    packages=find_packages(exclude=('test*', )),
    classifiers=[
        "Development Status :: 1 - Planning",
        "License :: OSI Approved :: Apache Software License",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
    include_package_data=True,
    install_requires=[
    ],
    extras_require={
        'develop': ['nose', 'flake8']
    },
    entry_points={
        'console_scripts': [
            'aws-switchrole-links = aws_switchrole_links.__main__:main']
    },
    keywords='aws aws-console'
)
| 1.421875 | 1 |
python3/berrymq_singlethread/__init__.py | shibukawa/berrymq | 0 | 12760250 | <gh_stars>0
# -*- coding: utf-8 -*-
from .berrymq import (following,
following_function,
auto_twitter,
cond,
Follower,
twitter)
| 1.023438 | 1 |